Commit 27a4626a

Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
2024-03-30 05:08:28
feat(api): adding temperature parameter (#1282)
Parent: 7828119
Changed files (11)
src/openai/resources/beta/threads/messages/messages.py
@@ -52,7 +52,7 @@ class Messages(SyncAPIResource):
         thread_id: str,
         *,
         content: str,
-        role: Literal["user"],
+        role: Literal["user", "assistant"],
         file_ids: List[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -68,8 +68,13 @@ class Messages(SyncAPIResource):
         Args:
           content: The content of the message.
 
-          role: The role of the entity that is creating the message. Currently only `user` is
-              supported.
+          role:
+              The role of the entity that is creating the message. Allowed values include:
+
+              - `user`: Indicates the message is sent by an actual user and should be used in
+                most cases to represent user-generated messages.
+              - `assistant`: Indicates the message is generated by the assistant. Use this
+                value to insert messages from the assistant into the conversation.
 
           file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
               the message should use. There can be a maximum of 10 files attached to a
@@ -276,7 +281,7 @@ class AsyncMessages(AsyncAPIResource):
         thread_id: str,
         *,
         content: str,
-        role: Literal["user"],
+        role: Literal["user", "assistant"],
         file_ids: List[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -292,8 +297,13 @@ class AsyncMessages(AsyncAPIResource):
         Args:
           content: The content of the message.
 
-          role: The role of the entity that is creating the message. Currently only `user` is
-              supported.
+          role:
+              The role of the entity that is creating the message. Allowed values include:
+
+              - `user`: Indicates the message is sent by an actual user and should be used in
+                most cases to represent user-generated messages.
+              - `assistant`: Indicates the message is generated by the assistant. Use this
+                value to insert messages from the assistant into the conversation.
 
           file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
               the message should use. There can be a maximum of 10 files attached to a
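
A minimal usage sketch of the widened `role` parameter, assuming a configured client; the thread ID below is an illustrative placeholder, not something from this commit:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Insert a prior assistant turn into an existing thread, then a user turn.
client.beta.threads.messages.create(
    thread_id="thread_abc123",
    role="assistant",
    content="Sure - here is the summary we discussed earlier.",
)
client.beta.threads.messages.create(
    thread_id="thread_abc123",
    role="user",
    content="Thanks, can you expand on the second point?",
)
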
src/openai/resources/beta/threads/runs/runs.py
@@ -76,6 +76,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -114,6 +115,10 @@ class Runs(SyncAPIResource):
               events, terminating when the Run enters a terminal state with a `data: [DONE]`
               message.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -138,6 +143,7 @@ class Runs(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -176,6 +182,10 @@ class Runs(SyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -200,6 +210,7 @@ class Runs(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -238,6 +249,10 @@ class Runs(SyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -262,6 +277,7 @@ class Runs(SyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -283,6 +299,7 @@ class Runs(SyncAPIResource):
                     "metadata": metadata,
                     "model": model,
                     "stream": stream,
+                    "temperature": temperature,
                     "tools": tools,
                 },
                 run_create_params.RunCreateParams,
@@ -489,6 +506,7 @@ class Runs(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -510,6 +528,7 @@ class Runs(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         event_handler: AssistantEventHandlerT,
@@ -531,6 +550,7 @@ class Runs(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         event_handler: AssistantEventHandlerT | None = None,
@@ -561,6 +581,7 @@ class Runs(SyncAPIResource):
                     "instructions": instructions,
                     "metadata": metadata,
                     "model": model,
+                    "temperature": temperature,
                     "stream": True,
                     "tools": tools,
                 },
@@ -841,6 +862,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -879,6 +901,10 @@ class AsyncRuns(AsyncAPIResource):
               events, terminating when the Run enters a terminal state with a `data: [DONE]`
               message.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -903,6 +929,7 @@ class AsyncRuns(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -941,6 +968,10 @@ class AsyncRuns(AsyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -965,6 +996,7 @@ class AsyncRuns(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1003,6 +1035,10 @@ class AsyncRuns(AsyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           tools: Override the tools the assistant can use for this run. This is useful for
               modifying the behavior on a per-run basis.
 
@@ -1027,6 +1063,7 @@ class AsyncRuns(AsyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1048,6 +1085,7 @@ class AsyncRuns(AsyncAPIResource):
                     "metadata": metadata,
                     "model": model,
                     "stream": stream,
+                    "temperature": temperature,
                     "tools": tools,
                 },
                 run_create_params.RunCreateParams,
@@ -1254,6 +1292,7 @@ class AsyncRuns(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1275,6 +1314,7 @@ class AsyncRuns(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         event_handler: AsyncAssistantEventHandlerT,
@@ -1296,6 +1336,7 @@ class AsyncRuns(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         thread_id: str,
         event_handler: AsyncAssistantEventHandlerT | None = None,
@@ -1328,6 +1369,7 @@ class AsyncRuns(AsyncAPIResource):
                     "instructions": instructions,
                     "metadata": metadata,
                     "model": model,
+                    "temperature": temperature,
                     "stream": True,
                     "tools": tools,
                 },
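
A minimal sketch of the new per-run `temperature` override added above, assuming a configured client; the thread and assistant IDs are illustrative placeholders:

from openai import OpenAI

client = OpenAI()

# A low temperature biases the run toward more deterministic output;
# omitting the argument keeps the server-side default.
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    temperature=0.2,
)
print(run.status)
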
src/openai/resources/beta/threads/threads.py
@@ -244,6 +244,7 @@ class Threads(SyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -278,6 +279,10 @@ class Threads(SyncAPIResource):
               events, terminating when the Run enters a terminal state with a `data: [DONE]`
               message.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -302,6 +307,7 @@ class Threads(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -336,6 +342,10 @@ class Threads(SyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -360,6 +370,7 @@ class Threads(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -394,6 +405,10 @@ class Threads(SyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -418,6 +433,7 @@ class Threads(SyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -437,6 +453,7 @@ class Threads(SyncAPIResource):
                     "metadata": metadata,
                     "model": model,
                     "stream": stream,
+                    "temperature": temperature,
                     "thread": thread,
                     "tools": tools,
                 },
@@ -458,6 +475,7 @@ class Threads(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -478,6 +496,7 @@ class Threads(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         event_handler: AssistantEventHandlerT,
@@ -498,6 +517,7 @@ class Threads(SyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         event_handler: AssistantEventHandlerT | None = None,
@@ -524,6 +544,7 @@ class Threads(SyncAPIResource):
                     "instructions": instructions,
                     "metadata": metadata,
                     "model": model,
+                    "temperature": temperature,
                     "stream": True,
                     "thread": thread,
                     "tools": tools,
@@ -723,6 +744,7 @@ class AsyncThreads(AsyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -757,6 +779,10 @@ class AsyncThreads(AsyncAPIResource):
               events, terminating when the Run enters a terminal state with a `data: [DONE]`
               message.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -781,6 +807,7 @@ class AsyncThreads(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -815,6 +842,10 @@ class AsyncThreads(AsyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -839,6 +870,7 @@ class AsyncThreads(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -873,6 +905,10 @@ class AsyncThreads(AsyncAPIResource):
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic.
+
           thread: If no thread is provided, an empty thread will be created.
 
           tools: Override the tools the assistant can use for this run. This is useful for
@@ -897,6 +933,7 @@ class AsyncThreads(AsyncAPIResource):
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -916,6 +953,7 @@ class AsyncThreads(AsyncAPIResource):
                     "metadata": metadata,
                     "model": model,
                     "stream": stream,
+                    "temperature": temperature,
                     "thread": thread,
                     "tools": tools,
                 },
@@ -937,6 +975,7 @@ class AsyncThreads(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -957,6 +996,7 @@ class AsyncThreads(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         event_handler: AsyncAssistantEventHandlerT,
@@ -977,6 +1017,7 @@ class AsyncThreads(AsyncAPIResource):
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[object] | NotGiven = NOT_GIVEN,
         model: Optional[str] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
         event_handler: AsyncAssistantEventHandlerT | None = None,
@@ -1005,6 +1046,7 @@ class AsyncThreads(AsyncAPIResource):
                     "instructions": instructions,
                     "metadata": metadata,
                     "model": model,
+                    "temperature": temperature,
                     "stream": True,
                     "thread": thread,
                     "tools": tools,
src/openai/types/beta/threads/message.py
@@ -63,9 +63,9 @@ class Message(BaseModel):
 
     run_id: Optional[str] = None
     """
-    If applicable, the ID of the
-    [run](https://platform.openai.com/docs/api-reference/runs) associated with the
-    authoring of this message.
+    The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
+    associated with the creation of this message. Value is `null` when messages are
+    created manually using the create message or create thread endpoints.
     """
 
     status: Literal["in_progress", "incomplete", "completed"]
src/openai/types/beta/threads/message_create_params.py
@@ -12,10 +12,13 @@ class MessageCreateParams(TypedDict, total=False):
     content: Required[str]
     """The content of the message."""
 
-    role: Required[Literal["user"]]
-    """The role of the entity that is creating the message.
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
 
-    Currently only `user` is supported.
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
     """
 
     file_ids: List[str]
src/openai/types/beta/threads/run.py
@@ -139,3 +139,6 @@ class Run(BaseModel):
     This value will be `null` if the run is not in a terminal state (i.e.
     `in_progress`, `queued`, etc.).
     """
+
+    temperature: Optional[float] = None
+    """The sampling temperature used for this run. If not set, defaults to 1."""
src/openai/types/beta/threads/run_create_params.py
@@ -48,6 +48,13 @@ class RunCreateParamsBase(TypedDict, total=False):
     assistant will be used.
     """
 
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
     tools: Optional[Iterable[AssistantToolParam]]
     """Override the tools the assistant can use for this run.
 
src/openai/types/beta/thread_create_and_run_params.py
@@ -49,6 +49,13 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     assistant will be used.
     """
 
+    temperature: Optional[float]
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic.
+    """
+
     thread: Thread
     """If no thread is provided, an empty thread will be created."""
 
@@ -63,10 +70,13 @@ class ThreadMessage(TypedDict, total=False):
     content: Required[str]
     """The content of the message."""
 
-    role: Required[Literal["user"]]
-    """The role of the entity that is creating the message.
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
 
-    Currently only `user` is supported.
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
     """
 
     file_ids: List[str]
src/openai/types/beta/thread_create_params.py
@@ -28,10 +28,13 @@ class Message(TypedDict, total=False):
     content: Required[str]
     """The content of the message."""
 
-    role: Required[Literal["user"]]
-    """The role of the entity that is creating the message.
+    role: Required[Literal["user", "assistant"]]
+    """The role of the entity that is creating the message. Allowed values include:
 
-    Currently only `user` is supported.
+    - `user`: Indicates the message is sent by an actual user and should be used in
+      most cases to represent user-generated messages.
+    - `assistant`: Indicates the message is generated by the assistant. Use this
+      value to insert messages from the assistant into the conversation.
     """
 
     file_ids: List[str]
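
The thread-creation params accept the same widened role, so a new thread can be seeded with both sides of a prior exchange; a minimal sketch with placeholder content:

from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create(
    messages=[
        {"role": "user", "content": "What did we decide last time?"},
        {"role": "assistant", "content": "We agreed to ship the beta on Friday."},
    ],
)
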
tests/api_resources/beta/threads/test_runs.py
@@ -38,6 +38,7 @@ class TestRuns:
             metadata={},
             model="string",
             stream=False,
+            temperature=1,
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
         )
         assert_matches_type(Run, run, path=["response"])
@@ -95,6 +96,7 @@ class TestRuns:
             instructions="string",
             metadata={},
             model="string",
+            temperature=1,
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
         )
         run_stream.response.close()
@@ -492,6 +494,7 @@ class TestAsyncRuns:
             metadata={},
             model="string",
             stream=False,
+            temperature=1,
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
         )
         assert_matches_type(Run, run, path=["response"])
@@ -549,6 +552,7 @@ class TestAsyncRuns:
             instructions="string",
             metadata={},
             model="string",
+            temperature=1,
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
         )
         await run_stream.response.aclose()
tests/api_resources/beta/test_threads.py
@@ -210,6 +210,7 @@ class TestThreads:
             metadata={},
             model="string",
             stream=False,
+            temperature=1,
             thread={
                 "messages": [
                     {
@@ -277,6 +278,7 @@ class TestThreads:
             instructions="string",
             metadata={},
             model="string",
+            temperature=1,
             thread={
                 "messages": [
                     {
@@ -522,6 +524,7 @@ class TestAsyncThreads:
             metadata={},
             model="string",
             stream=False,
+            temperature=1,
             thread={
                 "messages": [
                     {
@@ -589,6 +592,7 @@ class TestAsyncThreads:
             instructions="string",
             metadata={},
             model="string",
+            temperature=1,
             thread={
                 "messages": [
                     {