Commit cca09707

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-06-03 03:39:03
release: 1.83.0 (#2393) tag: v1.83.0
* Fix a typo: "occurences" -> "occurrences" (#2387)
* chore: deprecate Assistants API
* chore(api): mark some methods as deprecated
* feat(api): Config update for pakrym-stream-param
* fix(client): return binary content from `get /containers/{container_id}/files/{file_id}/content`
* codegen metadata
* chore(docs): remove reference to rye shell
* feat(client): add follow_redirects request option
* fix(api): Fix evals and code interpreter interfaces
* release: 1.83.0

---------

Co-authored-by: Roman A <121314722+GameRoMan@users.noreply.github.com>
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
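Among the items above, `feat(client): add follow_redirects request option` is not covered by the hunks in this section. As a rough, hedged sketch of how redirect behavior can be controlled today (via the documented `http_client` constructor argument, making no assumption about the new option's exact signature):

import httpx
import openai

# Baseline redirect control through a custom transport; the new per-request
# `follow_redirects` option from this release is not shown in this diff.
client = openai.OpenAI(
    http_client=httpx.Client(follow_redirects=False),  # refuse redirects client-wide
)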
1 parent 9173f3d
Changed files (64)
examples/assistant.py
@@ -1,37 +0,0 @@
-import openai
-
-# gets API Key from environment variable OPENAI_API_KEY
-client = openai.OpenAI()
-
-assistant = client.beta.assistants.create(
-    name="Math Tutor",
-    instructions="You are a personal math tutor. Write and run code to answer math questions.",
-    tools=[{"type": "code_interpreter"}],
-    model="gpt-4-1106-preview",
-)
-
-thread = client.beta.threads.create()
-
-message = client.beta.threads.messages.create(
-    thread_id=thread.id,
-    role="user",
-    content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
-)
-
-run = client.beta.threads.runs.create_and_poll(
-    thread_id=thread.id,
-    assistant_id=assistant.id,
-    instructions="Please address the user as Jane Doe. The user has a premium account.",
-)
-
-print("Run completed with status: " + run.status)
-
-if run.status == "completed":
-    messages = client.beta.threads.messages.list(thread_id=thread.id)
-
-    print("messages: ")
-    for message in messages:
-        assert message.content[0].type == "text"
-        print({"role": message.role, "message": message.content[0].text.value})
-
-    client.beta.assistants.delete(assistant.id)
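This example is deleted because the release deprecates the Assistants API in favor of the Responses API. A minimal migration sketch against `client.responses.create`; the model name is illustrative, not taken from this commit:

import openai

# Reads the API key from the OPENAI_API_KEY environment variable.
client = openai.OpenAI()

response = client.responses.create(
    model="gpt-4.1",  # illustrative model, not from the diff
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    input="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)

print(response.output_text)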
examples/assistant_stream.py
@@ -1,33 +0,0 @@
-import openai
-
-# gets API Key from environment variable OPENAI_API_KEY
-client = openai.OpenAI()
-
-assistant = client.beta.assistants.create(
-    name="Math Tutor",
-    instructions="You are a personal math tutor. Write and run code to answer math questions.",
-    tools=[{"type": "code_interpreter"}],
-    model="gpt-4-1106-preview",
-)
-
-thread = client.beta.threads.create()
-
-message = client.beta.threads.messages.create(
-    thread_id=thread.id,
-    role="user",
-    content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
-)
-
-print("starting run stream")
-
-stream = client.beta.threads.runs.create(
-    thread_id=thread.id,
-    assistant_id=assistant.id,
-    instructions="Please address the user as Jane Doe. The user has a premium account.",
-    stream=True,
-)
-
-for event in stream:
-    print(event.model_dump_json(indent=2, exclude_unset=True))
-
-client.beta.assistants.delete(assistant.id)
examples/assistant_stream_helpers.py
@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-from typing_extensions import override
-
-import openai
-from openai import AssistantEventHandler
-from openai.types.beta import AssistantStreamEvent
-from openai.types.beta.threads import Text, TextDelta
-from openai.types.beta.threads.runs import RunStep, RunStepDelta
-
-
-class EventHandler(AssistantEventHandler):
-    @override
-    def on_event(self, event: AssistantStreamEvent) -> None:
-        if event.event == "thread.run.step.created":
-            details = event.data.step_details
-            if details.type == "tool_calls":
-                print("Generating code to interpret:\n\n```py")
-        elif event.event == "thread.message.created":
-            print("\nResponse:\n")
-
-    @override
-    def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
-        print(delta.value, end="", flush=True)
-
-    @override
-    def on_run_step_done(self, run_step: RunStep) -> None:
-        details = run_step.step_details
-        if details.type == "tool_calls":
-            for tool in details.tool_calls:
-                if tool.type == "code_interpreter":
-                    print("\n```\nExecuting code...")
-
-    @override
-    def on_run_step_delta(self, delta: RunStepDelta, snapshot: RunStep) -> None:
-        details = delta.step_details
-        if details is not None and details.type == "tool_calls":
-            for tool in details.tool_calls or []:
-                if tool.type == "code_interpreter" and tool.code_interpreter and tool.code_interpreter.input:
-                    print(tool.code_interpreter.input, end="", flush=True)
-
-
-def main() -> None:
-    client = openai.OpenAI()
-
-    assistant = client.beta.assistants.create(
-        name="Math Tutor",
-        instructions="You are a personal math tutor. Write and run code to answer math questions.",
-        tools=[{"type": "code_interpreter"}],
-        model="gpt-4-1106-preview",
-    )
-
-    try:
-        question = "I need to solve the equation `3x + 11 = 14`. Can you help me?"
-
-        thread = client.beta.threads.create(
-            messages=[
-                {
-                    "role": "user",
-                    "content": question,
-                },
-            ]
-        )
-        print(f"Question: {question}\n")
-
-        with client.beta.threads.runs.stream(
-            thread_id=thread.id,
-            assistant_id=assistant.id,
-            instructions="Please address the user as Jane Doe. The user has a premium account.",
-            event_handler=EventHandler(),
-        ) as stream:
-            stream.until_done()
-            print()
-    finally:
-        client.beta.assistants.delete(assistant.id)
-
-
-main()
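Both streaming examples above are removed for the same reason. A hedged streaming sketch against the Responses API, mirroring the deleted example's event dumping; the model name is again illustrative:

import openai

client = openai.OpenAI()

stream = client.responses.create(
    model="gpt-4.1",  # illustrative model, not from the diff
    input="I need to solve the equation `3x + 11 = 14`. Can you help me?",
    stream=True,
)

for event in stream:
    # Each streamed event is a pydantic model, dumped as in the deleted example.
    print(event.model_dump_json(indent=2, exclude_unset=True))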
src/openai/_utils/_transform.py
@@ -212,7 +212,7 @@ def _transform_recursive(
         return data
 
     if isinstance(data, pydantic.BaseModel):
-        return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, '__api_exclude__', None))
+        return model_dump(data, exclude_unset=True, mode="json", exclude=getattr(data, "__api_exclude__", None))
 
     annotated_type = _get_annotated_type(annotation)
     if annotated_type is None:
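The hunk above only normalizes quote style, but the line it touches is worth unpacking: fields named in a model's `__api_exclude__` attribute are dropped when the model is serialized into a request body. A minimal sketch of that exclude behavior with a hypothetical pydantic model (only the `__api_exclude__` attribute name comes from the diff):

import pydantic

class ExampleParams(pydantic.BaseModel):
    # Hypothetical model; dunder class attributes are ignored by pydantic's
    # field collection, so this stays a plain class attribute.
    __api_exclude__ = {"internal_note"}

    prompt: str
    internal_note: str = "never sent"

params = ExampleParams(prompt="hi")
print(params.model_dump(exclude=getattr(params, "__api_exclude__", None), exclude_unset=True))
# -> {'prompt': 'hi'}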
src/openai/lib/_parsing/_responses.py
@@ -109,7 +109,7 @@ def parse_response(
             or output.type == "code_interpreter_call"
             or output.type == "local_shell_call"
             or output.type == "mcp_list_tools"
-            or output.type == 'exec'
+            or output.type == "exec"
         ):
             output_list.append(output)
         elif TYPE_CHECKING:  # type: ignore
src/openai/resources/beta/realtime/sessions.py
@@ -43,6 +43,7 @@ class Sessions(SyncAPIResource):
     def create(
         self,
         *,
+        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
         input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
         input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
@@ -83,6 +84,8 @@ class Sessions(SyncAPIResource):
         the Realtime API.
 
         Args:
+          client_secret: Configuration options for the generated client secret.
+
           input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
               `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
               (mono), and little-endian byte order.
@@ -163,6 +166,7 @@ class Sessions(SyncAPIResource):
             "/realtime/sessions",
             body=maybe_transform(
                 {
+                    "client_secret": client_secret,
                     "input_audio_format": input_audio_format,
                     "input_audio_noise_reduction": input_audio_noise_reduction,
                     "input_audio_transcription": input_audio_transcription,
@@ -209,6 +213,7 @@ class AsyncSessions(AsyncAPIResource):
     async def create(
         self,
         *,
+        client_secret: session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
         input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         input_audio_noise_reduction: session_create_params.InputAudioNoiseReduction | NotGiven = NOT_GIVEN,
         input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
@@ -249,6 +254,8 @@ class AsyncSessions(AsyncAPIResource):
         the Realtime API.
 
         Args:
+          client_secret: Configuration options for the generated client secret.
+
           input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
               `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
               (mono), and little-endian byte order.
@@ -329,6 +336,7 @@ class AsyncSessions(AsyncAPIResource):
             "/realtime/sessions",
             body=await async_maybe_transform(
                 {
+                    "client_secret": client_secret,
                     "input_audio_format": input_audio_format,
                     "input_audio_noise_reduction": input_audio_noise_reduction,
                     "input_audio_transcription": input_audio_transcription,
src/openai/resources/beta/realtime/transcription_sessions.py
@@ -43,6 +43,7 @@ class TranscriptionSessions(SyncAPIResource):
     def create(
         self,
         *,
+        client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
         include: List[str] | NotGiven = NOT_GIVEN,
         input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
@@ -67,6 +68,8 @@ class TranscriptionSessions(SyncAPIResource):
         the Realtime API.
 
         Args:
+          client_secret: Configuration options for the generated client secret.
+
           include:
               The set of items to include in the transcription. Current available items are:
 
@@ -113,6 +116,7 @@ class TranscriptionSessions(SyncAPIResource):
             "/realtime/transcription_sessions",
             body=maybe_transform(
                 {
+                    "client_secret": client_secret,
                     "include": include,
                     "input_audio_format": input_audio_format,
                     "input_audio_noise_reduction": input_audio_noise_reduction,
@@ -152,6 +156,7 @@ class AsyncTranscriptionSessions(AsyncAPIResource):
     async def create(
         self,
         *,
+        client_secret: transcription_session_create_params.ClientSecret | NotGiven = NOT_GIVEN,
         include: List[str] | NotGiven = NOT_GIVEN,
         input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         input_audio_noise_reduction: transcription_session_create_params.InputAudioNoiseReduction
@@ -176,6 +181,8 @@ class AsyncTranscriptionSessions(AsyncAPIResource):
         the Realtime API.
 
         Args:
+          client_secret: Configuration options for the generated client secret.
+
           include:
               The set of items to include in the transcription. Current available items are:
 
@@ -222,6 +229,7 @@ class AsyncTranscriptionSessions(AsyncAPIResource):
             "/realtime/transcription_sessions",
             body=await async_maybe_transform(
                 {
+                    "client_secret": client_secret,
                     "include": include,
                     "input_audio_format": input_audio_format,
                     "input_audio_noise_reduction": input_audio_noise_reduction,
src/openai/resources/beta/threads/runs/runs.py
@@ -83,6 +83,7 @@ class Runs(SyncAPIResource):
         return RunsWithStreamingResponse(self)
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create(
         self,
         thread_id: str,
@@ -233,6 +234,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create(
         self,
         thread_id: str,
@@ -383,6 +385,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create(
         self,
         thread_id: str,
@@ -532,6 +535,7 @@ class Runs(SyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["assistant_id"], ["assistant_id", "stream"])
     def create(
         self,
@@ -601,6 +605,7 @@ class Runs(SyncAPIResource):
             stream_cls=Stream[AssistantStreamEvent],
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def retrieve(
         self,
         run_id: str,
@@ -638,6 +643,7 @@ class Runs(SyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def update(
         self,
         run_id: str,
@@ -684,6 +690,7 @@ class Runs(SyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         thread_id: str,
@@ -751,6 +758,7 @@ class Runs(SyncAPIResource):
             model=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def cancel(
         self,
         run_id: str,
@@ -788,6 +796,7 @@ class Runs(SyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create_and_poll(
         self,
         *,
@@ -822,7 +831,7 @@ class Runs(SyncAPIResource):
         lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = self.create(
+        run = self.create(  # pyright: ignore[reportDeprecated]
             thread_id=thread_id,
             assistant_id=assistant_id,
             include=include,
@@ -848,7 +857,7 @@ class Runs(SyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return self.poll(
+        return self.poll(  # pyright: ignore[reportDeprecated]
             run.id,
             thread_id=thread_id,
             extra_headers=extra_headers,
@@ -996,6 +1005,7 @@ class Runs(SyncAPIResource):
         )
         return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def poll(
         self,
         run_id: str,
@@ -1018,7 +1028,7 @@ class Runs(SyncAPIResource):
 
         terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
         while True:
-            response = self.with_raw_response.retrieve(
+            response = self.with_raw_response.retrieve(  # pyright: ignore[reportDeprecated]
                 thread_id=thread_id,
                 run_id=run_id,
                 extra_headers=extra_headers,
@@ -1042,6 +1052,7 @@ class Runs(SyncAPIResource):
             self._sleep(poll_interval_ms / 1000)
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -1074,6 +1085,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -1106,6 +1118,7 @@ class Runs(SyncAPIResource):
         """Create a Run stream"""
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -1184,6 +1197,7 @@ class Runs(SyncAPIResource):
         return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs(
         self,
         run_id: str,
@@ -1222,6 +1236,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs(
         self,
         run_id: str,
@@ -1260,6 +1275,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs(
         self,
         run_id: str,
@@ -1297,7 +1313,9 @@ class Runs(SyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs(
         self,
         run_id: str,
@@ -1336,6 +1354,7 @@ class Runs(SyncAPIResource):
             stream_cls=Stream[AssistantStreamEvent],
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_and_poll(
         self,
         *,
@@ -1355,7 +1374,7 @@ class Runs(SyncAPIResource):
         More information on Run lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = self.submit_tool_outputs(
+        run = self.submit_tool_outputs(  # pyright: ignore[reportDeprecated]
             run_id=run_id,
             thread_id=thread_id,
             tool_outputs=tool_outputs,
@@ -1365,7 +1384,7 @@ class Runs(SyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return self.poll(
+        return self.poll(  # pyright: ignore[reportDeprecated]
             run_id=run.id,
             thread_id=thread_id,
             extra_headers=extra_headers,
@@ -1376,6 +1395,7 @@ class Runs(SyncAPIResource):
         )
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -1397,6 +1417,7 @@ class Runs(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -1418,6 +1439,7 @@ class Runs(SyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -1494,6 +1516,7 @@ class AsyncRuns(AsyncAPIResource):
         return AsyncRunsWithStreamingResponse(self)
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         thread_id: str,
@@ -1644,6 +1667,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         thread_id: str,
@@ -1794,6 +1818,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         thread_id: str,
@@ -1943,7 +1968,9 @@ class AsyncRuns(AsyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["assistant_id"], ["assistant_id", "stream"])
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         thread_id: str,
@@ -2012,6 +2039,7 @@ class AsyncRuns(AsyncAPIResource):
             stream_cls=AsyncStream[AssistantStreamEvent],
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def retrieve(
         self,
         run_id: str,
@@ -2049,6 +2077,7 @@ class AsyncRuns(AsyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def update(
         self,
         run_id: str,
@@ -2095,6 +2124,7 @@ class AsyncRuns(AsyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         thread_id: str,
@@ -2162,6 +2192,7 @@ class AsyncRuns(AsyncAPIResource):
             model=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def cancel(
         self,
         run_id: str,
@@ -2199,6 +2230,7 @@ class AsyncRuns(AsyncAPIResource):
             cast_to=Run,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create_and_poll(
         self,
         *,
@@ -2233,7 +2265,7 @@ class AsyncRuns(AsyncAPIResource):
         lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = await self.create(
+        run = await self.create(  # pyright: ignore[reportDeprecated]
             thread_id=thread_id,
             assistant_id=assistant_id,
             include=include,
@@ -2259,7 +2291,7 @@ class AsyncRuns(AsyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return await self.poll(
+        return await self.poll(  # pyright: ignore[reportDeprecated]
             run.id,
             thread_id=thread_id,
             extra_headers=extra_headers,
@@ -2405,6 +2437,7 @@ class AsyncRuns(AsyncAPIResource):
         )
         return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def poll(
         self,
         run_id: str,
@@ -2427,7 +2460,7 @@ class AsyncRuns(AsyncAPIResource):
 
         terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
         while True:
-            response = await self.with_raw_response.retrieve(
+            response = await self.with_raw_response.retrieve(  # pyright: ignore[reportDeprecated]
                 thread_id=thread_id,
                 run_id=run_id,
                 extra_headers=extra_headers,
@@ -2451,6 +2484,7 @@ class AsyncRuns(AsyncAPIResource):
             await self._sleep(poll_interval_ms / 1000)
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -2482,6 +2516,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -2514,6 +2549,7 @@ class AsyncRuns(AsyncAPIResource):
         """Create a Run stream"""
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def stream(
         self,
         *,
@@ -2594,6 +2630,7 @@ class AsyncRuns(AsyncAPIResource):
         return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def submit_tool_outputs(
         self,
         run_id: str,
@@ -2632,6 +2669,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def submit_tool_outputs(
         self,
         run_id: str,
@@ -2670,6 +2708,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def submit_tool_outputs(
         self,
         run_id: str,
@@ -2707,7 +2746,9 @@ class AsyncRuns(AsyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def submit_tool_outputs(
         self,
         run_id: str,
@@ -2746,6 +2787,7 @@ class AsyncRuns(AsyncAPIResource):
             stream_cls=AsyncStream[AssistantStreamEvent],
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def submit_tool_outputs_and_poll(
         self,
         *,
@@ -2765,7 +2807,7 @@ class AsyncRuns(AsyncAPIResource):
         More information on Run lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = await self.submit_tool_outputs(
+        run = await self.submit_tool_outputs(  # pyright: ignore[reportDeprecated]
             run_id=run_id,
             thread_id=thread_id,
             tool_outputs=tool_outputs,
@@ -2775,7 +2817,7 @@ class AsyncRuns(AsyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return await self.poll(
+        return await self.poll(  # pyright: ignore[reportDeprecated]
             run_id=run.id,
             thread_id=thread_id,
             extra_headers=extra_headers,
@@ -2786,6 +2828,7 @@ class AsyncRuns(AsyncAPIResource):
         )
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -2807,6 +2850,7 @@ class AsyncRuns(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -2828,6 +2872,7 @@ class AsyncRuns(AsyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def submit_tool_outputs_stream(
         self,
         *,
@@ -2885,23 +2930,35 @@ class RunsWithRawResponse:
     def __init__(self, runs: Runs) -> None:
         self._runs = runs
 
-        self.create = _legacy_response.to_raw_response_wrapper(
-            runs.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = _legacy_response.to_raw_response_wrapper(
-            runs.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = _legacy_response.to_raw_response_wrapper(
-            runs.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.to_raw_response_wrapper(
-            runs.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.cancel = _legacy_response.to_raw_response_wrapper(
-            runs.cancel,
+        self.cancel = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.cancel  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.submit_tool_outputs = _legacy_response.to_raw_response_wrapper(
-            runs.submit_tool_outputs,
+        self.submit_tool_outputs = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                runs.submit_tool_outputs  # pyright: ignore[reportDeprecated],
+            )
         )
 
     @cached_property
@@ -2913,23 +2970,35 @@ class AsyncRunsWithRawResponse:
     def __init__(self, runs: AsyncRuns) -> None:
         self._runs = runs
 
-        self.create = _legacy_response.async_to_raw_response_wrapper(
-            runs.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
-            runs.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = _legacy_response.async_to_raw_response_wrapper(
-            runs.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.async_to_raw_response_wrapper(
-            runs.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.cancel = _legacy_response.async_to_raw_response_wrapper(
-            runs.cancel,
+        self.cancel = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.cancel  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.submit_tool_outputs = _legacy_response.async_to_raw_response_wrapper(
-            runs.submit_tool_outputs,
+        self.submit_tool_outputs = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                runs.submit_tool_outputs  # pyright: ignore[reportDeprecated],
+            )
         )
 
     @cached_property
@@ -2941,23 +3010,35 @@ class RunsWithStreamingResponse:
     def __init__(self, runs: Runs) -> None:
         self._runs = runs
 
-        self.create = to_streamed_response_wrapper(
-            runs.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = to_streamed_response_wrapper(
-            runs.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = to_streamed_response_wrapper(
-            runs.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = to_streamed_response_wrapper(
-            runs.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.cancel = to_streamed_response_wrapper(
-            runs.cancel,
+        self.cancel = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.cancel  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.submit_tool_outputs = to_streamed_response_wrapper(
-            runs.submit_tool_outputs,
+        self.submit_tool_outputs = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                runs.submit_tool_outputs  # pyright: ignore[reportDeprecated],
+            )
         )
 
     @cached_property
@@ -2969,23 +3050,35 @@ class AsyncRunsWithStreamingResponse:
     def __init__(self, runs: AsyncRuns) -> None:
         self._runs = runs
 
-        self.create = async_to_streamed_response_wrapper(
-            runs.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = async_to_streamed_response_wrapper(
-            runs.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = async_to_streamed_response_wrapper(
-            runs.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = async_to_streamed_response_wrapper(
-            runs.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.cancel = async_to_streamed_response_wrapper(
-            runs.cancel,
+        self.cancel = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.cancel  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.submit_tool_outputs = async_to_streamed_response_wrapper(
-            runs.submit_tool_outputs,
+        self.submit_tool_outputs = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                runs.submit_tool_outputs  # pyright: ignore[reportDeprecated],
+            )
         )
 
     @cached_property
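The decorator applied throughout this file is the PEP 702 one: type checkers flag call sites (hence the `# pyright: ignore[reportDeprecated]` suppressions above), and `typing_extensions.deprecated` also emits a runtime `DeprecationWarning` when the wrapped callable is invoked. A small self-contained demonstration:

import warnings

import typing_extensions

@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def legacy_call() -> str:
    return "ok"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_call()  # pyright would report this call site as deprecated

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
print(caught[0].message)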
src/openai/resources/beta/threads/runs/steps.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import typing_extensions
 from typing import List
 from typing_extensions import Literal
 
@@ -42,6 +43,7 @@ class Steps(SyncAPIResource):
         """
         return StepsWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def retrieve(
         self,
         step_id: str,
@@ -95,6 +97,7 @@ class Steps(SyncAPIResource):
             cast_to=RunStep,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         run_id: str,
@@ -196,6 +199,7 @@ class AsyncSteps(AsyncAPIResource):
         """
         return AsyncStepsWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def retrieve(
         self,
         step_id: str,
@@ -249,6 +253,7 @@ class AsyncSteps(AsyncAPIResource):
             cast_to=RunStep,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         run_id: str,
@@ -334,11 +339,15 @@ class StepsWithRawResponse:
     def __init__(self, steps: Steps) -> None:
         self._steps = steps
 
-        self.retrieve = _legacy_response.to_raw_response_wrapper(
-            steps.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                steps.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.to_raw_response_wrapper(
-            steps.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                steps.list  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -346,11 +355,15 @@ class AsyncStepsWithRawResponse:
     def __init__(self, steps: AsyncSteps) -> None:
         self._steps = steps
 
-        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
-            steps.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                steps.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.async_to_raw_response_wrapper(
-            steps.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                steps.list  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -358,11 +371,15 @@ class StepsWithStreamingResponse:
     def __init__(self, steps: Steps) -> None:
         self._steps = steps
 
-        self.retrieve = to_streamed_response_wrapper(
-            steps.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                steps.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = to_streamed_response_wrapper(
-            steps.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                steps.list  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -370,9 +387,13 @@ class AsyncStepsWithStreamingResponse:
     def __init__(self, steps: AsyncSteps) -> None:
         self._steps = steps
 
-        self.retrieve = async_to_streamed_response_wrapper(
-            steps.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                steps.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = async_to_streamed_response_wrapper(
-            steps.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                steps.list  # pyright: ignore[reportDeprecated],
+            )
         )
src/openai/resources/beta/threads/messages.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import typing_extensions
 from typing import Union, Iterable, Optional
 from typing_extensions import Literal
 
@@ -47,6 +48,7 @@ class Messages(SyncAPIResource):
         """
         return MessagesWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create(
         self,
         thread_id: str,
@@ -113,6 +115,7 @@ class Messages(SyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def retrieve(
         self,
         message_id: str,
@@ -150,6 +153,7 @@ class Messages(SyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def update(
         self,
         message_id: str,
@@ -196,6 +200,7 @@ class Messages(SyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         thread_id: str,
@@ -267,6 +272,7 @@ class Messages(SyncAPIResource):
             model=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def delete(
         self,
         message_id: str,
@@ -325,6 +331,7 @@ class AsyncMessages(AsyncAPIResource):
         """
         return AsyncMessagesWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         thread_id: str,
@@ -391,6 +398,7 @@ class AsyncMessages(AsyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def retrieve(
         self,
         message_id: str,
@@ -428,6 +436,7 @@ class AsyncMessages(AsyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def update(
         self,
         message_id: str,
@@ -474,6 +483,7 @@ class AsyncMessages(AsyncAPIResource):
             cast_to=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def list(
         self,
         thread_id: str,
@@ -545,6 +555,7 @@ class AsyncMessages(AsyncAPIResource):
             model=Message,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def delete(
         self,
         message_id: str,
@@ -587,20 +598,30 @@ class MessagesWithRawResponse:
     def __init__(self, messages: Messages) -> None:
         self._messages = messages
 
-        self.create = _legacy_response.to_raw_response_wrapper(
-            messages.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                messages.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = _legacy_response.to_raw_response_wrapper(
-            messages.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                messages.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = _legacy_response.to_raw_response_wrapper(
-            messages.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                messages.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.to_raw_response_wrapper(
-            messages.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                messages.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.delete = _legacy_response.to_raw_response_wrapper(
-            messages.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                messages.delete  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -608,20 +629,30 @@ class AsyncMessagesWithRawResponse:
     def __init__(self, messages: AsyncMessages) -> None:
         self._messages = messages
 
-        self.create = _legacy_response.async_to_raw_response_wrapper(
-            messages.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                messages.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
-            messages.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                messages.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = _legacy_response.async_to_raw_response_wrapper(
-            messages.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                messages.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = _legacy_response.async_to_raw_response_wrapper(
-            messages.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                messages.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.delete = _legacy_response.async_to_raw_response_wrapper(
-            messages.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                messages.delete  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -629,20 +660,30 @@ class MessagesWithStreamingResponse:
     def __init__(self, messages: Messages) -> None:
         self._messages = messages
 
-        self.create = to_streamed_response_wrapper(
-            messages.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                messages.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = to_streamed_response_wrapper(
-            messages.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                messages.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = to_streamed_response_wrapper(
-            messages.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                messages.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = to_streamed_response_wrapper(
-            messages.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                messages.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.delete = to_streamed_response_wrapper(
-            messages.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                messages.delete  # pyright: ignore[reportDeprecated],
+            )
         )
 
 
@@ -650,18 +691,28 @@ class AsyncMessagesWithStreamingResponse:
     def __init__(self, messages: AsyncMessages) -> None:
         self._messages = messages
 
-        self.create = async_to_streamed_response_wrapper(
-            messages.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                messages.create  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.retrieve = async_to_streamed_response_wrapper(
-            messages.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                messages.retrieve  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.update = async_to_streamed_response_wrapper(
-            messages.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                messages.update  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.list = async_to_streamed_response_wrapper(
-            messages.list,
+        self.list = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                messages.list  # pyright: ignore[reportDeprecated],
+            )
         )
-        self.delete = async_to_streamed_response_wrapper(
-            messages.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                messages.delete  # pyright: ignore[reportDeprecated],
+            )
         )
src/openai/resources/beta/threads/threads.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import typing_extensions
 from typing import Union, Iterable, Optional
 from functools import partial
 from typing_extensions import Literal, overload
@@ -86,6 +87,7 @@ class Threads(SyncAPIResource):
         """
         return ThreadsWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create(
         self,
         *,
@@ -143,6 +145,7 @@ class Threads(SyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def retrieve(
         self,
         thread_id: str,
@@ -177,6 +180,7 @@ class Threads(SyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def update(
         self,
         thread_id: str,
@@ -232,6 +236,7 @@ class Threads(SyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def delete(
         self,
         thread_id: str,
@@ -267,6 +272,7 @@ class Threads(SyncAPIResource):
         )
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create_and_run(
         self,
         *,
@@ -400,6 +406,7 @@ class Threads(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create_and_run(
         self,
         *,
@@ -533,6 +540,7 @@ class Threads(SyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create_and_run(
         self,
         *,
@@ -665,7 +673,9 @@ class Threads(SyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["assistant_id"], ["assistant_id", "stream"])
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     def create_and_run(
         self,
         *,
@@ -757,7 +767,7 @@ class Threads(SyncAPIResource):
         More information on Run lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = self.create_and_run(
+        run = self.create_and_run(  # pyright: ignore[reportDeprecated]
             assistant_id=assistant_id,
             instructions=instructions,
             max_completion_tokens=max_completion_tokens,
@@ -779,7 +789,7 @@ class Threads(SyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms)
+        return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms)  # pyright: ignore[reportDeprecated]
 
     @overload
     def create_and_run_stream(
@@ -935,6 +945,7 @@ class AsyncThreads(AsyncAPIResource):
         """
         return AsyncThreadsWithStreamingResponse(self)
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create(
         self,
         *,
@@ -992,6 +1003,7 @@ class AsyncThreads(AsyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def retrieve(
         self,
         thread_id: str,
@@ -1026,6 +1038,7 @@ class AsyncThreads(AsyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def update(
         self,
         thread_id: str,
@@ -1081,6 +1094,7 @@ class AsyncThreads(AsyncAPIResource):
             cast_to=Thread,
         )
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def delete(
         self,
         thread_id: str,
@@ -1116,6 +1130,7 @@ class AsyncThreads(AsyncAPIResource):
         )
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create_and_run(
         self,
         *,
@@ -1249,6 +1264,7 @@ class AsyncThreads(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create_and_run(
         self,
         *,
@@ -1382,6 +1398,7 @@ class AsyncThreads(AsyncAPIResource):
         ...
 
     @overload
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create_and_run(
         self,
         *,
@@ -1514,7 +1531,9 @@ class AsyncThreads(AsyncAPIResource):
         """
         ...
 
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     @required_args(["assistant_id"], ["assistant_id", "stream"])
+    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
     async def create_and_run(
         self,
         *,
@@ -1606,7 +1625,7 @@ class AsyncThreads(AsyncAPIResource):
         More information on Run lifecycles can be found here:
         https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
         """
-        run = await self.create_and_run(
+        run = await self.create_and_run(  # pyright: ignore[reportDeprecated]
             assistant_id=assistant_id,
             instructions=instructions,
             max_completion_tokens=max_completion_tokens,
@@ -1628,7 +1647,7 @@ class AsyncThreads(AsyncAPIResource):
             extra_body=extra_body,
             timeout=timeout,
         )
-        return await self.runs.poll(
+        return await self.runs.poll(  # pyright: ignore[reportDeprecated]
             run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms
         )
 
@@ -1764,20 +1783,30 @@ class ThreadsWithRawResponse:
     def __init__(self, threads: Threads) -> None:
         self._threads = threads
 
-        self.create = _legacy_response.to_raw_response_wrapper(
-            threads.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                threads.create,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.retrieve = _legacy_response.to_raw_response_wrapper(
-            threads.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                threads.retrieve,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.update = _legacy_response.to_raw_response_wrapper(
-            threads.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                threads.update,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.delete = _legacy_response.to_raw_response_wrapper(
-            threads.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                threads.delete,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.create_and_run = _legacy_response.to_raw_response_wrapper(
-            threads.create_and_run,
+        self.create_and_run = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.to_raw_response_wrapper(
+                threads.create_and_run,  # pyright: ignore[reportDeprecated]
+            )
         )
 
     @cached_property
@@ -1793,20 +1821,30 @@ class AsyncThreadsWithRawResponse:
     def __init__(self, threads: AsyncThreads) -> None:
         self._threads = threads
 
-        self.create = _legacy_response.async_to_raw_response_wrapper(
-            threads.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                threads.create,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
-            threads.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                threads.retrieve,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.update = _legacy_response.async_to_raw_response_wrapper(
-            threads.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                threads.update,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.delete = _legacy_response.async_to_raw_response_wrapper(
-            threads.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                threads.delete,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.create_and_run = _legacy_response.async_to_raw_response_wrapper(
-            threads.create_and_run,
+        self.create_and_run = (  # pyright: ignore[reportDeprecated]
+            _legacy_response.async_to_raw_response_wrapper(
+                threads.create_and_run,  # pyright: ignore[reportDeprecated]
+            )
         )
 
     @cached_property
@@ -1822,20 +1860,30 @@ class ThreadsWithStreamingResponse:
     def __init__(self, threads: Threads) -> None:
         self._threads = threads
 
-        self.create = to_streamed_response_wrapper(
-            threads.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                threads.create,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.retrieve = to_streamed_response_wrapper(
-            threads.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                threads.retrieve,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.update = to_streamed_response_wrapper(
-            threads.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                threads.update,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.delete = to_streamed_response_wrapper(
-            threads.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                threads.delete,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.create_and_run = to_streamed_response_wrapper(
-            threads.create_and_run,
+        self.create_and_run = (  # pyright: ignore[reportDeprecated]
+            to_streamed_response_wrapper(
+                threads.create_and_run,  # pyright: ignore[reportDeprecated]
+            )
         )
 
     @cached_property
@@ -1851,20 +1899,30 @@ class AsyncThreadsWithStreamingResponse:
     def __init__(self, threads: AsyncThreads) -> None:
         self._threads = threads
 
-        self.create = async_to_streamed_response_wrapper(
-            threads.create,
+        self.create = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                threads.create,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.retrieve = async_to_streamed_response_wrapper(
-            threads.retrieve,
+        self.retrieve = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                threads.retrieve,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.update = async_to_streamed_response_wrapper(
-            threads.update,
+        self.update = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                threads.update,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.delete = async_to_streamed_response_wrapper(
-            threads.delete,
+        self.delete = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                threads.delete,  # pyright: ignore[reportDeprecated]
+            )
         )
-        self.create_and_run = async_to_streamed_response_wrapper(
-            threads.create_and_run,
+        self.create_and_run = (  # pyright: ignore[reportDeprecated]
+            async_to_streamed_response_wrapper(
+                threads.create_and_run,  # pyright: ignore[reportDeprecated]
+            )
         )
 
     @cached_property
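
Note that these markers are not only for type checkers: at runtime, `typing_extensions.deprecated` also makes each decorated call emit a `DeprecationWarning`. A minimal sketch of what callers will now see (assumes `OPENAI_API_KEY` is set; the endpoints themselves keep working):

import warnings

import openai

client = openai.OpenAI()

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    thread = client.beta.threads.create()  # still works, but now warns

print(thread.id)
print([str(w.message) for w in caught if issubclass(w.category, DeprecationWarning)])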
src/openai/resources/chat/completions/completions.py
@@ -263,9 +263,9 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -541,9 +541,9 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -810,9 +810,9 @@ class Completions(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1366,9 +1366,9 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1644,9 +1644,9 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1913,9 +1913,9 @@ class AsyncCompletions(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
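
For context, a hedged sketch of the `service_tier` parameter these corrected docstrings describe; the model name and prompt are placeholders, not part of the diff:

import openai

client = openai.OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
    service_tier="default",  # or "auto" / "flex", as documented above
)
print(completion.service_tier)
print(completion.choices[0].message.content)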
src/openai/resources/containers/files/content.py
@@ -5,10 +5,15 @@ from __future__ import annotations
 import httpx
 
 from .... import _legacy_response
-from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._response import (
+    StreamedBinaryAPIResponse,
+    AsyncStreamedBinaryAPIResponse,
+    to_custom_streamed_response_wrapper,
+    async_to_custom_streamed_response_wrapper,
+)
 from ...._base_client import make_request_options
 
 __all__ = ["Content", "AsyncContent"]
@@ -45,7 +50,7 @@ class Content(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> None:
+    ) -> _legacy_response.HttpxBinaryResponseContent:
         """
         Retrieve Container File Content
 
@@ -62,13 +67,13 @@ class Content(SyncAPIResource):
             raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
         if not file_id:
             raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
-        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
         return self._get(
             f"/containers/{container_id}/files/{file_id}/content",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=NoneType,
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
         )
 
 
@@ -103,7 +108,7 @@ class AsyncContent(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> None:
+    ) -> _legacy_response.HttpxBinaryResponseContent:
         """
         Retrieve Container File Content
 
@@ -120,13 +125,13 @@ class AsyncContent(AsyncAPIResource):
             raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
         if not file_id:
             raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
-        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
         return await self._get(
             f"/containers/{container_id}/files/{file_id}/content",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=NoneType,
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
         )
 
 
@@ -152,8 +157,9 @@ class ContentWithStreamingResponse:
     def __init__(self, content: Content) -> None:
         self._content = content
 
-        self.retrieve = to_streamed_response_wrapper(
+        self.retrieve = to_custom_streamed_response_wrapper(
             content.retrieve,
+            StreamedBinaryAPIResponse,
         )
 
 
@@ -161,6 +167,7 @@ class AsyncContentWithStreamingResponse:
     def __init__(self, content: AsyncContent) -> None:
         self._content = content
 
-        self.retrieve = async_to_streamed_response_wrapper(
+        self.retrieve = async_to_custom_streamed_response_wrapper(
             content.retrieve,
+            AsyncStreamedBinaryAPIResponse,
         )
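
With the `cast_to` change above, `retrieve` now returns the binary payload instead of `None`. A short sketch with placeholder IDs:

import openai

client = openai.OpenAI()

content = client.containers.files.content.retrieve(
    file_id="file_123",
    container_id="cntr_123",
)
content.write_to_file("output.bin")  # HttpxBinaryResponseContent helper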
src/openai/resources/fine_tuning/alpha/graders.py
@@ -2,8 +2,6 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable
-
 import httpx
 
 from .... import _legacy_response
@@ -45,7 +43,7 @@ class Graders(SyncAPIResource):
         *,
         grader: grader_run_params.Grader,
         model_sample: str,
-        reference_answer: Union[str, Iterable[object], float, object],
+        item: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -59,9 +57,15 @@ class Graders(SyncAPIResource):
         Args:
           grader: The grader used for the fine-tuning job.
 
-          model_sample: The model sample to be evaluated.
+          model_sample: The model sample to be evaluated. This value will be used to populate the
+              `sample` namespace. See
+              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+              The `output_json` variable will be populated if the model sample is a valid JSON
+              string.
 
-          reference_answer: The reference answer for the evaluation.
+          item: The dataset item provided to the grader. This will be used to populate the
+              `item` namespace. See
+              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
 
           extra_headers: Send extra headers
 
@@ -77,7 +81,7 @@ class Graders(SyncAPIResource):
                 {
                     "grader": grader,
                     "model_sample": model_sample,
-                    "reference_answer": reference_answer,
+                    "item": item,
                 },
                 grader_run_params.GraderRunParams,
             ),
@@ -147,7 +151,7 @@ class AsyncGraders(AsyncAPIResource):
         *,
         grader: grader_run_params.Grader,
         model_sample: str,
-        reference_answer: Union[str, Iterable[object], float, object],
+        item: object | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -161,9 +165,15 @@ class AsyncGraders(AsyncAPIResource):
         Args:
           grader: The grader used for the fine-tuning job.
 
-          model_sample: The model sample to be evaluated.
+          model_sample: The model sample to be evaluated. This value will be used to populate the
+              `sample` namespace. See
+              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+              The `output_json` variable will be populated if the model sample is a valid JSON
+              string.
 
-          reference_answer: The reference answer for the evaluation.
+          item: The dataset item provided to the grader. This will be used to populate the
+              `item` namespace. See
+              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
 
           extra_headers: Send extra headers
 
@@ -179,7 +189,7 @@ class AsyncGraders(AsyncAPIResource):
                 {
                     "grader": grader,
                     "model_sample": model_sample,
-                    "reference_answer": reference_answer,
+                    "item": item,
                 },
                 grader_run_params.GraderRunParams,
             ),
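
A sketch of the reworked grader-run call (`reference_answer` replaced by `item`); the grader configuration and template strings are illustrative only:

import openai

client = openai.OpenAI()

result = client.fine_tuning.alpha.graders.run(
    grader={
        "type": "string_check",
        "name": "exact_match",
        "input": "{{sample.output_text}}",  # filled from model_sample
        "operation": "eq",
        "reference": "{{item.label}}",  # filled from item
    },
    model_sample="yes",
    item={"label": "yes"},
)
print(result)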
src/openai/resources/responses/responses.py
@@ -149,6 +149,8 @@ class Responses(SyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -186,9 +188,9 @@ class Responses(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -349,6 +351,8 @@ class Responses(SyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -386,9 +390,9 @@ class Responses(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -542,6 +546,8 @@ class Responses(SyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -579,9 +585,9 @@ class Responses(SyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -976,6 +982,8 @@ class Responses(SyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1016,6 +1024,7 @@ class Responses(SyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> Response | Stream[ResponseStreamEvent]: ...
 
+    @overload
     def retrieve(
         self,
         response_id: str,
@@ -1037,15 +1046,55 @@ class Responses(SyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
-          stream: If set to true, the model response data will be streamed to the client using
+          starting_after: The sequence number of the event after which to start streaming.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
               [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
               See the
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
-          starting_after: When retrieving a background response, this parameter can be used to start
-            replaying after an event with the given sequence number. Must be used in conjunction with
-            the `stream` parameter set to `true`.
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        stream: Literal[True],
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Stream[ResponseStreamEvent]:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
 
@@ -1055,6 +1104,63 @@ class Responses(SyncAPIResource):
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
+        ...
+
+    @overload
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        stream: bool,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          starting_after: The sequence number of the event after which to start streaming.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | Stream[ResponseStreamEvent]:
         if not response_id:
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return self._get(
@@ -1067,8 +1173,8 @@ class Responses(SyncAPIResource):
                 query=maybe_transform(
                     {
                         "include": include,
-                        "stream": stream,
                         "starting_after": starting_after,
+                        "stream": stream,
                     },
                     response_retrieve_params.ResponseRetrieveParams,
                 ),
@@ -1122,7 +1228,7 @@ class Responses(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> None:
+    ) -> Response:
         """Cancels a model response with the given ID.
 
         Only responses created with the
@@ -1140,13 +1246,12 @@ class Responses(SyncAPIResource):
         """
         if not response_id:
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
-        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
         return self._post(
             f"/responses/{response_id}/cancel",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=NoneType,
+            cast_to=Response,
         )
 
 
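Taken together, the new overloads let `retrieve` be called in streaming mode. A hedged sketch, where "resp_123" stands in for the ID of a background response created earlier:

import openai

client = openai.OpenAI()

stream = client.responses.retrieve(
    "resp_123",
    stream=True,
    starting_after=10,  # resume after event sequence number 10
)
for event in stream:
    print(event.type)
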
@@ -1252,6 +1357,8 @@ class AsyncResponses(AsyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1289,9 +1396,9 @@ class AsyncResponses(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1452,6 +1559,8 @@ class AsyncResponses(AsyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1489,9 +1598,9 @@ class AsyncResponses(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1645,6 +1754,8 @@ class AsyncResponses(AsyncAPIResource):
                 multi-turn conversations when using the Responses API statelessly (like when
                 the `store` parameter is set to `false`, or when an organization is enrolled
                 in the zero data retention program).
+              - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+                in code interpreter tool call items.
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1682,9 +1793,9 @@ class AsyncResponses(AsyncAPIResource):
                 utilize scale tier credits until they are exhausted.
               - If set to 'auto', and the Project is not Scale tier enabled, the request will
                 be processed using the default service tier with a lower uptime SLA and no
-                latency guarentee.
+                latency guarantee.
               - If set to 'default', the request will be processed using the default service
-                tier with a lower uptime SLA and no latency guarentee.
+                tier with a lower uptime SLA and no latency guarantee.
               - If set to 'flex', the request will be processed with the Flex Processing
                 service tier.
                 [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -2083,6 +2194,8 @@ class AsyncResponses(AsyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2123,6 +2236,7 @@ class AsyncResponses(AsyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> Response | AsyncStream[ResponseStreamEvent]: ...
 
+    @overload
     async def retrieve(
         self,
         response_id: str,
@@ -2144,9 +2258,96 @@ class AsyncResponses(AsyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
-          stream:
-          starting_after: When retrieving a background response, this parameter can be used to start
-          replaying after an event with the given sequence number. Must be used in
+          starting_after: The sequence number of the event after which to start streaming.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        stream: Literal[True],
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncStream[ResponseStreamEvent]:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          starting_after: The sequence number of the event after which to start streaming.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        stream: bool,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
+        """
+        Retrieves a model response with the given ID.
+
+        Args:
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+              for more information.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
+          starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
 
@@ -2156,6 +2357,22 @@ class AsyncResponses(AsyncAPIResource):
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
+        ...
+
+    async def retrieve(
+        self,
+        response_id: str,
+        *,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        starting_after: int | NotGiven = NOT_GIVEN,
+        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Response | AsyncStream[ResponseStreamEvent]:
         if not response_id:
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return await self._get(
@@ -2168,8 +2385,8 @@ class AsyncResponses(AsyncAPIResource):
                 query=await async_maybe_transform(
                     {
                         "include": include,
-                        "stream": stream,
                         "starting_after": starting_after,
+                        "stream": stream,
                     },
                     response_retrieve_params.ResponseRetrieveParams,
                 ),
@@ -2223,7 +2440,7 @@ class AsyncResponses(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> None:
+    ) -> Response:
         """Cancels a model response with the given ID.
 
         Only responses created with the
@@ -2241,13 +2458,12 @@ class AsyncResponses(AsyncAPIResource):
         """
         if not response_id:
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
-        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
         return await self._post(
             f"/responses/{response_id}/cancel",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=NoneType,
+            cast_to=Response,
         )
 
 
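And a sketch of the corrected `cancel` return type, which now yields the full `Response` object rather than `None` (placeholder ID again):

import openai

client = openai.OpenAI()

response = client.responses.cancel("resp_123")
print(response.status)  # e.g. "cancelled"
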
src/openai/resources/images.py
@@ -144,7 +144,7 @@ class Images(SyncAPIResource):
           image: The image(s) to edit. Must be a supported image file or an array of images.
 
               For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-              25MB. You can provide up to 16 images.
+              50MB. You can provide up to 16 images.
 
               For `dall-e-2`, you can only provide one image, and it should be a square `png`
               file less than 4MB.
@@ -468,7 +468,7 @@ class AsyncImages(AsyncAPIResource):
           image: The image(s) to edit. Must be a supported image file or an array of images.
 
               For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-              25MB. You can provide up to 16 images.
+              50MB. You can provide up to 16 images.
 
               For `dall-e-2`, you can only provide one image, and it should be a square `png`
               file less than 4MB.
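
A sketch of the edit call this raised limit applies to; the file path and prompt are placeholders (each `gpt-image-1` input must now simply be under 50MB):

import openai

client = openai.OpenAI()

with open("input.png", "rb") as image:
    result = client.images.edit(
        model="gpt-image-1",
        image=image,
        prompt="Add a red hat to the subject",
    )
print(result.created)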
src/openai/types/audio/transcription_text_delta_event.py
@@ -12,7 +12,7 @@ class Logprob(BaseModel):
     token: Optional[str] = None
     """The token that was used to generate the log probability."""
 
-    bytes: Optional[List[object]] = None
+    bytes: Optional[List[int]] = None
     """The bytes that were used to generate the log probability."""
 
     logprob: Optional[float] = None
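
The narrowed annotation means each entry is an integer byte value rather than an opaque object. A tiny sketch constructing the model directly, with illustrative values:

from openai.types.audio.transcription_text_delta_event import Logprob

lp = Logprob(token="hi", bytes=[104, 105], logprob=-0.12)
assert all(isinstance(b, int) for b in lp.bytes or [])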
src/openai/types/audio/transcription_text_done_event.py
@@ -12,7 +12,7 @@ class Logprob(BaseModel):
     token: Optional[str] = None
     """The token that was used to generate the log probability."""
 
-    bytes: Optional[List[object]] = None
+    bytes: Optional[List[int]] = None
     """The bytes that were used to generate the log probability."""
 
     logprob: Optional[float] = None
src/openai/types/beta/realtime/session_create_params.py
@@ -5,10 +5,21 @@ from __future__ import annotations
 from typing import List, Union, Iterable
 from typing_extensions import Literal, TypedDict
 
-__all__ = ["SessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", "TurnDetection"]
+__all__ = [
+    "SessionCreateParams",
+    "ClientSecret",
+    "ClientSecretExpiresAt",
+    "InputAudioNoiseReduction",
+    "InputAudioTranscription",
+    "Tool",
+    "TurnDetection",
+]
 
 
 class SessionCreateParams(TypedDict, total=False):
+    client_secret: ClientSecret
+    """Configuration options for the generated client secret."""
+
     input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
     """The format of input audio.
 
@@ -124,6 +135,25 @@ class SessionCreateParams(TypedDict, total=False):
     """
 
 
+class ClientSecretExpiresAt(TypedDict, total=False):
+    anchor: Literal["created_at"]
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: int
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class ClientSecret(TypedDict, total=False):
+    expires_at: ClientSecretExpiresAt
+    """Configuration for the ephemeral token expiration."""
+
+
 class InputAudioNoiseReduction(TypedDict, total=False):
     type: Literal["near_field", "far_field"]
     """Type of noise reduction.
src/openai/types/beta/realtime/session_update_event.py
@@ -8,6 +8,8 @@ from ...._models import BaseModel
 __all__ = [
     "SessionUpdateEvent",
     "Session",
+    "SessionClientSecret",
+    "SessionClientSecretExpiresAt",
     "SessionInputAudioNoiseReduction",
     "SessionInputAudioTranscription",
     "SessionTool",
@@ -15,6 +17,25 @@ __all__ = [
 ]
 
 
+class SessionClientSecretExpiresAt(BaseModel):
+    anchor: Optional[Literal["created_at"]] = None
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: Optional[int] = None
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class SessionClientSecret(BaseModel):
+    expires_at: Optional[SessionClientSecretExpiresAt] = None
+    """Configuration for the ephemeral token expiration."""
+
+
 class SessionInputAudioNoiseReduction(BaseModel):
     type: Optional[Literal["near_field", "far_field"]] = None
     """Type of noise reduction.
@@ -116,6 +137,9 @@ class SessionTurnDetection(BaseModel):
 
 
 class Session(BaseModel):
+    client_secret: Optional[SessionClientSecret] = None
+    """Configuration options for the generated client secret."""
+
     input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
     """The format of input audio.
 
src/openai/types/beta/realtime/session_update_event_param.py
@@ -8,6 +8,8 @@ from typing_extensions import Literal, Required, TypedDict
 __all__ = [
     "SessionUpdateEventParam",
     "Session",
+    "SessionClientSecret",
+    "SessionClientSecretExpiresAt",
     "SessionInputAudioNoiseReduction",
     "SessionInputAudioTranscription",
     "SessionTool",
@@ -15,6 +17,25 @@ __all__ = [
 ]
 
 
+class SessionClientSecretExpiresAt(TypedDict, total=False):
+    anchor: Literal["created_at"]
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: int
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class SessionClientSecret(TypedDict, total=False):
+    expires_at: SessionClientSecretExpiresAt
+    """Configuration for the ephemeral token expiration."""
+
+
 class SessionInputAudioNoiseReduction(TypedDict, total=False):
     type: Literal["near_field", "far_field"]
     """Type of noise reduction.
@@ -116,6 +137,9 @@ class SessionTurnDetection(TypedDict, total=False):
 
 
 class Session(TypedDict, total=False):
+    client_secret: SessionClientSecret
+    """Configuration options for the generated client secret."""
+
     input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
     """The format of input audio.
 
src/openai/types/beta/realtime/transcription_session_create_params.py
@@ -5,10 +5,20 @@ from __future__ import annotations
 from typing import List
 from typing_extensions import Literal, TypedDict
 
-__all__ = ["TranscriptionSessionCreateParams", "InputAudioNoiseReduction", "InputAudioTranscription", "TurnDetection"]
+__all__ = [
+    "TranscriptionSessionCreateParams",
+    "ClientSecret",
+    "ClientSecretExpiresAt",
+    "InputAudioNoiseReduction",
+    "InputAudioTranscription",
+    "TurnDetection",
+]
 
 
 class TranscriptionSessionCreateParams(TypedDict, total=False):
+    client_secret: ClientSecret
+    """Configuration options for the generated client secret."""
+
     include: List[str]
     """The set of items to include in the transcription. Current available items are:
 
@@ -60,6 +70,25 @@ class TranscriptionSessionCreateParams(TypedDict, total=False):
     """
 
 
+class ClientSecretExpiresAt(TypedDict, total=False):
+    anchor: Literal["created_at"]
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: int
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class ClientSecret(TypedDict, total=False):
+    expires_at: ClientSecretExpiresAt
+    """Configuration for the ephemeral token expiration."""
+
+
 class InputAudioNoiseReduction(TypedDict, total=False):
     type: Literal["near_field", "far_field"]
     """Type of noise reduction.
src/openai/types/beta/realtime/transcription_session_update.py
@@ -8,12 +8,33 @@ from ...._models import BaseModel
 __all__ = [
     "TranscriptionSessionUpdate",
     "Session",
+    "SessionClientSecret",
+    "SessionClientSecretExpiresAt",
     "SessionInputAudioNoiseReduction",
     "SessionInputAudioTranscription",
     "SessionTurnDetection",
 ]
 
 
+class SessionClientSecretExpiresAt(BaseModel):
+    anchor: Optional[Literal["created_at"]] = None
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: Optional[int] = None
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class SessionClientSecret(BaseModel):
+    expires_at: Optional[SessionClientSecretExpiresAt] = None
+    """Configuration for the ephemeral token expiration."""
+
+
 class SessionInputAudioNoiseReduction(BaseModel):
     type: Optional[Literal["near_field", "far_field"]] = None
     """Type of noise reduction.
@@ -99,6 +120,9 @@ class SessionTurnDetection(BaseModel):
 
 
 class Session(BaseModel):
+    client_secret: Optional[SessionClientSecret] = None
+    """Configuration options for the generated client secret."""
+
     include: Optional[List[str]] = None
     """The set of items to include in the transcription. Current available items are:
 
src/openai/types/beta/realtime/transcription_session_update_param.py
@@ -8,12 +8,33 @@ from typing_extensions import Literal, Required, TypedDict
 __all__ = [
     "TranscriptionSessionUpdateParam",
     "Session",
+    "SessionClientSecret",
+    "SessionClientSecretExpiresAt",
     "SessionInputAudioNoiseReduction",
     "SessionInputAudioTranscription",
     "SessionTurnDetection",
 ]
 
 
+class SessionClientSecretExpiresAt(TypedDict, total=False):
+    anchor: Literal["created_at"]
+    """The anchor point for the ephemeral token expiration.
+
+    Only `created_at` is currently supported.
+    """
+
+    seconds: int
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200`.
+    """
+
+
+class SessionClientSecret(TypedDict, total=False):
+    expires_at: SessionClientSecretExpiresAt
+    """Configuration for the ephemeral token expiration."""
+
+
 class SessionInputAudioNoiseReduction(TypedDict, total=False):
     type: Literal["near_field", "far_field"]
     """Type of noise reduction.
@@ -99,6 +120,9 @@ class SessionTurnDetection(TypedDict, total=False):
 
 
 class Session(TypedDict, total=False):
+    client_secret: SessionClientSecret
+    """Configuration options for the generated client secret."""
+
     include: List[str]
     """The set of items to include in the transcription. Current available items are:
 
src/openai/types/chat/chat_completion.py
@@ -68,9 +68,9 @@ class ChatCompletion(BaseModel):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
-      latency guarentee.
+      latency guarantee.
     - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarentee.
+      tier with a lower uptime SLA and no latency guarantee.
     - If set to 'flex', the request will be processed with the Flex Processing
       service tier.
       [Learn more](https://platform.openai.com/docs/guides/flex-processing).
src/openai/types/chat/chat_completion_chunk.py
@@ -137,9 +137,9 @@ class ChatCompletionChunk(BaseModel):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
-      latency guarentee.
+      latency guarantee.
     - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarentee.
+      tier with a lower uptime SLA and no latency guarantee.
     - If set to 'flex', the request will be processed with the Flex Processing
       service tier.
       [Learn more](https://platform.openai.com/docs/guides/flex-processing).
src/openai/types/chat/completion_create_params.py
@@ -217,9 +217,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
-      latency guarentee.
+      latency guarantee.
     - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarentee.
+      tier with a lower uptime SLA and no latency guarantee.
     - If set to 'flex', the request will be processed with the Flex Processing
       service tier.
       [Learn more](https://platform.openai.com/docs/guides/flex-processing).
src/openai/types/fine_tuning/alpha/grader_run_params.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable
+from typing import Union
 from typing_extensions import Required, TypeAlias, TypedDict
 
 from ...graders.multi_grader_param import MultiGraderParam
@@ -19,10 +19,20 @@ class GraderRunParams(TypedDict, total=False):
     """The grader used for the fine-tuning job."""
 
     model_sample: Required[str]
-    """The model sample to be evaluated."""
+    """The model sample to be evaluated.
 
-    reference_answer: Required[Union[str, Iterable[object], float, object]]
-    """The reference answer for the evaluation."""
+    This value will be used to populate the `sample` namespace. See
+    [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+    The `output_json` variable will be populated if the model sample is a valid JSON
+    string.
+    """
+
+    item: object
+    """The dataset item provided to the grader.
+
+    This will be used to populate the `item` namespace. See
+    [the guide](https://platform.openai.com/docs/guides/graders) for more details.
+    """
 
 
 Grader: TypeAlias = Union[
src/openai/types/fine_tuning/fine_tuning_job.py
@@ -28,7 +28,7 @@ class Error(BaseModel):
 
 
 class Hyperparameters(BaseModel):
-    batch_size: Union[Literal["auto"], int, Optional[object], None] = None
+    batch_size: Union[Literal["auto"], int, None] = None
     """Number of examples in each batch.
 
     A larger batch size means that model parameters are updated less frequently, but
src/openai/types/graders/multi_grader.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, Union
+from typing import Union
 from typing_extensions import Literal, TypeAlias
 
 from ..._models import BaseModel
@@ -19,7 +19,11 @@ class MultiGrader(BaseModel):
     calculate_output: str
     """A formula to calculate the output based on grader results."""
 
-    graders: Dict[str, Graders]
+    graders: Graders
+    """
+    A StringCheckGrader object that performs a string comparison between input and
+    reference using a specified operation.
+    """
 
     name: str
     """The name of the grader."""
src/openai/types/graders/multi_grader_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Dict, Union
+from typing import Union
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .python_grader_param import PythonGraderParam
@@ -22,7 +22,11 @@ class MultiGraderParam(TypedDict, total=False):
     calculate_output: Required[str]
     """A formula to calculate the output based on grader results."""
 
-    graders: Required[Dict[str, Graders]]
+    graders: Required[Graders]
+    """
+    A StringCheckGrader object that performs a string comparison between input and
+    reference using a specified operation.
+    """
 
     name: Required[str]
     """The name of the grader."""
src/openai/types/responses/parsed_response.py
@@ -55,7 +55,7 @@ class ParsedResponseOutputMessage(ResponseOutputMessage, GenericModel, Generic[C
 class ParsedResponseFunctionToolCall(ResponseFunctionToolCall):
     parsed_arguments: object = None
 
-    __api_exclude__ = {'parsed_arguments'}
+    __api_exclude__ = {"parsed_arguments"}
 
 
 ParsedResponseOutputItem: TypeAlias = Annotated[
src/openai/types/responses/response.py
@@ -164,9 +164,9 @@ class Response(BaseModel):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
-      latency guarentee.
+      latency guarantee.
     - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarentee.
+      tier with a lower uptime SLA and no latency guarantee.
     - If set to 'flex', the request will be processed with the Flex Processing
       service tier.
       [Learn more](https://platform.openai.com/docs/guides/flex-processing).
src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -17,5 +17,5 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
     sequence_number: int
     """The sequence number of this event."""
 
-    type: Literal["response.code_interpreter_call.code.delta"]
-    """The type of the event. Always `response.code_interpreter_call.code.delta`."""
+    type: Literal["response.code_interpreter_call_code.delta"]
+    """The type of the event. Always `response.code_interpreter_call_code.delta`."""
src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -17,5 +17,5 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
     sequence_number: int
     """The sequence number of this event."""
 
-    type: Literal["response.code_interpreter_call.code.done"]
-    """The type of the event. Always `response.code_interpreter_call.code.done`."""
+    type: Literal["response.code_interpreter_call_code.done"]
+    """The type of the event. Always `response.code_interpreter_call_code.done`."""
src/openai/types/responses/response_create_params.py
@@ -67,6 +67,8 @@ class ResponseCreateParamsBase(TypedDict, total=False):
       multi-turn conversations when using the Responses API statelessly (like when
       the `store` parameter is set to `false`, or when an organization is enrolled
       in the zero data retention program).
+    - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+      in code interpreter tool call items.
     """
 
     instructions: Optional[str]
@@ -122,9 +124,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
       utilize scale tier credits until they are exhausted.
     - If set to 'auto', and the Project is not Scale tier enabled, the request will
       be processed using the default service tier with a lower uptime SLA and no
-      latency guarentee.
+      latency guarantee.
     - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarentee.
+      tier with a lower uptime SLA and no latency guarantee.
     - If set to 'flex', the request will be processed with the Flex Processing
       service tier.
       [Learn more](https://platform.openai.com/docs/guides/flex-processing).
src/openai/types/responses/response_includable.py
@@ -9,4 +9,5 @@ ResponseIncludable: TypeAlias = Literal[
     "message.input_image.image_url",
     "computer_call_output.output.image_url",
     "reasoning.encrypted_content",
+    "code_interpreter_call.outputs",
 ]
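With `code_interpreter_call.outputs` added to `ResponseIncludable`, a request can opt into the interpreter's outputs on the tool call items. Continuing the sketch above, the only change is the `include` argument:

```python
response = client.responses.create(
    model="gpt-4.1",  # placeholder
    input="Use python to compute 2 ** 32.",
    tools=[{"type": "code_interpreter", "container": {"type": "auto"}}],
    include=["code_interpreter_call.outputs"],
)
```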
src/openai/types/responses/response_output_text.py
@@ -1,12 +1,21 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union
+from typing import List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 
-__all__ = ["ResponseOutputText", "Annotation", "AnnotationFileCitation", "AnnotationURLCitation", "AnnotationFilePath"]
+__all__ = [
+    "ResponseOutputText",
+    "Annotation",
+    "AnnotationFileCitation",
+    "AnnotationURLCitation",
+    "AnnotationContainerFileCitation",
+    "AnnotationFilePath",
+    "Logprob",
+    "LogprobTopLogprob",
+]
 
 
 class AnnotationFileCitation(BaseModel):
@@ -37,6 +46,23 @@ class AnnotationURLCitation(BaseModel):
     """The URL of the web resource."""
 
 
+class AnnotationContainerFileCitation(BaseModel):
+    container_id: str
+    """The ID of the container file."""
+
+    end_index: int
+    """The index of the last character of the container file citation in the message."""
+
+    file_id: str
+    """The ID of the file."""
+
+    start_index: int
+    """The index of the first character of the container file citation in the message."""
+
+    type: Literal["container_file_citation"]
+    """The type of the container file citation. Always `container_file_citation`."""
+
+
 class AnnotationFilePath(BaseModel):
     file_id: str
     """The ID of the file."""
@@ -49,10 +75,29 @@ class AnnotationFilePath(BaseModel):
 
 
 Annotation: TypeAlias = Annotated[
-    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath], PropertyInfo(discriminator="type")
+    Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath],
+    PropertyInfo(discriminator="type"),
 ]
 
 
+class LogprobTopLogprob(BaseModel):
+    token: str
+
+    bytes: List[int]
+
+    logprob: float
+
+
+class Logprob(BaseModel):
+    token: str
+
+    bytes: List[int]
+
+    logprob: float
+
+    top_logprobs: List[LogprobTopLogprob]
+
+
 class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
@@ -62,3 +107,5 @@ class ResponseOutputText(BaseModel):
 
     type: Literal["output_text"]
     """The type of the output text. Always `output_text`."""
+
+    logprobs: Optional[List[Logprob]] = None
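`ResponseOutputText` gains container-file citations and an optional `logprobs` list with nested top-logprobs. Since `logprobs` defaults to `None`, reading it defensively looks like this (continuing from the `response` above):

```python
for item in response.output:
    if item.type == "message":
        for part in item.content:
            if part.type == "output_text" and part.logprobs:
                for lp in part.logprobs:
                    print(lp.token, lp.logprob, len(lp.top_logprobs))
```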
src/openai/types/responses/response_output_text_param.py
@@ -10,7 +10,10 @@ __all__ = [
     "Annotation",
     "AnnotationFileCitation",
     "AnnotationURLCitation",
+    "AnnotationContainerFileCitation",
     "AnnotationFilePath",
+    "Logprob",
+    "LogprobTopLogprob",
 ]
 
 
@@ -42,6 +45,23 @@ class AnnotationURLCitation(TypedDict, total=False):
     """The URL of the web resource."""
 
 
+class AnnotationContainerFileCitation(TypedDict, total=False):
+    container_id: Required[str]
+    """The ID of the container file."""
+
+    end_index: Required[int]
+    """The index of the last character of the container file citation in the message."""
+
+    file_id: Required[str]
+    """The ID of the file."""
+
+    start_index: Required[int]
+    """The index of the first character of the container file citation in the message."""
+
+    type: Required[Literal["container_file_citation"]]
+    """The type of the container file citation. Always `container_file_citation`."""
+
+
 class AnnotationFilePath(TypedDict, total=False):
     file_id: Required[str]
     """The ID of the file."""
@@ -53,7 +73,27 @@ class AnnotationFilePath(TypedDict, total=False):
     """The type of the file path. Always `file_path`."""
 
 
-Annotation: TypeAlias = Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationFilePath]
+Annotation: TypeAlias = Union[
+    AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath
+]
+
+
+class LogprobTopLogprob(TypedDict, total=False):
+    token: Required[str]
+
+    bytes: Required[Iterable[int]]
+
+    logprob: Required[float]
+
+
+class Logprob(TypedDict, total=False):
+    token: Required[str]
+
+    bytes: Required[Iterable[int]]
+
+    logprob: Required[float]
+
+    top_logprobs: Required[Iterable[LogprobTopLogprob]]
 
 
 class ResponseOutputTextParam(TypedDict, total=False):
@@ -65,3 +105,5 @@ class ResponseOutputTextParam(TypedDict, total=False):
 
     type: Required[Literal["output_text"]]
     """The type of the output text. Always `output_text`."""
+
+    logprobs: Iterable[Logprob]
src/openai/types/responses/response_retrieve_params.py
@@ -2,17 +2,47 @@
 
 from __future__ import annotations
 
-from typing import List
-from typing_extensions import TypedDict
+from typing import List, Union
+from typing_extensions import Literal, Required, TypedDict
 
 from .response_includable import ResponseIncludable
 
-__all__ = ["ResponseRetrieveParams"]
+__all__ = ["ResponseRetrieveParamsBase", "ResponseRetrieveParamsNonStreaming", "ResponseRetrieveParamsStreaming"]
 
 
-class ResponseRetrieveParams(TypedDict, total=False):
+class ResponseRetrieveParamsBase(TypedDict, total=False):
     include: List[ResponseIncludable]
     """Additional fields to include in the response.
 
     See the `include` parameter for Response creation above for more information.
     """
+
+    starting_after: int
+    """The sequence number of the event after which to start streaming."""
+
+
+class ResponseRetrieveParamsNonStreaming(ResponseRetrieveParamsBase, total=False):
+    stream: Literal[False]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+class ResponseRetrieveParamsStreaming(ResponseRetrieveParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, the model response data will be streamed to the client as it is
+    generated using
+    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+    See the
+    [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+    for more information.
+    """
+
+
+ResponseRetrieveParams = Union[ResponseRetrieveParamsNonStreaming, ResponseRetrieveParamsStreaming]
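The retrieve params are split into streaming and non-streaming variants with a `starting_after` cursor. A sketch of resuming an event stream, assuming `responses.retrieve` grew matching overloads in this release (the response ID and sequence number are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Resume server-sent events after sequence number 41.
stream = client.responses.retrieve(
    "resp_123",  # placeholder response ID
    stream=True,
    starting_after=41,
)
for event in stream:
    print(event.sequence_number, event.type)
```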
src/openai/types/responses/tool_param.py
@@ -28,6 +28,7 @@ __all__ = [
     "LocalShell",
 ]
 
+
 class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
     tool_names: List[str]
     """List of allowed tool names."""
@@ -177,5 +178,5 @@ ToolParam: TypeAlias = Union[
     LocalShell,
 ]
 
-  
+
 ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
src/openai/types/image_edit_params.py
@@ -16,7 +16,7 @@ class ImageEditParams(TypedDict, total=False):
     """The image(s) to edit. Must be a supported image file or an array of images.
 
     For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-    25MB. You can provide up to 16 images.
+    50MB. You can provide up to 16 images.
 
     For `dall-e-2`, you can only provide one image, and it should be a square `png`
     file less than 4MB.
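The `gpt-image-1` per-image limit doubles from 25MB to 50MB; the call shape is unchanged. A minimal multi-image edit sketch (file names and prompt are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Up to 16 images for gpt-image-1, each now up to 50MB.
result = client.images.edit(
    model="gpt-image-1",
    image=[open("base.png", "rb"), open("style.png", "rb")],
    prompt="Combine these into a single poster.",
)
```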
src/openai/_base_client.py
@@ -962,6 +962,9 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
             if self.custom_auth is not None:
                 kwargs["auth"] = self.custom_auth
 
+            if options.follow_redirects is not None:
+                kwargs["follow_redirects"] = options.follow_redirects
+
             log.debug("Sending HTTP Request: %s %s", request.method, request.url)
 
             response = None
@@ -1477,6 +1480,9 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
             if self.custom_auth is not None:
                 kwargs["auth"] = self.custom_auth
 
+            if options.follow_redirects is not None:
+                kwargs["follow_redirects"] = options.follow_redirects
+
             log.debug("Sending HTTP Request: %s %s", request.method, request.url)
 
             response = None
src/openai/_models.py
@@ -777,6 +777,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
     idempotency_key: str
     json_data: Body
     extra_json: AnyMapping
+    follow_redirects: bool
 
 
 @final
@@ -790,6 +791,7 @@ class FinalRequestOptions(pydantic.BaseModel):
     files: Union[HttpxRequestFiles, None] = None
     idempotency_key: Union[str, None] = None
     post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+    follow_redirects: Union[bool, None] = None
 
     # It should be noted that we cannot use `json` here as that would override
     # a BaseModel method in an incompatible fashion.
src/openai/_types.py
@@ -101,6 +101,7 @@ class RequestOptions(TypedDict, total=False):
     params: Query
     extra_json: AnyMapping
     idempotency_key: str
+    follow_redirects: bool
 
 
 # Sentinel class used until PEP 0661 is accepted
@@ -217,3 +218,4 @@ class _GenericAlias(Protocol):
 
 class HttpxSendArgs(TypedDict, total=False):
     auth: httpx.Auth
+    follow_redirects: bool
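Together these hunks thread a `follow_redirects` request option from the public `RequestOptions` TypedDict through `FinalRequestOptions` into the `httpx.Client.send()` call. At the httpx layer, the forwarded keyword behaves as below (a standalone sketch of httpx itself, not the SDK surface):

```python
import httpx

with httpx.Client() as http:
    request = http.build_request("GET", "https://example.com/old-path")
    # Same keyword the SDK now forwards to httpx.Client.send().
    response = http.send(request, follow_redirects=True)
    print(response.status_code, response.url)
```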
src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.82.1"  # x-release-please-version
+__version__ = "1.83.0"  # x-release-please-version
tests/api_resources/beta/realtime/test_sessions.py
@@ -25,6 +25,12 @@ class TestSessions:
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         session = client.beta.realtime.sessions.create(
+            client_secret={
+                "expires_at": {
+                    "anchor": "created_at",
+                    "seconds": 0,
+                }
+            },
             input_audio_format="pcm16",
             input_audio_noise_reduction={"type": "near_field"},
             input_audio_transcription={
@@ -92,6 +98,12 @@ class TestAsyncSessions:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         session = await async_client.beta.realtime.sessions.create(
+            client_secret={
+                "expires_at": {
+                    "anchor": "created_at",
+                    "seconds": 0,
+                }
+            },
             input_audio_format="pcm16",
             input_audio_noise_reduction={"type": "near_field"},
             input_audio_transcription={
tests/api_resources/beta/realtime/test_transcription_sessions.py
@@ -25,6 +25,12 @@ class TestTranscriptionSessions:
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         transcription_session = client.beta.realtime.transcription_sessions.create(
+            client_secret={
+                "expires_at": {
+                    "anchor": "created_at",
+                    "seconds": 0,
+                }
+            },
             include=["string"],
             input_audio_format="pcm16",
             input_audio_noise_reduction={"type": "near_field"},
@@ -78,6 +84,12 @@ class TestAsyncTranscriptionSessions:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         transcription_session = await async_client.beta.realtime.transcription_sessions.create(
+            client_secret={
+                "expires_at": {
+                    "anchor": "created_at",
+                    "seconds": 0,
+                }
+            },
             include=["string"],
             input_audio_format="pcm16",
             input_audio_noise_reduction={"type": "near_field"},
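The realtime tests now exercise a `client_secret` with an `expires_at` anchor. The same shape outside the tests would look roughly like this (the TTL value is illustrative, and server-side requirements such as a model may apply):

```python
from openai import OpenAI

client = OpenAI()

session = client.beta.realtime.sessions.create(
    client_secret={
        "expires_at": {
            "anchor": "created_at",
            "seconds": 600,  # illustrative TTL
        }
    },
)
print(session.client_secret)
```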
tests/api_resources/beta/threads/runs/test_steps.py
@@ -12,6 +12,8 @@ from tests.utils import assert_matches_type
 from openai.pagination import SyncCursorPage, AsyncCursorPage
 from openai.types.beta.threads.runs import RunStep
 
+# pyright: reportDeprecated=false
+
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
 
@@ -20,30 +22,35 @@ class TestSteps:
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        step = client.beta.threads.runs.steps.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = client.beta.threads.runs.steps.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            )
+
         assert_matches_type(RunStep, step, path=["response"])
 
     @parametrize
     def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
-        step = client.beta.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-        )
+        with pytest.warns(DeprecationWarning):
+            step = client.beta.threads.runs.steps.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+            )
+
         assert_matches_type(RunStep, step, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.steps.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.steps.with_raw_response.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,69 +59,76 @@ class TestSteps:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.steps.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = response.parse()
-            assert_matches_type(RunStep, step, path=["response"])
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.steps.with_streaming_response.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+                step = response.parse()
+                assert_matches_type(RunStep, step, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-                run_id="string",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
-                thread_id="string",
-                run_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
-            client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-                run_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="step_id",
+                    thread_id="",
+                    run_id="run_id",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="step_id",
+                    thread_id="thread_id",
+                    run_id="",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
+                client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="",
+                    thread_id="thread_id",
+                    run_id="run_id",
+                )
 
     @parametrize
     def test_method_list(self, client: OpenAI) -> None:
-        step = client.beta.threads.runs.steps.list(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = client.beta.threads.runs.steps.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
-        step = client.beta.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            limit=0,
-            order="asc",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = client.beta.threads.runs.steps.list(
+                run_id="run_id",
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                limit=0,
+                order="asc",
+            )
+
         assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.steps.with_raw_response.list(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.steps.with_raw_response.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -123,31 +137,33 @@ class TestSteps:
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.steps.with_streaming_response.list(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.steps.with_streaming_response.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            step = response.parse()
-            assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
+                step = response.parse()
+                assert_matches_type(SyncCursorPage[RunStep], step, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_list(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.steps.with_raw_response.list(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.steps.with_raw_response.list(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.steps.with_raw_response.list(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.steps.with_raw_response.list(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
 
 class TestAsyncSteps:
@@ -155,30 +171,35 @@ class TestAsyncSteps:
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        step = await async_client.beta.threads.runs.steps.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = await async_client.beta.threads.runs.steps.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            )
+
         assert_matches_type(RunStep, step, path=["response"])
 
     @parametrize
     async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        step = await async_client.beta.threads.runs.steps.retrieve(
-            step_id="step_id",
-            thread_id="thread_id",
-            run_id="run_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-        )
+        with pytest.warns(DeprecationWarning):
+            step = await async_client.beta.threads.runs.steps.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+            )
+
         assert_matches_type(RunStep, step, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -187,69 +208,76 @@ class TestAsyncSteps:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-            run_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            step = await response.parse()
-            assert_matches_type(RunStep, step, path=["response"])
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve(
+                step_id="step_id",
+                thread_id="thread_id",
+                run_id="run_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+                step = await response.parse()
+                assert_matches_type(RunStep, step, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-                run_id="string",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "string",
-                thread_id="string",
-                run_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
-            await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-                run_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="step_id",
+                    thread_id="",
+                    run_id="run_id",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="step_id",
+                    thread_id="thread_id",
+                    run_id="",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"):
+                await async_client.beta.threads.runs.steps.with_raw_response.retrieve(
+                    step_id="",
+                    thread_id="thread_id",
+                    run_id="run_id",
+                )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
-        step = await async_client.beta.threads.runs.steps.list(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = await async_client.beta.threads.runs.steps.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        step = await async_client.beta.threads.runs.steps.list(
-            run_id="run_id",
-            thread_id="thread_id",
-            after="after",
-            before="before",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            limit=0,
-            order="asc",
-        )
+        with pytest.warns(DeprecationWarning):
+            step = await async_client.beta.threads.runs.steps.list(
+                run_id="run_id",
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                limit=0,
+                order="asc",
+            )
+
         assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.steps.with_raw_response.list(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.steps.with_raw_response.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -258,28 +286,30 @@ class TestAsyncSteps:
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.steps.with_streaming_response.list(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.steps.with_streaming_response.list(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            step = await response.parse()
-            assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
+                step = await response.parse()
+                assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.steps.with_raw_response.list(
-                "string",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.steps.with_raw_response.list(
-                "",
-                thread_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.steps.with_raw_response.list(
+                    run_id="run_id",
+                    thread_id="",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.steps.with_raw_response.list(
+                    run_id="",
+                    thread_id="thread_id",
+                )
tests/api_resources/beta/threads/test_messages.py
@@ -15,6 +15,8 @@ from openai.types.beta.threads import (
     MessageDeleted,
 )
 
+# pyright: reportDeprecated=false
+
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
 
@@ -23,36 +25,41 @@ class TestMessages:
 
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.create(
-            "string",
-            content="string",
-            role="user",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.create(
-            "string",
-            content="string",
-            role="user",
-            attachments=[
-                {
-                    "file_id": "file_id",
-                    "tools": [{"type": "code_interpreter"}],
-                }
-            ],
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+                attachments=[
+                    {
+                        "file_id": "file_id",
+                        "tools": [{"type": "code_interpreter"}],
+                    }
+                ],
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
-        response = client.beta.threads.messages.with_raw_response.create(
-            "string",
-            content="string",
-            role="user",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.messages.with_raw_response.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -61,42 +68,47 @@ class TestMessages:
 
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
-        with client.beta.threads.messages.with_streaming_response.create(
-            "string",
-            content="string",
-            role="user",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.messages.with_streaming_response.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_create(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.create(
-                "",
-                content="string",
-                role="user",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.create(
+                    thread_id="",
+                    content="string",
+                    role="user",
+                )
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.threads.messages.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.messages.with_raw_response.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -105,55 +117,62 @@ class TestMessages:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.threads.messages.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.messages.with_streaming_response.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.retrieve(
+                    message_id="message_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.retrieve(
+                    message_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     def test_method_update(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     def test_method_update_with_all_params(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.update(
+                message_id="message_id",
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     def test_raw_response_update(self, client: OpenAI) -> None:
-        response = client.beta.threads.messages.with_raw_response.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.messages.with_raw_response.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -162,56 +181,63 @@ class TestMessages:
 
     @parametrize
     def test_streaming_response_update(self, client: OpenAI) -> None:
-        with client.beta.threads.messages.with_streaming_response.update(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.messages.with_streaming_response.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_update(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.update(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.update(
+                    message_id="message_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.update(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.update(
+                    message_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     def test_method_list(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.list(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(SyncCursorPage[Message], message, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.list(
-            "string",
-            after="string",
-            before="string",
-            limit=0,
-            order="asc",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.list(
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                limit=0,
+                order="asc",
+                run_id="run_id",
+            )
+
         assert_matches_type(SyncCursorPage[Message], message, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
-        response = client.beta.threads.messages.with_raw_response.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.messages.with_raw_response.list(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -220,38 +246,43 @@ class TestMessages:
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
-        with client.beta.threads.messages.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.messages.with_streaming_response.list(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = response.parse()
-            assert_matches_type(SyncCursorPage[Message], message, path=["response"])
+                message = response.parse()
+                assert_matches_type(SyncCursorPage[Message], message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_list(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.list(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.list(
+                    thread_id="",
+                )
 
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
-        message = client.beta.threads.messages.delete(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = client.beta.threads.messages.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(MessageDeleted, message, path=["response"])
 
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
-        response = client.beta.threads.messages.with_raw_response.delete(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.messages.with_raw_response.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -260,31 +291,33 @@ class TestMessages:
 
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
-        with client.beta.threads.messages.with_streaming_response.delete(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.messages.with_streaming_response.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = response.parse()
-            assert_matches_type(MessageDeleted, message, path=["response"])
+                message = response.parse()
+                assert_matches_type(MessageDeleted, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_delete(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.delete(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.delete(
+                    message_id="message_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            client.beta.threads.messages.with_raw_response.delete(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                client.beta.threads.messages.with_raw_response.delete(
+                    message_id="",
+                    thread_id="thread_id",
+                )
 
 
 class TestAsyncMessages:
@@ -292,36 +325,41 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.create(
-            "string",
-            content="string",
-            role="user",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.create(
-            "string",
-            content="string",
-            role="user",
-            attachments=[
-                {
-                    "file_id": "file_id",
-                    "tools": [{"type": "code_interpreter"}],
-                }
-            ],
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+                attachments=[
+                    {
+                        "file_id": "file_id",
+                        "tools": [{"type": "code_interpreter"}],
+                    }
+                ],
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.messages.with_raw_response.create(
-            "string",
-            content="string",
-            role="user",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.messages.with_raw_response.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -330,42 +368,47 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.messages.with_streaming_response.create(
-            "string",
-            content="string",
-            role="user",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.messages.with_streaming_response.create(
+                thread_id="thread_id",
+                content="string",
+                role="user",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = await response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = await response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.create(
-                "",
-                content="string",
-                role="user",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.create(
+                    thread_id="",
+                    content="string",
+                    role="user",
+                )
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.messages.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.messages.with_raw_response.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -374,55 +417,62 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.messages.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.messages.with_streaming_response.retrieve(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = await response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = await response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.retrieve(
+                    message_id="message_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.retrieve(
+                    message_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     async def test_method_update(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.update(
-            message_id="message_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.update(
+                message_id="message_id",
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Message, message, path=["response"])
 
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.messages.with_raw_response.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.messages.with_raw_response.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -431,56 +481,63 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.messages.with_streaming_response.update(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.messages.with_streaming_response.update(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = await response.parse()
-            assert_matches_type(Message, message, path=["response"])
+                message = await response.parse()
+                assert_matches_type(Message, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.update(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.update(
+                    message_id="message_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.update(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.update(
+                    message_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.list(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.list(
-            "string",
-            after="string",
-            before="string",
-            limit=0,
-            order="asc",
-            run_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.list(
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                limit=0,
+                order="asc",
+                run_id="run_id",
+            )
+
         assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.messages.with_raw_response.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.messages.with_raw_response.list(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -489,38 +546,43 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.messages.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.messages.with_streaming_response.list(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = await response.parse()
-            assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
+                message = await response.parse()
+                assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.list(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.list(
+                    thread_id="",
+                )
 
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
-        message = await async_client.beta.threads.messages.delete(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            message = await async_client.beta.threads.messages.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(MessageDeleted, message, path=["response"])
 
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.messages.with_raw_response.delete(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.messages.with_raw_response.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -529,28 +591,30 @@ class TestAsyncMessages:
 
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.messages.with_streaming_response.delete(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.messages.with_streaming_response.delete(
+                message_id="message_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            message = await response.parse()
-            assert_matches_type(MessageDeleted, message, path=["response"])
+                message = await response.parse()
+                assert_matches_type(MessageDeleted, message, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.delete(
-                "string",
-                thread_id="",
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
-            await async_client.beta.threads.messages.with_raw_response.delete(
-                "",
-                thread_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.delete(
+                    message_id="message_id",
+                    thread_id="",
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+                await async_client.beta.threads.messages.with_raw_response.delete(
+                    message_id="",
+                    thread_id="thread_id",
+                )
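
The rewritten message tests above follow one pattern throughout: every call into the deprecated Assistants surface is wrapped in pytest.warns(DeprecationWarning), and the formerly positional path parameters ("string") become explicit keywords (thread_id=..., message_id=...). A minimal, self-contained sketch of the same idiom follows; deprecated_create is a stand-in for the SDK methods, not part of openai-python:

    import warnings

    import pytest

    def deprecated_create(thread_id: str) -> str:
        # Stand-in for e.g. client.beta.threads.messages.create(...).
        warnings.warn("The Assistants API is deprecated", DeprecationWarning, stacklevel=2)
        if not thread_id:
            raise ValueError("Expected a non-empty value for `thread_id` but received ''")
        return "msg_123"

    def test_create_warns() -> None:
        # Fails if the DeprecationWarning stops being emitted.
        with pytest.warns(DeprecationWarning):
            assert deprecated_create(thread_id="thread_id") == "msg_123"

    def test_path_params() -> None:
        # pytest.warns can wrap pytest.raises, mirroring the generated tests:
        # the warning fires before the ValueError is raised and caught.
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
                deprecated_create(thread_id="")

pytest.warns passes as long as at least one matching warning is emitted inside the block, which is why the generated tests can keep two path-parameter checks under a single warns context.
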
tests/api_resources/beta/threads/test_runs.py
@@ -24,58 +24,63 @@ class TestRuns:
 
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.create(
-            thread_id="thread_id",
-            assistant_id="assistant_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            additional_instructions="additional_instructions",
-            additional_messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            reasoning_effort="low",
-            response_format="auto",
-            stream=False,
-            temperature=1,
-            tool_choice="none",
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                additional_instructions="additional_instructions",
+                additional_messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                reasoning_effort="low",
+                response_format="auto",
+                stream=False,
+                temperature=1,
+                tool_choice="none",
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.create(
-            "string",
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,82 +89,89 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.create(
-            "string",
-            assistant_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_create_overload_1(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.create(
-                "",
-                assistant_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.create(
+                    thread_id="",
+                    assistant_id="assistant_id",
+                )
 
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
-        run_stream = client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            run_stream = client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            )
+
         run_stream.response.close()
 
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
-        run_stream = client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            additional_instructions="additional_instructions",
-            additional_messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            reasoning_effort="low",
-            response_format="auto",
-            temperature=1,
-            tool_choice="none",
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            run_stream = client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                additional_instructions="additional_instructions",
+                additional_messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                reasoning_effort="low",
+                response_format="auto",
+                temperature=1,
+                tool_choice="none",
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         run_stream.response.close()
 
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -167,42 +179,47 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            stream = response.parse()
-            stream.close()
+                stream = response.parse()
+                stream.close()
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_create_overload_2(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.create(
-                "",
-                assistant_id="string",
-                stream=True,
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.create(
+                    thread_id="",
+                    assistant_id="assistant_id",
+                    stream=True,
+                )
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -211,55 +228,62 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.retrieve(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.retrieve(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     def test_method_update(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_method_update_with_all_params(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.update(
-            run_id="run_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.update(
+                run_id="run_id",
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_raw_response_update(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -268,55 +292,62 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_update(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.update(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_update(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.update(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.update(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.update(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.update(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     def test_method_list(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.list(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(SyncCursorPage[Run], run, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.list(
-            "string",
-            after="string",
-            before="string",
-            limit=0,
-            order="asc",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.list(
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                limit=0,
+                order="asc",
+            )
+
         assert_matches_type(SyncCursorPage[Run], run, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.list(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -325,38 +356,43 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.list(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(SyncCursorPage[Run], run, path=["response"])
+                run = response.parse()
+                assert_matches_type(SyncCursorPage[Run], run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_list(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.list(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.list(
+                    thread_id="",
+                )
 
     @parametrize
     def test_method_cancel(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.cancel(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_raw_response_cancel(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.cancel(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -365,63 +401,70 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_cancel(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.cancel(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_cancel(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.cancel(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.cancel(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.cancel(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.cancel(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     def test_method_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_method_submit_tool_outputs_with_all_params_overload_1(self, client: OpenAI) -> None:
-        run = client.beta.threads.runs.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            tool_outputs=[
-                {
-                    "output": "output",
-                    "tool_call_id": "tool_call_id",
-                }
-            ],
-            stream=False,
-        )
+        with pytest.warns(DeprecationWarning):
+            run = client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[
+                    {
+                        "output": "output",
+                        "tool_call_id": "tool_call_id",
+                    }
+                ],
+                stream=False,
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     def test_raw_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -430,53 +473,58 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_submit_tool_outputs_overload_1(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "string",
-                thread_id="",
-                tool_outputs=[{}],
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="run_id",
+                    thread_id="",
+                    tool_outputs=[{}],
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="",
+                    thread_id="thread_id",
+                    tool_outputs=[{}],
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                run_id="",
+    @parametrize
+    def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
+        with pytest.warns(DeprecationWarning):
+            run_stream = client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
                 thread_id="thread_id",
+                stream=True,
                 tool_outputs=[{}],
             )
 
-    @parametrize
-    def test_method_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
-        run_stream = client.beta.threads.runs.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        )
         run_stream.response.close()
 
     @parametrize
     def test_raw_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
-        response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                stream=True,
+                tool_outputs=[{}],
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -484,37 +532,39 @@ class TestRuns:
 
     @parametrize
     def test_streaming_response_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
-        with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            stream = response.parse()
-            stream.close()
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                stream=True,
+                tool_outputs=[{}],
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+                stream = response.parse()
+                stream.close()
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "string",
-                thread_id="",
-                stream=True,
-                tool_outputs=[{}],
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "",
-                thread_id="string",
-                stream=True,
-                tool_outputs=[{}],
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="run_id",
+                    thread_id="",
+                    stream=True,
+                    tool_outputs=[{}],
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="",
+                    thread_id="thread_id",
+                    stream=True,
+                    tool_outputs=[{}],
+                )
 
 
 class TestAsyncRuns:
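
Alongside the deprecation wrapping, the test_raw_response_* and test_streaming_response_* cases exercise the SDK's two alternate access paths, which this commit leaves unchanged. A hedged consumer-side sketch of both paths against the runs resource (the IDs are placeholders, and each call will itself emit the DeprecationWarning introduced above):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Raw access: the wrapper exposes the HTTP layer; .parse() yields the typed Run.
    raw = client.beta.threads.runs.with_raw_response.retrieve(
        run_id="run_abc",        # placeholder
        thread_id="thread_abc",  # placeholder
    )
    run = raw.parse()
    print(raw.http_request.headers.get("X-Stainless-Lang"), run.status)

    # Streaming access: a context manager that releases the connection on exit;
    # response.is_closed is True once the context exits, as the tests assert.
    with client.beta.threads.runs.with_streaming_response.retrieve(
        run_id="run_abc",
        thread_id="thread_abc",
    ) as response:
        run = response.parse()
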
@@ -522,58 +572,63 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.create(
-            thread_id="thread_id",
-            assistant_id="assistant_id",
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            additional_instructions="additional_instructions",
-            additional_messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            reasoning_effort="low",
-            response_format="auto",
-            stream=False,
-            temperature=1,
-            tool_choice="none",
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                additional_instructions="additional_instructions",
+                additional_messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                reasoning_effort="low",
+                response_format="auto",
+                stream=False,
+                temperature=1,
+                tool_choice="none",
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.create(
-            "string",
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -582,82 +637,89 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.create(
-            "string",
-            assistant_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.create(
-                "",
-                assistant_id="string",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.create(
+                    thread_id="",
+                    assistant_id="assistant_id",
+                )
 
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
-        run_stream = await async_client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            run_stream = await async_client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            )
+
         await run_stream.response.aclose()
 
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
-        run_stream = await async_client.beta.threads.runs.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-            include=["step_details.tool_calls[*].file_search.results[*].content"],
-            additional_instructions="additional_instructions",
-            additional_messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            reasoning_effort="low",
-            response_format="auto",
-            temperature=1,
-            tool_choice="none",
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            run_stream = await async_client.beta.threads.runs.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
+                additional_instructions="additional_instructions",
+                additional_messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                reasoning_effort="low",
+                response_format="auto",
+                temperature=1,
+                tool_choice="none",
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         await run_stream.response.aclose()
 
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -665,42 +727,47 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.create(
-            "string",
-            assistant_id="string",
-            stream=True,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.create(
+                thread_id="thread_id",
+                assistant_id="assistant_id",
+                stream=True,
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            stream = await response.parse()
-            await stream.close()
+                stream = await response.parse()
+                await stream.close()
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_create_overload_2(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.create(
-                "",
-                assistant_id="string",
-                stream=True,
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.create(
+                    thread_id="",
+                    assistant_id="assistant_id",
+                    stream=True,
+                )
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.retrieve(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -709,55 +776,62 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.retrieve(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.retrieve(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.retrieve(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.retrieve(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.retrieve(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.retrieve(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     async def test_method_update(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.update(
-            run_id="run_id",
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.update(
+                run_id="run_id",
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.update(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -766,55 +840,62 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.update(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.update(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.update(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.update(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.update(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.update(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.list(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.list(
-            "string",
-            after="string",
-            before="string",
-            limit=0,
-            order="asc",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.list(
+                thread_id="thread_id",
+                after="after",
+                before="before",
+                limit=0,
+                order="asc",
+            )
+
         assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.list(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.list(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -823,38 +904,43 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.list(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.list(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(AsyncCursorPage[Run], run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.list(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.list(
+                    thread_id="",
+                )
 
     @parametrize
     async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.cancel(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.cancel(
-            "string",
-            thread_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -863,63 +949,70 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.cancel(
-            "string",
-            thread_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.cancel(
+                run_id="run_id",
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.cancel(
-                "string",
-                thread_id="",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.cancel(
+                    run_id="run_id",
+                    thread_id="",
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.cancel(
-                "",
-                thread_id="string",
-            )
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.cancel(
+                    run_id="",
+                    thread_id="thread_id",
+                )
 
     @parametrize
     async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
-        run = await async_client.beta.threads.runs.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            tool_outputs=[
-                {
-                    "output": "output",
-                    "tool_call_id": "tool_call_id",
-                }
-            ],
-            stream=False,
-        )
+        with pytest.warns(DeprecationWarning):
+            run = await async_client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[
+                    {
+                        "output": "output",
+                        "tool_call_id": "tool_call_id",
+                    }
+                ],
+                stream=False,
+            )
+
         assert_matches_type(Run, run, path=["response"])
 
     @parametrize
     async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -928,53 +1021,58 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
-            run_id="run_id",
-            thread_id="thread_id",
-            tool_outputs=[{}],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                tool_outputs=[{}],
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            run = await response.parse()
-            assert_matches_type(Run, run, path=["response"])
+                run = await response.parse()
+                assert_matches_type(Run, run, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "string",
-                thread_id="",
-                tool_outputs=[{}],
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="run_id",
+                    thread_id="",
+                    tool_outputs=[{}],
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="",
+                    thread_id="thread_id",
+                    tool_outputs=[{}],
+                )
 
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                run_id="",
+    @parametrize
+    async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
+        with pytest.warns(DeprecationWarning):
+            run_stream = await async_client.beta.threads.runs.submit_tool_outputs(
+                run_id="run_id",
                 thread_id="thread_id",
+                stream=True,
                 tool_outputs=[{}],
             )
 
-    @parametrize
-    async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
-        run_stream = await async_client.beta.threads.runs.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        )
         await run_stream.response.aclose()
 
     @parametrize
     async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                stream=True,
+                tool_outputs=[{}],
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -982,34 +1080,36 @@ class TestAsyncRuns:
 
     @parametrize
     async def test_streaming_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
-            "string",
-            thread_id="string",
-            stream=True,
-            tool_outputs=[{}],
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            stream = await response.parse()
-            await stream.close()
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs(
+                run_id="run_id",
+                thread_id="thread_id",
+                stream=True,
+                tool_outputs=[{}],
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+                stream = await response.parse()
+                await stream.close()
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "string",
-                thread_id="",
-                stream=True,
-                tool_outputs=[{}],
-            )
-
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
-            await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
-                "",
-                thread_id="string",
-                stream=True,
-                tool_outputs=[{}],
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="run_id",
+                    thread_id="",
+                    stream=True,
+                    tool_outputs=[{}],
+                )
+
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+                await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs(
+                    run_id="",
+                    thread_id="thread_id",
+                    stream=True,
+                    tool_outputs=[{}],
+                )
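
The hunks above all apply one mechanical pattern: each call into the deprecated Assistants API is wrapped in pytest.warns(DeprecationWarning), and positional path IDs become explicit keyword arguments (thread_id=..., run_id=...). The same treatment repeats in tests/api_resources/beta/test_threads.py below, where a # pyright: reportDeprecated=false pragma additionally silences the type checker's deprecation diagnostics. A minimal, self-contained sketch of the runtime half of that pattern follows; the client construction is an assumption based on the suite's TEST_API_BASE_URL default and a placeholder API key, not code taken from this diff:

import pytest

from openai import OpenAI

# Placeholder client pointed at a local mock server; the real suite builds its
# clients from TEST_API_BASE_URL via shared fixtures.
client = OpenAI(base_url="http://127.0.0.1:4010", api_key="my-api-key")


def test_create_thread_warns() -> None:
    # pytest.warns fails the test unless the block actually emits a matching
    # DeprecationWarning, so this asserts both the warning and a usable response.
    with pytest.warns(DeprecationWarning):
        thread = client.beta.threads.create()
    assert thread.id is not None
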
tests/api_resources/beta/test_threads.py
@@ -15,6 +15,8 @@ from openai.types.beta import (
 )
 from openai.types.beta.threads import Run
 
+# pyright: reportDeprecated=false
+
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
 
@@ -23,45 +25,50 @@ class TestThreads:
 
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
-        thread = client.beta.threads.create()
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.create()
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
-        thread = client.beta.threads.create(
-            messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {
-                    "vector_store_ids": ["string"],
-                    "vector_stores": [
-                        {
-                            "chunking_strategy": {"type": "auto"},
-                            "file_ids": ["string"],
-                            "metadata": {"foo": "string"},
-                        }
-                    ],
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                metadata={"foo": "string"},
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {
+                        "vector_store_ids": ["string"],
+                        "vector_stores": [
+                            {
+                                "chunking_strategy": {"type": "auto"},
+                                "file_ids": ["string"],
+                                "metadata": {"foo": "string"},
+                            }
+                        ],
+                    },
                 },
-            },
-        )
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.create()
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -70,27 +77,31 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.create() as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
-        thread = client.beta.threads.retrieve(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.retrieve(
+                "thread_id",
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.retrieve(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.retrieve(
+                "thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -99,48 +110,55 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.retrieve(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.retrieve(
+                "thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_retrieve(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.with_raw_response.retrieve(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.with_raw_response.retrieve(
+                    "",
+                )
 
     @parametrize
     def test_method_update(self, client: OpenAI) -> None:
-        thread = client.beta.threads.update(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.update(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     def test_method_update_with_all_params(self, client: OpenAI) -> None:
-        thread = client.beta.threads.update(
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.update(
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     def test_raw_response_update(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.update(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.update(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -149,36 +167,41 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_update(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.update(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.update(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_update(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.with_raw_response.update(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.with_raw_response.update(
+                    thread_id="",
+                )
 
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
-        thread = client.beta.threads.delete(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.delete(
+                "thread_id",
+            )
+
         assert_matches_type(ThreadDeleted, thread, path=["response"])
 
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.delete(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.delete(
+                "thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -187,92 +210,99 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.delete(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.delete(
+                "thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = response.parse()
-            assert_matches_type(ThreadDeleted, thread, path=["response"])
+                thread = response.parse()
+                assert_matches_type(ThreadDeleted, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_path_params_delete(self, client: OpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            client.beta.threads.with_raw_response.delete(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                client.beta.threads.with_raw_response.delete(
+                    "",
+                )
 
     @parametrize
     def test_method_create_and_run_overload_1(self, client: OpenAI) -> None:
-        thread = client.beta.threads.create_and_run(
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+            )
+
         assert_matches_type(Run, thread, path=["response"])
 
     @parametrize
     def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) -> None:
-        thread = client.beta.threads.create_and_run(
-            assistant_id="string",
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            response_format="auto",
-            stream=False,
-            temperature=1,
-            thread={
-                "messages": [
-                    {
-                        "content": "string",
-                        "role": "user",
-                        "attachments": [
-                            {
-                                "file_id": "file_id",
-                                "tools": [{"type": "code_interpreter"}],
-                            }
-                        ],
-                        "metadata": {"foo": "string"},
-                    }
-                ],
-                "metadata": {"foo": "string"},
-                "tool_resources": {
-                    "code_interpreter": {"file_ids": ["string"]},
-                    "file_search": {
-                        "vector_store_ids": ["string"],
-                        "vector_stores": [
-                            {
-                                "chunking_strategy": {"type": "auto"},
-                                "file_ids": ["string"],
-                                "metadata": {"foo": "string"},
-                            }
-                        ],
+        with pytest.warns(DeprecationWarning):
+            thread = client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                response_format="auto",
+                stream=False,
+                temperature=1,
+                thread={
+                    "messages": [
+                        {
+                            "content": "string",
+                            "role": "user",
+                            "attachments": [
+                                {
+                                    "file_id": "file_id",
+                                    "tools": [{"type": "code_interpreter"}],
+                                }
+                            ],
+                            "metadata": {"foo": "string"},
+                        }
+                    ],
+                    "metadata": {"foo": "string"},
+                    "tool_resources": {
+                        "code_interpreter": {"file_ids": ["string"]},
+                        "file_search": {
+                            "vector_store_ids": ["string"],
+                            "vector_stores": [
+                                {
+                                    "chunking_strategy": {"type": "auto"},
+                                    "file_ids": ["string"],
+                                    "metadata": {"foo": "string"},
+                                }
+                            ],
+                        },
                     },
                 },
-            },
-            tool_choice="none",
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+                tool_choice="none",
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         assert_matches_type(Run, thread, path=["response"])
 
     @parametrize
     def test_raw_response_create_and_run_overload_1(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.create_and_run(
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.create_and_run(
+                assistant_id="assistant_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -281,87 +311,93 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_create_and_run_overload_1(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.create_and_run(
-            assistant_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.create_and_run(
+                assistant_id="assistant_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = response.parse()
-            assert_matches_type(Run, thread, path=["response"])
+                thread = response.parse()
+                assert_matches_type(Run, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     def test_method_create_and_run_overload_2(self, client: OpenAI) -> None:
-        thread_stream = client.beta.threads.create_and_run(
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            thread_stream = client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            )
+
         thread_stream.response.close()
 
     @parametrize
     def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) -> None:
-        thread_stream = client.beta.threads.create_and_run(
-            assistant_id="string",
-            stream=True,
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            response_format="auto",
-            temperature=1,
-            thread={
-                "messages": [
-                    {
-                        "content": "string",
-                        "role": "user",
-                        "attachments": [
-                            {
-                                "file_id": "file_id",
-                                "tools": [{"type": "code_interpreter"}],
-                            }
-                        ],
-                        "metadata": {"foo": "string"},
-                    }
-                ],
-                "metadata": {"foo": "string"},
-                "tool_resources": {
-                    "code_interpreter": {"file_ids": ["string"]},
-                    "file_search": {
-                        "vector_store_ids": ["string"],
-                        "vector_stores": [
-                            {
-                                "chunking_strategy": {"type": "auto"},
-                                "file_ids": ["string"],
-                                "metadata": {"foo": "string"},
-                            }
-                        ],
+        with pytest.warns(DeprecationWarning):
+            thread_stream = client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                response_format="auto",
+                temperature=1,
+                thread={
+                    "messages": [
+                        {
+                            "content": "string",
+                            "role": "user",
+                            "attachments": [
+                                {
+                                    "file_id": "file_id",
+                                    "tools": [{"type": "code_interpreter"}],
+                                }
+                            ],
+                            "metadata": {"foo": "string"},
+                        }
+                    ],
+                    "metadata": {"foo": "string"},
+                    "tool_resources": {
+                        "code_interpreter": {"file_ids": ["string"]},
+                        "file_search": {
+                            "vector_store_ids": ["string"],
+                            "vector_stores": [
+                                {
+                                    "chunking_strategy": {"type": "auto"},
+                                    "file_ids": ["string"],
+                                    "metadata": {"foo": "string"},
+                                }
+                            ],
+                        },
                     },
                 },
-            },
-            tool_choice="none",
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+                tool_choice="none",
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         thread_stream.response.close()
 
     @parametrize
     def test_raw_response_create_and_run_overload_2(self, client: OpenAI) -> None:
-        response = client.beta.threads.with_raw_response.create_and_run(
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            response = client.beta.threads.with_raw_response.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -369,15 +405,16 @@ class TestThreads:
 
     @parametrize
     def test_streaming_response_create_and_run_overload_2(self, client: OpenAI) -> None:
-        with client.beta.threads.with_streaming_response.create_and_run(
-            assistant_id="string",
-            stream=True,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            with client.beta.threads.with_streaming_response.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            stream = response.parse()
-            stream.close()
+                stream = response.parse()
+                stream.close()
 
         assert cast(Any, response.is_closed) is True
 
@@ -387,45 +424,50 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.create()
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.create()
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.create(
-            messages=[
-                {
-                    "content": "string",
-                    "role": "user",
-                    "attachments": [
-                        {
-                            "file_id": "file_id",
-                            "tools": [{"type": "code_interpreter"}],
-                        }
-                    ],
-                    "metadata": {"foo": "string"},
-                }
-            ],
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {
-                    "vector_store_ids": ["string"],
-                    "vector_stores": [
-                        {
-                            "chunking_strategy": {"type": "auto"},
-                            "file_ids": ["string"],
-                            "metadata": {"foo": "string"},
-                        }
-                    ],
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                        "attachments": [
+                            {
+                                "file_id": "file_id",
+                                "tools": [{"type": "code_interpreter"}],
+                            }
+                        ],
+                        "metadata": {"foo": "string"},
+                    }
+                ],
+                metadata={"foo": "string"},
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {
+                        "vector_store_ids": ["string"],
+                        "vector_stores": [
+                            {
+                                "chunking_strategy": {"type": "auto"},
+                                "file_ids": ["string"],
+                                "metadata": {"foo": "string"},
+                            }
+                        ],
+                    },
                 },
-            },
-        )
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.create()
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.create()
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -434,27 +476,31 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.create() as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.create() as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = await response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = await response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.retrieve(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.retrieve(
+                "thread_id",
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.retrieve(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.retrieve(
+                "thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -463,48 +509,55 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.retrieve(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.retrieve(
+                "thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = await response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = await response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.with_raw_response.retrieve(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.with_raw_response.retrieve(
+                    "",
+                )
 
     @parametrize
     async def test_method_update(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.update(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.update(
+                thread_id="thread_id",
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.update(
-            thread_id="thread_id",
-            metadata={"foo": "string"},
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.update(
+                thread_id="thread_id",
+                metadata={"foo": "string"},
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+            )
+
         assert_matches_type(Thread, thread, path=["response"])
 
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.update(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.update(
+                thread_id="thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -513,36 +566,41 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.update(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.update(
+                thread_id="thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = await response.parse()
-            assert_matches_type(Thread, thread, path=["response"])
+                thread = await response.parse()
+                assert_matches_type(Thread, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.with_raw_response.update(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.with_raw_response.update(
+                    thread_id="",
+                )
 
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.delete(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.delete(
+                "thread_id",
+            )
+
         assert_matches_type(ThreadDeleted, thread, path=["response"])
 
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.delete(
-            "string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.delete(
+                "thread_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -551,92 +609,99 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.delete(
-            "string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.delete(
+                "thread_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = await response.parse()
-            assert_matches_type(ThreadDeleted, thread, path=["response"])
+                thread = await response.parse()
+                assert_matches_type(ThreadDeleted, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
-        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
-            await async_client.beta.threads.with_raw_response.delete(
-                "",
-            )
+        with pytest.warns(DeprecationWarning):
+            with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+                await async_client.beta.threads.with_raw_response.delete(
+                    "",
+                )
 
     @parametrize
     async def test_method_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.create_and_run(
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+            )
+
         assert_matches_type(Run, thread, path=["response"])
 
     @parametrize
     async def test_method_create_and_run_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
-        thread = await async_client.beta.threads.create_and_run(
-            assistant_id="string",
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            response_format="auto",
-            stream=False,
-            temperature=1,
-            thread={
-                "messages": [
-                    {
-                        "content": "string",
-                        "role": "user",
-                        "attachments": [
-                            {
-                                "file_id": "file_id",
-                                "tools": [{"type": "code_interpreter"}],
-                            }
-                        ],
-                        "metadata": {"foo": "string"},
-                    }
-                ],
-                "metadata": {"foo": "string"},
-                "tool_resources": {
-                    "code_interpreter": {"file_ids": ["string"]},
-                    "file_search": {
-                        "vector_store_ids": ["string"],
-                        "vector_stores": [
-                            {
-                                "chunking_strategy": {"type": "auto"},
-                                "file_ids": ["string"],
-                                "metadata": {"foo": "string"},
-                            }
-                        ],
+        with pytest.warns(DeprecationWarning):
+            thread = await async_client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                response_format="auto",
+                stream=False,
+                temperature=1,
+                thread={
+                    "messages": [
+                        {
+                            "content": "string",
+                            "role": "user",
+                            "attachments": [
+                                {
+                                    "file_id": "file_id",
+                                    "tools": [{"type": "code_interpreter"}],
+                                }
+                            ],
+                            "metadata": {"foo": "string"},
+                        }
+                    ],
+                    "metadata": {"foo": "string"},
+                    "tool_resources": {
+                        "code_interpreter": {"file_ids": ["string"]},
+                        "file_search": {
+                            "vector_store_ids": ["string"],
+                            "vector_stores": [
+                                {
+                                    "chunking_strategy": {"type": "auto"},
+                                    "file_ids": ["string"],
+                                    "metadata": {"foo": "string"},
+                                }
+                            ],
+                        },
                     },
                 },
-            },
-            tool_choice="none",
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+                tool_choice="none",
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         assert_matches_type(Run, thread, path=["response"])
 
     @parametrize
     async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.create_and_run(
-            assistant_id="string",
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.create_and_run(
+                assistant_id="assistant_id",
+            )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -645,87 +710,93 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.create_and_run(
-            assistant_id="string",
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.create_and_run(
+                assistant_id="assistant_id",
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
-            thread = await response.parse()
-            assert_matches_type(Run, thread, path=["response"])
+                thread = await response.parse()
+                assert_matches_type(Run, thread, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
     async def test_method_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
-        thread_stream = await async_client.beta.threads.create_and_run(
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            thread_stream = await async_client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            )
+
         await thread_stream.response.aclose()
 
     @parametrize
     async def test_method_create_and_run_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
-        thread_stream = await async_client.beta.threads.create_and_run(
-            assistant_id="string",
-            stream=True,
-            instructions="string",
-            max_completion_tokens=256,
-            max_prompt_tokens=256,
-            metadata={"foo": "string"},
-            model="string",
-            parallel_tool_calls=True,
-            response_format="auto",
-            temperature=1,
-            thread={
-                "messages": [
-                    {
-                        "content": "string",
-                        "role": "user",
-                        "attachments": [
-                            {
-                                "file_id": "file_id",
-                                "tools": [{"type": "code_interpreter"}],
-                            }
-                        ],
-                        "metadata": {"foo": "string"},
-                    }
-                ],
-                "metadata": {"foo": "string"},
-                "tool_resources": {
-                    "code_interpreter": {"file_ids": ["string"]},
-                    "file_search": {
-                        "vector_store_ids": ["string"],
-                        "vector_stores": [
-                            {
-                                "chunking_strategy": {"type": "auto"},
-                                "file_ids": ["string"],
-                                "metadata": {"foo": "string"},
-                            }
-                        ],
+        with pytest.warns(DeprecationWarning):
+            thread_stream = await async_client.beta.threads.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+                instructions="instructions",
+                max_completion_tokens=256,
+                max_prompt_tokens=256,
+                metadata={"foo": "string"},
+                model="string",
+                parallel_tool_calls=True,
+                response_format="auto",
+                temperature=1,
+                thread={
+                    "messages": [
+                        {
+                            "content": "string",
+                            "role": "user",
+                            "attachments": [
+                                {
+                                    "file_id": "file_id",
+                                    "tools": [{"type": "code_interpreter"}],
+                                }
+                            ],
+                            "metadata": {"foo": "string"},
+                        }
+                    ],
+                    "metadata": {"foo": "string"},
+                    "tool_resources": {
+                        "code_interpreter": {"file_ids": ["string"]},
+                        "file_search": {
+                            "vector_store_ids": ["string"],
+                            "vector_stores": [
+                                {
+                                    "chunking_strategy": {"type": "auto"},
+                                    "file_ids": ["string"],
+                                    "metadata": {"foo": "string"},
+                                }
+                            ],
+                        },
                     },
                 },
-            },
-            tool_choice="none",
-            tool_resources={
-                "code_interpreter": {"file_ids": ["string"]},
-                "file_search": {"vector_store_ids": ["string"]},
-            },
-            tools=[{"type": "code_interpreter"}],
-            top_p=1,
-            truncation_strategy={
-                "type": "auto",
-                "last_messages": 1,
-            },
-        )
+                tool_choice="none",
+                tool_resources={
+                    "code_interpreter": {"file_ids": ["string"]},
+                    "file_search": {"vector_store_ids": ["string"]},
+                },
+                tools=[{"type": "code_interpreter"}],
+                top_p=1,
+                truncation_strategy={
+                    "type": "auto",
+                    "last_messages": 1,
+                },
+            )
+
         await thread_stream.response.aclose()
 
     @parametrize
     async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.beta.threads.with_raw_response.create_and_run(
-            assistant_id="string",
-            stream=True,
-        )
+        with pytest.warns(DeprecationWarning):
+            response = await async_client.beta.threads.with_raw_response.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            )
 
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         stream = response.parse()
@@ -733,14 +804,15 @@ class TestAsyncThreads:
 
     @parametrize
     async def test_streaming_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.beta.threads.with_streaming_response.create_and_run(
-            assistant_id="string",
-            stream=True,
-        ) as response:
-            assert not response.is_closed
-            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
-            stream = await response.parse()
-            await stream.close()
+        with pytest.warns(DeprecationWarning):
+            async with async_client.beta.threads.with_streaming_response.create_and_run(
+                assistant_id="assistant_id",
+                stream=True,
+            ) as response:
+                assert not response.is_closed
+                assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+                stream = await response.parse()
+                await stream.close()
 
         assert cast(Any, response.is_closed) is True
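
Every thread test above now wraps its deprecated Assistants API call in `pytest.warns(DeprecationWarning)`, which fails the test unless the call actually emits the warning. A minimal, self-contained sketch of that assertion pattern, using a hypothetical deprecated function rather than the SDK itself:

```python
import warnings

import pytest


def create_thread() -> str:
    """Hypothetical stand-in for a deprecated SDK method."""
    warnings.warn("The Assistants API is deprecated", DeprecationWarning, stacklevel=2)
    return "thread_abc123"


def test_create_emits_deprecation_warning() -> None:
    # pytest.warns fails the test if no DeprecationWarning is raised,
    # so every deprecated call site is forced through the context manager.
    with pytest.warns(DeprecationWarning):
        thread_id = create_thread()
    assert thread_id == "thread_abc123"
```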
tests/api_resources/containers/files/test_content.py
@@ -5,9 +5,15 @@ from __future__ import annotations
 import os
 from typing import Any, cast
 
+import httpx
 import pytest
+from respx import MockRouter
 
+import openai._legacy_response as _legacy_response
 from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+
+# pyright: reportDeprecated=false
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -16,15 +22,25 @@ class TestContent:
     parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 
     @parametrize
-    def test_method_retrieve(self, client: OpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
         content = client.containers.files.content.retrieve(
             file_id="file_id",
             container_id="container_id",
         )
-        assert content is None
+        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
+        assert content.json() == {"foo": "bar"}
 
     @parametrize
-    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
+
         response = client.containers.files.content.with_raw_response.retrieve(
             file_id="file_id",
             container_id="container_id",
@@ -33,10 +49,14 @@ class TestContent:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         content = response.parse()
-        assert content is None
+        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
 
     @parametrize
-    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
         with client.containers.files.content.with_streaming_response.retrieve(
             file_id="file_id",
             container_id="container_id",
@@ -45,11 +65,12 @@ class TestContent:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             content = response.parse()
-            assert content is None
+            assert_matches_type(bytes, content, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
+    @pytest.mark.respx(base_url=base_url)
     def test_path_params_retrieve(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
             client.containers.files.content.with_raw_response.retrieve(
@@ -68,15 +89,25 @@ class TestAsyncContent:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
 
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    async def test_method_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
         content = await async_client.containers.files.content.retrieve(
             file_id="file_id",
             container_id="container_id",
         )
-        assert content is None
+        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
+        assert content.json() == {"foo": "bar"}
 
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
+
         response = await async_client.containers.files.content.with_raw_response.retrieve(
             file_id="file_id",
             container_id="container_id",
@@ -85,10 +116,14 @@ class TestAsyncContent:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         content = response.parse()
-        assert content is None
+        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
 
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+    @pytest.mark.respx(base_url=base_url)
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/containers/container_id/files/file_id/content").mock(
+            return_value=httpx.Response(200, json={"foo": "bar"})
+        )
         async with async_client.containers.files.content.with_streaming_response.retrieve(
             file_id="file_id",
             container_id="container_id",
@@ -97,11 +132,12 @@ class TestAsyncContent:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             content = await response.parse()
-            assert content is None
+            assert_matches_type(bytes, content, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
     @parametrize
+    @pytest.mark.respx(base_url=base_url)
     async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
             await async_client.containers.files.content.with_raw_response.retrieve(
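
The content tests above register a respx route so the mocked endpoint returns real bytes instead of an empty body. A stripped-down sketch of that mocking pattern outside the SDK, with a placeholder base URL (the route path and payload mirror the diff; the call here is a plain httpx request):

```python
import httpx
import respx


@respx.mock
def test_mocked_binary_content() -> None:
    # Register the same route the suite mocks for the content endpoint.
    respx.get("https://api.example.test/containers/container_id/files/file_id/content").mock(
        return_value=httpx.Response(200, json={"foo": "bar"})
    )

    response = httpx.get("https://api.example.test/containers/container_id/files/file_id/content")
    assert response.status_code == 200
    assert response.json() == {"foo": "bar"}
```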
tests/api_resources/fine_tuning/alpha/test_graders.py
@@ -31,7 +31,6 @@ class TestGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         )
         assert_matches_type(GraderRunResponse, grader, path=["response"])
 
@@ -46,7 +45,7 @@ class TestGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
+            item={},
         )
         assert_matches_type(GraderRunResponse, grader, path=["response"])
 
@@ -61,7 +60,6 @@ class TestGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         )
 
         assert response.is_closed is True
@@ -80,7 +78,6 @@ class TestGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -167,7 +164,6 @@ class TestAsyncGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         )
         assert_matches_type(GraderRunResponse, grader, path=["response"])
 
@@ -182,7 +178,7 @@ class TestAsyncGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
+            item={},
         )
         assert_matches_type(GraderRunResponse, grader, path=["response"])
 
@@ -197,7 +193,6 @@ class TestAsyncGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         )
 
         assert response.is_closed is True
@@ -216,7 +211,6 @@ class TestAsyncGraders:
                 "type": "string_check",
             },
             model_sample="model_sample",
-            reference_answer="string",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
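
These grader tests drop the top-level `reference_answer` argument: the reference now lives inside the grader definition, and the all-params variant passes an `item` payload instead. A hedged sketch of the updated call shape (all field values are placeholders, and the full grader dict is assumed from the string-check grader type rather than shown in full by this hunk):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# The reference answer is carried by the grader definition itself;
# `item` supplies the datum the grader is evaluated against.
result = client.fine_tuning.alpha.graders.run(
    grader={
        "input": "input",
        "name": "name",
        "operation": "eq",
        "reference": "reference",
        "type": "string_check",
    },
    model_sample="model_sample",
    item={},
)
print(result)
```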
tests/api_resources/test_responses.py
@@ -164,22 +164,24 @@ class TestResponses:
         assert cast(Any, response.is_closed) is True
 
     @parametrize
-    def test_method_retrieve(self, client: OpenAI) -> None:
+    def test_method_retrieve_overload_1(self, client: OpenAI) -> None:
         response = client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         )
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+    def test_method_retrieve_with_all_params_overload_1(self, client: OpenAI) -> None:
         response = client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             include=["file_search_call.results"],
+            starting_after=0,
+            stream=False,
         )
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+    def test_raw_response_retrieve_overload_1(self, client: OpenAI) -> None:
         http_response = client.responses.with_raw_response.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         )
@@ -190,7 +192,7 @@ class TestResponses:
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+    def test_streaming_response_retrieve_overload_1(self, client: OpenAI) -> None:
         with client.responses.with_streaming_response.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         ) as http_response:
@@ -203,10 +205,61 @@ class TestResponses:
         assert cast(Any, http_response.is_closed) is True
 
     @parametrize
-    def test_path_params_retrieve(self, client: OpenAI) -> None:
+    def test_path_params_retrieve_overload_1(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            client.responses.with_raw_response.retrieve(
+                response_id="",
+            )
+
+    @parametrize
+    def test_method_retrieve_overload_2(self, client: OpenAI) -> None:
+        response_stream = client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        )
+        response_stream.response.close()
+
+    @parametrize
+    def test_method_retrieve_with_all_params_overload_2(self, client: OpenAI) -> None:
+        response_stream = client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+            include=["file_search_call.results"],
+            starting_after=0,
+        )
+        response_stream.response.close()
+
+    @parametrize
+    def test_raw_response_retrieve_overload_2(self, client: OpenAI) -> None:
+        response = client.responses.with_raw_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        )
+
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        stream = response.parse()
+        stream.close()
+
+    @parametrize
+    def test_streaming_response_retrieve_overload_2(self, client: OpenAI) -> None:
+        with client.responses.with_streaming_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            stream = response.parse()
+            stream.close()
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve_overload_2(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
             client.responses.with_raw_response.retrieve(
                 response_id="",
+                stream=True,
             )
 
     @parametrize
@@ -252,7 +305,7 @@ class TestResponses:
         response = client.responses.cancel(
             "resp_677efb5139a88190b512bc3fef8e535d",
         )
-        assert response is None
+        assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     def test_raw_response_cancel(self, client: OpenAI) -> None:
@@ -263,7 +316,7 @@ class TestResponses:
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
         response = http_response.parse()
-        assert response is None
+        assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     def test_streaming_response_cancel(self, client: OpenAI) -> None:
@@ -274,7 +327,7 @@ class TestResponses:
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             response = http_response.parse()
-            assert response is None
+            assert_matches_type(Response, response, path=["response"])
 
         assert cast(Any, http_response.is_closed) is True
 
@@ -436,22 +489,24 @@ class TestAsyncResponses:
         assert cast(Any, response.is_closed) is True
 
     @parametrize
-    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+    async def test_method_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         )
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+    async def test_method_retrieve_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             include=["file_search_call.results"],
+            starting_after=0,
+            stream=False,
         )
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+    async def test_raw_response_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
         http_response = await async_client.responses.with_raw_response.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         )
@@ -462,7 +517,7 @@ class TestAsyncResponses:
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
-    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+    async def test_streaming_response_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
         async with async_client.responses.with_streaming_response.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
         ) as http_response:
@@ -475,10 +530,61 @@ class TestAsyncResponses:
         assert cast(Any, http_response.is_closed) is True
 
     @parametrize
-    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+    async def test_path_params_retrieve_overload_1(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+            await async_client.responses.with_raw_response.retrieve(
+                response_id="",
+            )
+
+    @parametrize
+    async def test_method_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response_stream = await async_client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        )
+        await response_stream.response.aclose()
+
+    @parametrize
+    async def test_method_retrieve_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response_stream = await async_client.responses.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+            include=["file_search_call.results"],
+            starting_after=0,
+        )
+        await response_stream.response.aclose()
+
+    @parametrize
+    async def test_raw_response_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.responses.with_raw_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        )
+
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        stream = response.parse()
+        await stream.close()
+
+    @parametrize
+    async def test_streaming_response_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.responses.with_streaming_response.retrieve(
+            response_id="resp_677efb5139a88190b512bc3fef8e535d",
+            stream=True,
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            stream = await response.parse()
+            await stream.close()
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve_overload_2(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
             await async_client.responses.with_raw_response.retrieve(
                 response_id="",
+                stream=True,
             )
 
     @parametrize
@@ -524,7 +630,7 @@ class TestAsyncResponses:
         response = await async_client.responses.cancel(
             "resp_677efb5139a88190b512bc3fef8e535d",
         )
-        assert response is None
+        assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
@@ -535,7 +641,7 @@ class TestAsyncResponses:
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
         response = http_response.parse()
-        assert response is None
+        assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
@@ -546,7 +652,7 @@ class TestAsyncResponses:
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             response = await http_response.parse()
-            assert response is None
+            assert_matches_type(Response, response, path=["response"])
 
         assert cast(Any, http_response.is_closed) is True
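
The retrieve tests are split into two overloads because `responses.retrieve` can now return either a parsed `Response` or, with `stream=True`, an event stream that must be closed or drained. A brief usage sketch under assumed values (the response id is a placeholder; the client reads OPENAI_API_KEY from the environment):

```python
from openai import OpenAI

client = OpenAI()

# Overload 1: plain retrieval returns a parsed Response object.
response = client.responses.retrieve(response_id="resp_123")
print(response.status)

# Overload 2: stream=True returns an event stream; iterate it to
# completion (or close it) so the underlying connection is released.
stream = client.responses.retrieve(response_id="resp_123", stream=True)
for event in stream:
    print(event.type)
```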
 
tests/lib/chat/_utils.py
@@ -28,7 +28,7 @@ def print_obj(obj: object, monkeypatch: pytest.MonkeyPatch) -> str:
 
         string = rich_print_str(obj)
 
-        # we remove all `fn_name.<locals>.` occurences
+        # we remove all `fn_name.<locals>.` occurrences
         # so that we can share the same snapshots between
         # pydantic v1 and pydantic v2 as their output for
         # generic models differs, e.g.
tests/lib/test_assistants.py
@@ -11,7 +11,7 @@ def test_create_and_run_poll_method_definition_in_sync(sync: bool, client: OpenA
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
 
     assert_signatures_in_sync(
-        checking_client.beta.threads.create_and_run,
+        checking_client.beta.threads.create_and_run,  # pyright: ignore[reportDeprecated]
         checking_client.beta.threads.create_and_run_poll,
         exclude_params={"stream"},
     )
@@ -22,7 +22,7 @@ def test_create_and_run_stream_method_definition_in_sync(sync: bool, client: Ope
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
 
     assert_signatures_in_sync(
-        checking_client.beta.threads.create_and_run,
+        checking_client.beta.threads.create_and_run,  # pyright: ignore[reportDeprecated]
         checking_client.beta.threads.create_and_run_stream,
         exclude_params={"stream"},
     )
@@ -33,8 +33,8 @@ def test_run_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
 
     assert_signatures_in_sync(
-        checking_client.beta.threads.runs.create,
-        checking_client.beta.threads.runs.stream,
+        checking_client.beta.threads.runs.create,  # pyright: ignore[reportDeprecated]
+        checking_client.beta.threads.runs.stream,  # pyright: ignore[reportDeprecated]
         exclude_params={"stream"},
     )
 
@@ -44,7 +44,7 @@ def test_create_and_poll_method_definition_in_sync(sync: bool, client: OpenAI, a
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
 
     assert_signatures_in_sync(
-        checking_client.beta.threads.runs.create,
-        checking_client.beta.threads.runs.create_and_poll,
+        checking_client.beta.threads.runs.create,  # pyright: ignore[reportDeprecated]
+        checking_client.beta.threads.runs.create_and_poll,  # pyright: ignore[reportDeprecated]
         exclude_params={"stream"},
     )
tests/test_client.py
@@ -908,6 +908,33 @@ class TestOpenAI:
             assert response.retries_taken == failures_before_success
             assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
 
+    @pytest.mark.respx(base_url=base_url)
+    def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+        # Test that the default follow_redirects=True allows following redirects
+        respx_mock.post("/redirect").mock(
+            return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+        )
+        respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+        response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+        assert response.status_code == 200
+        assert response.json() == {"status": "ok"}
+
+    @pytest.mark.respx(base_url=base_url)
+    def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+        # Test that follow_redirects=False prevents following redirects
+        respx_mock.post("/redirect").mock(
+            return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+        )
+
+        with pytest.raises(APIStatusError) as exc_info:
+            self.client.post(
+                "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+            )
+
+        assert exc_info.value.response.status_code == 302
+        assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
+
 
 class TestAsyncOpenAI:
     client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
@@ -1829,3 +1856,30 @@ class TestAsyncOpenAI:
                     raise AssertionError("calling get_platform using asyncify resulted in a hung process")
 
                 time.sleep(0.1)
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+        # Test that the default follow_redirects=True allows following redirects
+        respx_mock.post("/redirect").mock(
+            return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+        )
+        respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+        response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+        assert response.status_code == 200
+        assert response.json() == {"status": "ok"}
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+        # Test that follow_redirects=False prevents following redirects
+        respx_mock.post("/redirect").mock(
+            return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+        )
+
+        with pytest.raises(APIStatusError) as exc_info:
+            await self.client.post(
+                "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+            )
+
+        assert exc_info.value.response.status_code == 302
+        assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
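
The new tests pin down the `follow_redirects` request option: redirects are followed by default, and disabling the option per request surfaces the 3xx as an `APIStatusError`. A short sketch of the per-request override (the `/redirect` path is the mock route from the tests, not a real API endpoint):

```python
import httpx

from openai import OpenAI, APIStatusError

client = OpenAI()

# follow_redirects defaults to True; turning it off for one request
# makes a 302 raise instead of being followed transparently.
try:
    client.post(
        "/redirect",
        body={"key": "value"},
        options={"follow_redirects": False},
        cast_to=httpx.Response,
    )
except APIStatusError as err:
    print(err.response.status_code, err.response.headers.get("Location"))
```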
.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.82.1"
+  ".": "1.83.0"
 }
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fc64d7c2c8f51f750813375356c3f3fdfc7fc1b1b34f19c20a5410279d445d37.yml
-openapi_spec_hash: 618285fc70199ee32b9ebe4bf72f7e4c
-config_hash: 535b6e5f26a295d609b259c8cb8f656c
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2bcc845d8635bf93ddcf9ee723af4d7928248412a417bee5fc10d863a1e13867.yml
+openapi_spec_hash: 865230cb3abeb01bd85de05891af23c4
+config_hash: ed1e6b3c5f93d12b80d31167f55c557c
api.md
@@ -784,7 +784,7 @@ Methods:
 - <code title="post /responses">client.responses.<a href="./src/openai/resources/responses/responses.py">create</a>(\*\*<a href="src/openai/types/responses/response_create_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
 - <code title="get /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">retrieve</a>(response_id, \*\*<a href="src/openai/types/responses/response_retrieve_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
 - <code title="delete /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">delete</a>(response_id) -> None</code>
-- <code title="post /responses/{response_id}/cancel">client.responses.<a href="./src/openai/resources/responses/responses.py">cancel</a>(response_id) -> None</code>
+- <code title="post /responses/{response_id}/cancel">client.responses.<a href="./src/openai/resources/responses/responses.py">cancel</a>(response_id) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
 
 ## InputItems
 
@@ -894,4 +894,4 @@ Methods:
 
 Methods:
 
-- <code title="get /containers/{container_id}/files/{file_id}/content">client.containers.files.content.<a href="./src/openai/resources/containers/files/content.py">retrieve</a>(file_id, \*, container_id) -> None</code>
+- <code title="get /containers/{container_id}/files/{file_id}/content">client.containers.files.content.<a href="./src/openai/resources/containers/files/content.py">retrieve</a>(file_id, \*, container_id) -> HttpxBinaryResponseContent</code>
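
Two return types change in api.md: `responses.cancel` now returns the canceled `Response` instead of `None`, and the container file content endpoint returns `HttpxBinaryResponseContent`. A hedged sketch of both call sites with placeholder ids:

```python
from openai import OpenAI

client = OpenAI()

# cancel now hands back the canceled Response, so its state is inspectable.
canceled = client.responses.cancel("resp_123")
print(canceled.status)

# Container file content arrives as binary and can be written straight to disk.
content = client.containers.files.content.retrieve(
    "file_123",
    container_id="cntr_123",
)
content.write_to_file("file_123.bin")
```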
CHANGELOG.md
@@ -1,5 +1,27 @@
 # Changelog
 
+## 1.83.0 (2025-06-02)
+
+Full Changelog: [v1.82.1...v1.83.0](https://github.com/openai/openai-python/compare/v1.82.1...v1.83.0)
+
+### Features
+
+* **api:** Config update for pakrym-stream-param ([88bcf3a](https://github.com/openai/openai-python/commit/88bcf3af9ce8ffa8347547d4d30aacac1ceba939))
+* **client:** add follow_redirects request option ([26d715f](https://github.com/openai/openai-python/commit/26d715f4e9b0f2b19e2ac16acc796a949338e1e1))
+
+
+### Bug Fixes
+
+* **api:** Fix evals and code interpreter interfaces ([2650159](https://github.com/openai/openai-python/commit/2650159f6d01f6eb481cf8c7942142e4fd21ce44))
+* **client:** return binary content from `get /containers/{container_id}/files/{file_id}/content` ([f7c80c4](https://github.com/openai/openai-python/commit/f7c80c4368434bd0be7436375076ba33a62f63b5))
+
+
+### Chores
+
+* **api:** mark some methods as deprecated ([3e2ca57](https://github.com/openai/openai-python/commit/3e2ca571cb6cdd9e15596590605b2f98a4c5a42e))
+* deprecate Assistants API ([9d166d7](https://github.com/openai/openai-python/commit/9d166d795e03dea49af680ec9597e9497522187c))
+* **docs:** remove reference to rye shell ([c7978e9](https://github.com/openai/openai-python/commit/c7978e9f1640c311022988fcd716cbb5c865daa8))
+
 ## 1.82.1 (2025-05-29)
 
 Full Changelog: [v1.82.0...v1.82.1](https://github.com/openai/openai-python/compare/v1.82.0...v1.82.1)
CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
 You can then run scripts using `rye run python script.py` or by activating the virtual environment:
 
 ```sh
-$ rye shell
-# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
 $ source .venv/bin/activate
 
 # now you can omit the `rye run` prefix
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.82.1"
+version = "1.83.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"