Commit bf4a9a42

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-07-22 05:17:00
chore(api): event shapes more accurate
Parent: c6b9335
src/openai/lib/streaming/responses/_events.py
@@ -21,9 +21,7 @@ from ....types.responses import (
     ResponseRefusalDoneEvent,
     ResponseRefusalDeltaEvent,
     ResponseMcpCallFailedEvent,
-    ResponseReasoningDoneEvent,
     ResponseOutputItemDoneEvent,
-    ResponseReasoningDeltaEvent,
     ResponseContentPartDoneEvent,
     ResponseOutputItemAddedEvent,
     ResponseContentPartAddedEvent,
@@ -139,10 +137,8 @@ ResponseStreamEvent: TypeAlias = Annotated[
         ResponseMcpListToolsInProgressEvent,
         ResponseOutputTextAnnotationAddedEvent,
         ResponseQueuedEvent,
-        ResponseReasoningDeltaEvent,
         ResponseReasoningSummaryDeltaEvent,
         ResponseReasoningSummaryDoneEvent,
-        ResponseReasoningDoneEvent,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/lib/streaming/responses/_responses.py
@@ -264,6 +264,7 @@ class ResponseStreamState(Generic[TextFormatT]):
                     item_id=event.item_id,
                     output_index=event.output_index,
                     sequence_number=event.sequence_number,
+                    logprobs=event.logprobs,
                     type="response.output_text.delta",
                     snapshot=content.text,
                 )
@@ -282,6 +283,7 @@ class ResponseStreamState(Generic[TextFormatT]):
                     item_id=event.item_id,
                     output_index=event.output_index,
                     sequence_number=event.sequence_number,
+                    logprobs=event.logprobs,
                     type="response.output_text.done",
                     text=event.text,
                     parsed=parse_text(event.text, text_format=self._text_format),
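The two hunks above forward the new `logprobs` field onto the snapshot events that the streaming helper synthesizes. A minimal consumption sketch, assuming an API key in the environment; the model name is illustrative, and whether the list is populated depends on the request's logprob settings:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

with client.responses.stream(
    model="gpt-4o",  # illustrative model name
    input="Say hello.",
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            # `logprobs` now rides along on the snapshot event as well
            for lp in event.logprobs:
                print(lp.token, lp.logprob)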
src/openai/resources/audio/speech.py
@@ -50,9 +50,7 @@ class Speech(SyncAPIResource):
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ],
+        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]],
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
@@ -146,9 +144,7 @@ class AsyncSpeech(AsyncAPIResource):
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ],
+        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]],
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
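`fable`, `onyx`, and `nova` drop out of the `voice` Literal for speech synthesis; the `str` arm of the Union still accepts arbitrary values, so existing call sites keep type-checking even if the server later rejects a retired voice. A short sketch with one of the remaining options (the output path is illustrative):

from openai import OpenAI

client = OpenAI()

audio = client.audio.speech.create(
    model="gpt-4o-mini-tts",
    voice="coral",  # still present in the narrowed Literal
    input="The quick brown fox jumped over the lazy dog.",
)
with open("speech.mp3", "wb") as f:  # illustrative output path
    f.write(audio.content)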
src/openai/resources/beta/realtime/sessions.py
@@ -66,9 +66,7 @@ class Sessions(SyncAPIResource):
         tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
         tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
         turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
-        voice: Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ]
+        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
         | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -163,8 +161,7 @@ class Sessions(SyncAPIResource):
 
           voice: The voice the model uses to respond. Voice cannot be changed during the session
               once the model has responded with audio at least once. Current voice options are
-              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
-              `shimmer`, and `verse`.
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
 
           extra_headers: Send extra headers
 
@@ -251,9 +248,7 @@ class AsyncSessions(AsyncAPIResource):
         tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
         tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN,
         turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
-        voice: Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ]
+        voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
         | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -348,8 +343,7 @@ class AsyncSessions(AsyncAPIResource):
 
           voice: The voice the model uses to respond. Voice cannot be changed during the session
               once the model has responded with audio at least once. Current voice options are
-              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
-              `shimmer`, and `verse`.
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
 
           extra_headers: Send extra headers
 
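The realtime sessions resource gets the same narrowing, in both the signatures and the docstrings. A hedged sketch of session creation; the model name is illustrative:

from openai import OpenAI

client = OpenAI()

session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",  # illustrative model name
    voice="verse",  # must come from the shorter Literal, or be a plain str
)
print(session.id)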
src/openai/resources/chat/completions/completions.py
@@ -417,7 +417,7 @@ class Completions(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -697,7 +697,7 @@ class Completions(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -968,7 +968,7 @@ class Completions(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -1784,7 +1784,7 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -2064,7 +2064,7 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -2335,7 +2335,7 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
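Every hunk in this file is the same one-word docstring fix ("requset" to "request") in the `service_tier` documentation; no runtime behavior changes. For reference, the documented parameter is passed per request, along these lines (model name illustrative):

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model name
    messages=[{"role": "user", "content": "Hello"}],
    service_tier="default",  # standard pricing and performance, per the docstring
)
print(completion.service_tier)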
src/openai/resources/responses/responses.py
@@ -198,7 +198,7 @@ class Responses(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -414,7 +414,7 @@ class Responses(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -623,7 +623,7 @@ class Responses(SyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -1463,7 +1463,7 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -1679,7 +1679,7 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
@@ -1888,7 +1888,7 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'auto', then the request will be processed with the service tier
                 configured in the Project settings. Unless otherwise configured, the Project
                 will use 'default'.
-              - If set to 'default', then the requset will be processed with the standard
+              - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                 'priority', then the request will be processed with the corresponding service
src/openai/resources/images.py
@@ -196,6 +196,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -310,6 +313,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -420,6 +426,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -579,6 +588,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
@@ -690,6 +702,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
@@ -797,6 +812,9 @@ class Images(SyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
@@ -1066,6 +1084,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -1180,6 +1201,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -1290,6 +1314,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated. `high`, `medium` and `low` are
               only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
               Defaults to `auto`.
@@ -1449,6 +1476,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
@@ -1560,6 +1590,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
@@ -1667,6 +1700,9 @@ class AsyncImages(AsyncAPIResource):
               responses that return partial images. Value must be between 0 and 3. When set to
               0, the response will be a single image sent in one streaming event.
 
+              Note that the final image may be sent before the full number of partial images
+              are generated if the full image is generated more quickly.
+
           quality: The quality of the image that will be generated.
 
               - `auto` (default value) will automatically select the best quality for the
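The recurring addition clarifies that `partial_images` is an upper bound rather than a guarantee: the final image can arrive before all partials do. A hedged streaming sketch; the event type strings reflect the Images streaming surface as I understand it and should be treated as assumptions:

from openai import OpenAI

client = OpenAI()

stream = client.images.generate(
    model="gpt-image-1",
    prompt="A watercolor lighthouse at dusk",
    stream=True,
    partial_images=3,  # up to 3 partials; fewer may arrive if the final image is ready sooner
)
for event in stream:
    if event.type == "image_generation.partial_image":
        print("partial image", event.partial_image_index)
    elif event.type == "image_generation.completed":
        print("final image received")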
src/openai/types/audio/speech_create_params.py
@@ -20,11 +20,7 @@ class SpeechCreateParams(TypedDict, total=False):
     `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
     """
 
-    voice: Required[
-        Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ]
-    ]
+    voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]]
     """The voice to use when generating the audio.
 
     Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`,
src/openai/types/beta/realtime/realtime_response.py
@@ -80,13 +80,8 @@ class RealtimeResponse(BaseModel):
     will become the input for later turns.
     """
 
-    voice: Union[
-        str,
-        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
-        None,
-    ] = None
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
     """
     The voice the model used to respond. Current voice options are `alloy`, `ash`,
-    `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
-    `verse`.
+    `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
src/openai/types/beta/realtime/response_create_event.py
@@ -101,16 +101,12 @@ class Response(BaseModel):
     tools: Optional[List[ResponseTool]] = None
     """Tools (functions) available to the model."""
 
-    voice: Union[
-        str,
-        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
-        None,
-    ] = None
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/response_create_event_param.py
@@ -102,14 +102,12 @@ class Response(TypedDict, total=False):
     tools: Iterable[ResponseTool]
     """Tools (functions) available to the model."""
 
-    voice: Union[
-        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-    ]
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session.py
@@ -268,14 +268,10 @@ class Session(BaseModel):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Union[
-        str,
-        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
-        None,
-    ] = None
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
src/openai/types/beta/realtime/session_create_params.py
@@ -145,14 +145,12 @@ class SessionCreateParams(TypedDict, total=False):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Union[
-        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-    ]
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session_create_response.py
@@ -187,14 +187,10 @@ class SessionCreateResponse(BaseModel):
     speech.
     """
 
-    voice: Union[
-        str,
-        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
-        None,
-    ] = None
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
src/openai/types/beta/realtime/session_update_event.py
@@ -290,16 +290,12 @@ class Session(BaseModel):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Union[
-        str,
-        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
-        None,
-    ] = None
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session_update_event_param.py
@@ -288,14 +288,12 @@ class Session(TypedDict, total=False):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Union[
-        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-    ]
+    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
+    `coral`, `echo`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/chat/chat_completion.py
@@ -65,7 +65,7 @@ class ChatCompletion(BaseModel):
     - If set to 'auto', then the request will be processed with the service tier
       configured in the Project settings. Unless otherwise configured, the Project
       will use 'default'.
-    - If set to 'default', then the requset will be processed with the standard
+    - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
       'priority', then the request will be processed with the corresponding service
src/openai/types/chat/chat_completion_audio_param.py
@@ -15,11 +15,7 @@ class ChatCompletionAudioParam(TypedDict, total=False):
     Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
     """
 
-    voice: Required[
-        Union[
-            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
-        ]
-    ]
+    voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]]
     """The voice the model uses to respond.
 
     Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`,
src/openai/types/chat/chat_completion_chunk.py
@@ -134,7 +134,7 @@ class ChatCompletionChunk(BaseModel):
     - If set to 'auto', then the request will be processed with the service tier
       configured in the Project settings. Unless otherwise configured, the Project
       will use 'default'.
-    - If set to 'default', then the requset will be processed with the standard
+    - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
       'priority', then the request will be processed with the corresponding service
src/openai/types/chat/completion_create_params.py
@@ -214,7 +214,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     - If set to 'auto', then the request will be processed with the service tier
       configured in the Project settings. Unless otherwise configured, the Project
       will use 'default'.
-    - If set to 'default', then the requset will be processed with the standard
+    - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
       'priority', then the request will be processed with the corresponding service
src/openai/types/responses/__init__.py
@@ -81,11 +81,9 @@ from .response_input_content_param import ResponseInputContentParam as ResponseI
 from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
 from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
 from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
-from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent
 from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
 from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
 from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent
-from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent
 from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
 from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
src/openai/types/responses/response.py
@@ -176,7 +176,7 @@ class Response(BaseModel):
     - If set to 'auto', then the request will be processed with the service tier
       configured in the Project settings. Unless otherwise configured, the Project
       will use 'default'.
-    - If set to 'default', then the requset will be processed with the standard
+    - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
       'priority', then the request will be processed with the corresponding service
src/openai/types/responses/response_code_interpreter_tool_call.py
@@ -45,7 +45,11 @@ class ResponseCodeInterpreterToolCall(BaseModel):
     """
 
     status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]
-    """The status of the code interpreter tool call."""
+    """The status of the code interpreter tool call.
+
+    Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and
+    `failed`.
+    """
 
     type: Literal["code_interpreter_call"]
     """The type of the code interpreter tool call. Always `code_interpreter_call`."""
src/openai/types/responses/response_code_interpreter_tool_call_param.py
@@ -44,7 +44,11 @@ class ResponseCodeInterpreterToolCallParam(TypedDict, total=False):
     """
 
     status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]]
-    """The status of the code interpreter tool call."""
+    """The status of the code interpreter tool call.
+
+    Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and
+    `failed`.
+    """
 
     type: Required[Literal["code_interpreter_call"]]
     """The type of the code interpreter tool call. Always `code_interpreter_call`."""
src/openai/types/responses/response_create_params.py
@@ -136,7 +136,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     - If set to 'auto', then the request will be processed with the service tier
       configured in the Project settings. Unless otherwise configured, the Project
       will use 'default'.
-    - If set to 'default', then the requset will be processed with the standard
+    - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
       'priority', then the request will be processed with the corresponding service
src/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -8,8 +8,11 @@ __all__ = ["ResponseMcpCallArgumentsDeltaEvent"]
 
 
 class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
-    delta: object
-    """The partial update to the arguments for the MCP tool call."""
+    delta: str
+    """
+    A JSON string containing the partial update to the arguments for the MCP tool
+    call.
+    """
 
     item_id: str
     """The unique identifier of the MCP tool call item being processed."""
src/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -8,8 +8,8 @@ __all__ = ["ResponseMcpCallArgumentsDoneEvent"]
 
 
 class ResponseMcpCallArgumentsDoneEvent(BaseModel):
-    arguments: object
-    """The finalized arguments for the MCP tool call."""
+    arguments: str
+    """A JSON string containing the finalized arguments for the MCP tool call."""
 
     item_id: str
     """The unique identifier of the MCP tool call item being processed."""
src/openai/types/responses/response_mcp_call_completed_event.py
@@ -8,6 +8,12 @@ __all__ = ["ResponseMcpCallCompletedEvent"]
 
 
 class ResponseMcpCallCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that completed."""
+
+    output_index: int
+    """The index of the output item that completed."""
+
     sequence_number: int
     """The sequence number of this event."""
 
src/openai/types/responses/response_mcp_call_failed_event.py
@@ -8,6 +8,12 @@ __all__ = ["ResponseMcpCallFailedEvent"]
 
 
 class ResponseMcpCallFailedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that failed."""
+
+    output_index: int
+    """The index of the output item that failed."""
+
     sequence_number: int
     """The sequence number of this event."""
 
src/openai/types/responses/response_mcp_list_tools_completed_event.py
@@ -8,6 +8,12 @@ __all__ = ["ResponseMcpListToolsCompletedEvent"]
 
 
 class ResponseMcpListToolsCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that produced this output."""
+
+    output_index: int
+    """The index of the output item that was processed."""
+
     sequence_number: int
     """The sequence number of this event."""
 
src/openai/types/responses/response_mcp_list_tools_failed_event.py
@@ -8,6 +8,12 @@ __all__ = ["ResponseMcpListToolsFailedEvent"]
 
 
 class ResponseMcpListToolsFailedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that failed."""
+
+    output_index: int
+    """The index of the output item that failed."""
+
     sequence_number: int
     """The sequence number of this event."""
 
src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
@@ -8,6 +8,12 @@ __all__ = ["ResponseMcpListToolsInProgressEvent"]
 
 
 class ResponseMcpListToolsInProgressEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that is being processed."""
+
+    output_index: int
+    """The index of the output item that is being processed."""
+
     sequence_number: int
     """The sequence number of this event."""
 
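All five MCP lifecycle events now carry `item_id` and `output_index`, so a handler can attribute completion or failure to a specific tool-call item instead of inferring it from event order. For instance:

from openai.types.responses import (
    ResponseMcpCallCompletedEvent,
    ResponseMcpCallFailedEvent,
)

def on_event(event) -> None:  # `event` is any ResponseStreamEvent
    if isinstance(event, ResponseMcpCallCompletedEvent):
        print(f"MCP call {event.item_id} (output #{event.output_index}) completed")
    elif isinstance(event, ResponseMcpCallFailedEvent):
        print(f"MCP call {event.item_id} (output #{event.output_index}) failed")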
src/openai/types/responses/response_reasoning_delta_event.py
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseReasoningDeltaEvent"]
-
-
-class ResponseReasoningDeltaEvent(BaseModel):
-    content_index: int
-    """The index of the reasoning content part within the output item."""
-
-    delta: object
-    """The partial update to the reasoning content."""
-
-    item_id: str
-    """The unique identifier of the item for which reasoning is being updated."""
-
-    output_index: int
-    """The index of the output item in the response's output array."""
-
-    sequence_number: int
-    """The sequence number of this event."""
-
-    type: Literal["response.reasoning.delta"]
-    """The type of the event. Always 'response.reasoning.delta'."""
src/openai/types/responses/response_reasoning_done_event.py
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ResponseReasoningDoneEvent"]
-
-
-class ResponseReasoningDoneEvent(BaseModel):
-    content_index: int
-    """The index of the reasoning content part within the output item."""
-
-    item_id: str
-    """The unique identifier of the item for which reasoning is finalized."""
-
-    output_index: int
-    """The index of the output item in the response's output array."""
-
-    sequence_number: int
-    """The sequence number of this event."""
-
-    text: str
-    """The finalized reasoning text."""
-
-    type: Literal["response.reasoning.done"]
-    """The type of the event. Always 'response.reasoning.done'."""
src/openai/types/responses/response_stream_event.py
@@ -17,9 +17,7 @@ from .response_audio_delta_event import ResponseAudioDeltaEvent
 from .response_in_progress_event import ResponseInProgressEvent
 from .response_refusal_done_event import ResponseRefusalDoneEvent
 from .response_refusal_delta_event import ResponseRefusalDeltaEvent
-from .response_reasoning_done_event import ResponseReasoningDoneEvent
 from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent
-from .response_reasoning_delta_event import ResponseReasoningDeltaEvent
 from .response_output_item_done_event import ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent
 from .response_output_item_added_event import ResponseOutputItemAddedEvent
@@ -111,8 +109,6 @@ ResponseStreamEvent: TypeAlias = Annotated[
         ResponseMcpListToolsInProgressEvent,
         ResponseOutputTextAnnotationAddedEvent,
         ResponseQueuedEvent,
-        ResponseReasoningDeltaEvent,
-        ResponseReasoningDoneEvent,
         ResponseReasoningSummaryDeltaEvent,
         ResponseReasoningSummaryDoneEvent,
     ],
src/openai/types/responses/response_text_delta_event.py
@@ -1,10 +1,30 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
+from typing import List, Optional
 from typing_extensions import Literal
 
 from ..._models import BaseModel
 
-__all__ = ["ResponseTextDeltaEvent"]
+__all__ = ["ResponseTextDeltaEvent", "Logprob", "LogprobTopLogprob"]
+
+
+class LogprobTopLogprob(BaseModel):
+    token: Optional[str] = None
+    """A possible text token."""
+
+    logprob: Optional[float] = None
+    """The log probability of this token."""
+
+
+class Logprob(BaseModel):
+    token: str
+    """A possible text token."""
+
+    logprob: float
+    """The log probability of this token."""
+
+    top_logprobs: Optional[List[LogprobTopLogprob]] = None
+    """The log probability of the top 20 most likely tokens."""
 
 
 class ResponseTextDeltaEvent(BaseModel):
@@ -17,6 +37,9 @@ class ResponseTextDeltaEvent(BaseModel):
     item_id: str
     """The ID of the output item that the text delta was added to."""
 
+    logprobs: List[Logprob]
+    """The log probabilities of the tokens in the delta."""
+
     output_index: int
     """The index of the output item that the text delta was added to."""
 
src/openai/types/responses/response_text_done_event.py
@@ -1,10 +1,30 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
+from typing import List, Optional
 from typing_extensions import Literal
 
 from ..._models import BaseModel
 
-__all__ = ["ResponseTextDoneEvent"]
+__all__ = ["ResponseTextDoneEvent", "Logprob", "LogprobTopLogprob"]
+
+
+class LogprobTopLogprob(BaseModel):
+    token: Optional[str] = None
+    """A possible text token."""
+
+    logprob: Optional[float] = None
+    """The log probability of this token."""
+
+
+class Logprob(BaseModel):
+    token: str
+    """A possible text token."""
+
+    logprob: float
+    """The log probability of this token."""
+
+    top_logprobs: Optional[List[LogprobTopLogprob]] = None
+    """The log probability of the top 20 most likely tokens."""
 
 
 class ResponseTextDoneEvent(BaseModel):
@@ -14,6 +34,9 @@ class ResponseTextDoneEvent(BaseModel):
     item_id: str
     """The ID of the output item that the text content is finalized."""
 
+    logprobs: List[Logprob]
+    """The log probabilities of the tokens in the delta."""
+
     output_index: int
     """The index of the output item that the text content is finalized."""
 
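Both text events gain the same `Logprob` shape, including nested `top_logprobs` alternatives (optional, so guard against `None`). A small illustrative helper:

from openai.types.responses import ResponseTextDoneEvent

def summarize_logprobs(event: ResponseTextDoneEvent) -> None:
    for lp in event.logprobs:
        alternatives = [(alt.token, alt.logprob) for alt in (lp.top_logprobs or [])]
        print(lp.token, lp.logprob, alternatives)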
src/openai/types/shared/function_definition.py
@@ -39,5 +39,5 @@ class FunctionDefinition(BaseModel):
     If set to true, the model will follow the exact schema defined in the
     `parameters` field. Only a subset of JSON Schema is supported when `strict` is
     `true`. Learn more about Structured Outputs in the
-    [function calling guide](docs/guides/function-calling).
+    [function calling guide](https://platform.openai.com/docs/guides/function-calling).
     """
src/openai/types/shared_params/function_definition.py
@@ -41,5 +41,5 @@ class FunctionDefinition(TypedDict, total=False):
     If set to true, the model will follow the exact schema defined in the
     `parameters` field. Only a subset of JSON Schema is supported when `strict` is
     `true`. Learn more about Structured Outputs in the
-    [function calling guide](docs/guides/function-calling).
+    [function calling guide](https://platform.openai.com/docs/guides/function-calling).
     """
src/openai/types/image_edit_params.py
@@ -85,6 +85,9 @@ class ImageEditParamsBase(TypedDict, total=False):
     This parameter is used for streaming responses that return partial images. Value
     must be between 0 and 3. When set to 0, the response will be a single image sent
     in one streaming event.
+
+    Note that the final image may be sent before the full number of partial images
+    are generated if the full image is generated more quickly.
     """
 
     quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
src/openai/types/image_generate_params.py
@@ -68,6 +68,9 @@ class ImageGenerateParamsBase(TypedDict, total=False):
     This parameter is used for streaming responses that return partial images. Value
     must be between 0 and 3. When set to 0, the response will be a single image sent
     in one streaming event.
+
+    Note that the final image may be sent before the full number of partial images
+    are generated if the full image is generated more quickly.
     """
 
     quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]]
src/openai/types/images_response.py
@@ -25,7 +25,7 @@ class Usage(BaseModel):
     """The input tokens detailed information for the image generation."""
 
     output_tokens: int
-    """The number of image tokens in the output image."""
+    """The number of output tokens generated by the model."""
 
     total_tokens: int
     """The total number of tokens (images and text) used for the image generation."""
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml
-openapi_spec_hash: d8b7d38911fead545adf3e4297956410
-config_hash: b2a4028fdbb27a08de89831ed310e244
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml
+openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0
+config_hash: e822d0c9082c8b312264403949243179
api.md
@@ -791,8 +791,6 @@ from openai.types.responses import (
     ResponseOutputTextAnnotationAddedEvent,
     ResponsePrompt,
     ResponseQueuedEvent,
-    ResponseReasoningDeltaEvent,
-    ResponseReasoningDoneEvent,
     ResponseReasoningItem,
     ResponseReasoningSummaryDeltaEvent,
     ResponseReasoningSummaryDoneEvent,