Commit 2e73b529

Author: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Date:   2025-03-27 04:32:36
Parent: a4b9f40

chore(api): updates to supported Voice IDs (#2261)
src/openai/resources/audio/speech.py
@@ -53,7 +53,9 @@ class Speech(SyncAPIResource):
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
+        voice: Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ],
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
@@ -75,8 +77,8 @@ class Speech(SyncAPIResource):
               `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
 
           voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
-              voices are available in the
+              `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+              `verse`. Previews of the voices are available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
 
           instructions: Control the voice of your generated audio with additional instructions. Does not
@@ -142,7 +144,9 @@ class AsyncSpeech(AsyncAPIResource):
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
+        voice: Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ],
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
@@ -164,8 +168,8 @@ class AsyncSpeech(AsyncAPIResource):
               `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
 
           voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
-              voices are available in the
+              `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+              `verse`. Previews of the voices are available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
 
           instructions: Control the voice of your generated audio with additional instructions. Does not
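The widened `voice` annotation means callers can pass the new `ballad` and `verse` literals, or any plain string for forward compatibility with voices added after this SDK release. A minimal sketch of exercising the updated parameter (assumes `OPENAI_API_KEY` is set and writes to a local path):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# "ballad" and "verse" are newly accepted literals; any plain string also
# type-checks now, so future voice IDs won't require an SDK update.
response = client.audio.speech.create(
    input="Hello from the new voice lineup.",
    model="gpt-4o-mini-tts",
    voice="ballad",
)
response.write_to_file("speech.mp3")
```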
src/openai/resources/beta/realtime/sessions.py
@@ -65,7 +65,10 @@ class Sessions(SyncAPIResource):
         tool_choice: str | NotGiven = NOT_GIVEN,
         tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
         turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
-        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
+        voice: Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ]
+        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -147,7 +150,8 @@ class Sessions(SyncAPIResource):
 
           voice: The voice the model uses to respond. Voice cannot be changed during the session
               once the model has responded with audio at least once. Current voice options are
-              `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`.
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+              `shimmer`, and `verse`.
 
           extra_headers: Send extra headers
 
@@ -227,7 +231,10 @@ class AsyncSessions(AsyncAPIResource):
         tool_choice: str | NotGiven = NOT_GIVEN,
         tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
         turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
-        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
+        voice: Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ]
+        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -309,7 +316,8 @@ class AsyncSessions(AsyncAPIResource):
 
           voice: The voice the model uses to respond. Voice cannot be changed during the session
               once the model has responded with audio at least once. Current voice options are
-              `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`.
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+              `shimmer`, and `verse`.
 
           extra_headers: Send extra headers
 
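The realtime `voice` parameter gets the same `Union[str, Literal[...]]` treatment, so a session can be pinned to one of the expanded voices at creation time. A hedged sketch (the model name is illustrative):

```python
from openai import OpenAI

client = OpenAI()

# voice accepts the expanded literal set (now including "fable", "onyx",
# and "nova") or any future voice ID as a plain string.
session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",
    voice="verse",
)
print(session.client_secret.value)
```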
src/openai/resources/responses/input_items.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Any, cast
+from typing import Any, List, cast
 from typing_extensions import Literal
 
 import httpx
@@ -17,6 +17,7 @@ from ...pagination import SyncCursorPage, AsyncCursorPage
 from ..._base_client import AsyncPaginator, make_request_options
 from ...types.responses import input_item_list_params
 from ...types.responses.response_item import ResponseItem
+from ...types.responses.response_includable import ResponseIncludable
 
 __all__ = ["InputItems", "AsyncInputItems"]
 
@@ -47,6 +48,7 @@ class InputItems(SyncAPIResource):
         *,
         after: str | NotGiven = NOT_GIVEN,
         before: str | NotGiven = NOT_GIVEN,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
         limit: int | NotGiven = NOT_GIVEN,
         order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -64,6 +66,9 @@ class InputItems(SyncAPIResource):
 
           before: An item ID to list items before, used in pagination.
 
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
           limit: A limit on the number of objects to be returned. Limit can range between 1 and
               100, and the default is 20.
 
@@ -94,6 +99,7 @@ class InputItems(SyncAPIResource):
                     {
                         "after": after,
                         "before": before,
+                        "include": include,
                         "limit": limit,
                         "order": order,
                     },
@@ -130,6 +136,7 @@ class AsyncInputItems(AsyncAPIResource):
         *,
         after: str | NotGiven = NOT_GIVEN,
         before: str | NotGiven = NOT_GIVEN,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
         limit: int | NotGiven = NOT_GIVEN,
         order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -147,6 +154,9 @@ class AsyncInputItems(AsyncAPIResource):
 
           before: An item ID to list items before, used in pagination.
 
+          include: Additional fields to include in the response. See the `include` parameter for
+              Response creation above for more information.
+
           limit: A limit on the number of objects to be returned. Limit can range between 1 and
               100, and the default is 20.
 
@@ -177,6 +187,7 @@ class AsyncInputItems(AsyncAPIResource):
                     {
                         "after": after,
                         "before": before,
+                        "include": include,
                         "limit": limit,
                         "order": order,
                     },
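The new `include` parameter mirrors the `include` parameter on Response creation and, as the hunk above shows, is forwarded as a query parameter. A sketch of listing input items with extra fields (the response ID is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# "file_search_call.results" is one ResponseIncludable value; the list is
# passed through to the `include` query parameter added in this commit.
items = client.responses.input_items.list(
    "resp_123",  # placeholder response ID
    include=["file_search_call.results"],
    limit=20,
    order="asc",
)
for item in items:
    print(item.id, item.type)
```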
src/openai/resources/responses/responses.py
@@ -149,8 +149,8 @@ class Responses(SyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
@@ -321,8 +321,8 @@ class Responses(SyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
@@ -486,8 +486,8 @@ class Responses(SyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
@@ -961,8 +961,8 @@ class AsyncResponses(AsyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
@@ -1133,8 +1133,8 @@ class AsyncResponses(AsyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
@@ -1298,8 +1298,8 @@ class AsyncResponses(AsyncAPIResource):
               context.
 
               When using along with `previous_response_id`, the instructions from a previous
-              response will be not be carried over to the next response. This makes it simple
-              to swap out system (or developer) messages in new responses.
+              response will not be carried over to the next response. This makes it simple to
+              swap out system (or developer) messages in new responses.
 
           max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
               including visible output tokens and
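The docstring fix above clarifies the actual behavior: `instructions` are not inherited across `previous_response_id`, which is what makes swapping system-style messages cheap. A short sketch of that pattern:

```python
from openai import OpenAI

client = OpenAI()

first = client.responses.create(
    model="gpt-4o",
    instructions="Answer like a pirate.",
    input="What is 2 + 2?",
)

# Because instructions are NOT carried over via previous_response_id,
# the follow-up can swap in a fresh system/developer message.
second = client.responses.create(
    model="gpt-4o",
    previous_response_id=first.id,
    instructions="Answer in formal English.",
    input="And what is that squared?",
)
print(second.output_text)
```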
src/openai/types/audio/speech_create_params.py
@@ -20,11 +20,16 @@ class SpeechCreateParams(TypedDict, total=False):
     `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
     """
 
-    voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]]
+    voice: Required[
+        Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ]
+    ]
     """The voice to use when generating the audio.
 
-    Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`,
-    `sage` and `shimmer`. Previews of the voices are available in the
+    Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`,
+    `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in
+    the
     [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
     """
 
src/openai/types/beta/realtime/realtime_response.py
@@ -80,8 +80,13 @@ class RealtimeResponse(BaseModel):
     will become the input for later turns.
     """
 
-    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    voice: Union[
+        str,
+        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
+        None,
+    ] = None
     """
     The voice the model used to respond. Current voice options are `alloy`, `ash`,
-    `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+    `verse`.
     """
src/openai/types/beta/realtime/response_create_event.py
@@ -101,12 +101,16 @@ class Response(BaseModel):
     tools: Optional[List[ResponseTool]] = None
     """Tools (functions) available to the model."""
 
-    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    voice: Union[
+        str,
+        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
+        None,
+    ] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/response_create_event_param.py
@@ -102,12 +102,14 @@ class Response(TypedDict, total=False):
     tools: Iterable[ResponseTool]
     """Tools (functions) available to the model."""
 
-    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    voice: Union[
+        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+    ]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session.py
@@ -218,7 +218,11 @@ class Session(BaseModel):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    voice: Union[
+        str,
+        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
+        None,
+    ] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
src/openai/types/beta/realtime/session_create_params.py
@@ -113,12 +113,14 @@ class SessionCreateParams(TypedDict, total=False):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    voice: Union[
+        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+    ]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session_create_response.py
@@ -141,7 +141,11 @@ class SessionCreateResponse(BaseModel):
     speech.
     """
 
-    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    voice: Union[
+        str,
+        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
+        None,
+    ] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
src/openai/types/beta/realtime/session_update_event.py
@@ -222,12 +222,16 @@ class Session(BaseModel):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+    voice: Union[
+        str,
+        Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
+        None,
+    ] = None
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
     """
 
 
src/openai/types/beta/realtime/session_update_event_param.py
@@ -220,12 +220,14 @@ class Session(TypedDict, total=False):
     natural conversations, but may have a higher latency.
     """
 
-    voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
+    voice: Union[
+        str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+    ]
     """The voice the model uses to respond.
 
     Voice cannot be changed during the session once the model has responded with
     audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
-    `coral`, `echo` `sage`, `shimmer` and `verse`.
+    `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`.
     """
 
 
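The same widened union applies to `session.update` events, of which the `Session` TypedDict above is the payload. A hedged sketch of sending one over the SDK's beta realtime WebSocket connection (the connect helper and model name are assumed available in this beta):

```python
from openai import OpenAI

client = OpenAI()

# The realtime beta exposes a context-managed WebSocket connection;
# session.update takes the Session shape patched above, so any of the
# expanded voice IDs (or a plain string) is accepted here too.
with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    connection.session.update(session={"voice": "onyx"})
    for event in connection:
        if event.type == "session.updated":
            break
```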
src/openai/types/beta/realtime/transcription_session_create_params.py
@@ -96,9 +96,10 @@ class InputAudioTranscription(TypedDict, total=False):
 
 class TurnDetection(TypedDict, total=False):
     create_response: bool
-    """
-    Whether or not to automatically generate a response when a VAD stop event
+    """Whether or not to automatically generate a response when a VAD stop event
     occurs.
+
+    Not available for transcription sessions.
     """
 
     eagerness: Literal["low", "medium", "high", "auto"]
@@ -113,7 +114,7 @@ class TurnDetection(TypedDict, total=False):
     """
     Whether or not to automatically interrupt any ongoing response with output to
     the default conversation (i.e. `conversation` of `auto`) when a VAD start event
-    occurs.
+    occurs. Not available for transcription sessions.
     """
 
     prefix_padding_ms: int
src/openai/types/beta/realtime/transcription_session_update.py
@@ -50,9 +50,10 @@ class SessionInputAudioTranscription(BaseModel):
 
 class SessionTurnDetection(BaseModel):
     create_response: Optional[bool] = None
-    """
-    Whether or not to automatically generate a response when a VAD stop event
+    """Whether or not to automatically generate a response when a VAD stop event
     occurs.
+
+    Not available for transcription sessions.
     """
 
     eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
@@ -67,7 +68,7 @@ class SessionTurnDetection(BaseModel):
     """
     Whether or not to automatically interrupt any ongoing response with output to
     the default conversation (i.e. `conversation` of `auto`) when a VAD start event
-    occurs.
+    occurs. Not available for transcription sessions.
     """
 
     prefix_padding_ms: Optional[int] = None
src/openai/types/beta/realtime/transcription_session_update_param.py
@@ -50,9 +50,10 @@ class SessionInputAudioTranscription(TypedDict, total=False):
 
 class SessionTurnDetection(TypedDict, total=False):
     create_response: bool
-    """
-    Whether or not to automatically generate a response when a VAD stop event
+    """Whether or not to automatically generate a response when a VAD stop event
     occurs.
+
+    Not available for transcription sessions.
     """
 
     eagerness: Literal["low", "medium", "high", "auto"]
@@ -67,7 +68,7 @@ class SessionTurnDetection(TypedDict, total=False):
     """
     Whether or not to automatically interrupt any ongoing response with output to
     the default conversation (i.e. `conversation` of `auto`) when a VAD start event
-    occurs.
+    occurs. Not available for transcription sessions.
     """
 
     prefix_padding_ms: int
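The clarified docstrings note that `create_response` and `interrupt_response` do not apply to transcription sessions, so a transcription-session `turn_detection` would simply omit them. A sketch against the beta transcription-sessions resource these types describe (transcription model name is illustrative):

```python
from openai import OpenAI

client = OpenAI()

# create_response / interrupt_response are omitted: per the updated
# docstrings, they are not available for transcription sessions.
session = client.beta.realtime.transcription_sessions.create(
    input_audio_transcription={"model": "gpt-4o-transcribe"},
    turn_detection={
        "type": "server_vad",
        "prefix_padding_ms": 300,
        "silence_duration_ms": 500,
    },
)
print(session)
```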
src/openai/types/chat/chat_completion_audio_param.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from typing import Union
 from typing_extensions import Literal, Required, TypedDict
 
 __all__ = ["ChatCompletionAudioParam"]
@@ -14,7 +15,11 @@ class ChatCompletionAudioParam(TypedDict, total=False):
     Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
     """
 
-    voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
+    voice: Required[
+        Union[
+            str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"]
+        ]
+    ]
     """The voice the model uses to respond.
 
     Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and
src/openai/types/responses/input_item_list_params.py
@@ -2,8 +2,11 @@
 
 from __future__ import annotations
 
+from typing import List
 from typing_extensions import Literal, TypedDict
 
+from .response_includable import ResponseIncludable
+
 __all__ = ["InputItemListParams"]
 
 
@@ -14,6 +17,12 @@ class InputItemListParams(TypedDict, total=False):
     before: str
     """An item ID to list items before, used in pagination."""
 
+    include: List[ResponseIncludable]
+    """Additional fields to include in the response.
+
+    See the `include` parameter for Response creation above for more information.
+    """
+
     limit: int
     """A limit on the number of objects to be returned.
 
src/openai/types/responses/response.py
@@ -47,8 +47,8 @@ class Response(BaseModel):
     context.
 
     When using along with `previous_response_id`, the instructions from a previous
-    response will be not be carried over to the next response. This makes it simple
-    to swap out system (or developer) messages in new responses.
+    response will not be carried over to the next response. This makes it simple to
+    swap out system (or developer) messages in new responses.
     """
 
     metadata: Optional[Metadata] = None
src/openai/types/responses/response_create_params.py
@@ -64,8 +64,8 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     context.
 
     When using along with `previous_response_id`, the instructions from a previous
-    response will be not be carried over to the next response. This makes it simple
-    to swap out system (or developer) messages in new responses.
+    response will not be carried over to the next response. This makes it simple to
+    swap out system (or developer) messages in new responses.
     """
 
     max_output_tokens: Optional[int]
src/openai/types/responses/response_format_text_json_schema_config.py
@@ -11,6 +11,13 @@ __all__ = ["ResponseFormatTextJSONSchemaConfig"]
 
 
 class ResponseFormatTextJSONSchemaConfig(BaseModel):
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
     schema_: Dict[str, object] = FieldInfo(alias="schema")
     """
     The schema for the response format, described as a JSON Schema object. Learn how
@@ -26,13 +33,6 @@ class ResponseFormatTextJSONSchemaConfig(BaseModel):
     how to respond in the format.
     """
 
-    name: Optional[str] = None
-    """The name of the response format.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
     strict: Optional[bool] = None
     """
     Whether to enable strict schema adherence when generating the output. If set to
src/openai/types/responses/response_format_text_json_schema_config_param.py
@@ -9,6 +9,13 @@ __all__ = ["ResponseFormatTextJSONSchemaConfigParam"]
 
 
 class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
     schema: Required[Dict[str, object]]
     """
     The schema for the response format, described as a JSON Schema object. Learn how
@@ -24,13 +31,6 @@ class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False):
     how to respond in the format.
     """
 
-    name: str
-    """The name of the response format.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
     strict: Optional[bool]
     """
     Whether to enable strict schema adherence when generating the output. If set to
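With `name` promoted to `Required`, a structured-output call against the Responses API must always supply it. A sketch of the json_schema text format these types describe:

```python
from openai import OpenAI

client = OpenAI()

# `name` is now Required on the json_schema text format, matching the
# reordered fields in ResponseFormatTextJSONSchemaConfigParam above.
response = client.responses.create(
    model="gpt-4o",
    input="Extract the city from: 'I live in Paris.'",
    text={
        "format": {
            "type": "json_schema",
            "name": "city_extraction",  # required as of this change
            "schema": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    },
)
print(response.output_text)
```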
tests/api_resources/audio/test_speech.py
@@ -28,7 +28,7 @@ class TestSpeech:
         speech = client.audio.speech.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
         assert speech.json() == {"foo": "bar"}
@@ -40,7 +40,7 @@ class TestSpeech:
         speech = client.audio.speech.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
             instructions="instructions",
             response_format="mp3",
             speed=0.25,
@@ -56,7 +56,7 @@ class TestSpeech:
         response = client.audio.speech.with_raw_response.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         )
 
         assert response.is_closed is True
@@ -71,7 +71,7 @@ class TestSpeech:
         with client.audio.speech.with_streaming_response.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -92,7 +92,7 @@ class TestAsyncSpeech:
         speech = await async_client.audio.speech.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
         assert speech.json() == {"foo": "bar"}
@@ -104,7 +104,7 @@ class TestAsyncSpeech:
         speech = await async_client.audio.speech.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
             instructions="instructions",
             response_format="mp3",
             speed=0.25,
@@ -120,7 +120,7 @@ class TestAsyncSpeech:
         response = await async_client.audio.speech.with_raw_response.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         )
 
         assert response.is_closed is True
@@ -135,7 +135,7 @@ class TestAsyncSpeech:
         async with async_client.audio.speech.with_streaming_response.create(
             input="string",
             model="string",
-            voice="alloy",
+            voice="ash",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tests/api_resources/beta/realtime/test_sessions.py
@@ -56,7 +56,7 @@ class TestSessions:
                 "threshold": 0,
                 "type": "server_vad",
             },
-            voice="alloy",
+            voice="ash",
         )
         assert_matches_type(SessionCreateResponse, session, path=["response"])
 
@@ -123,7 +123,7 @@ class TestAsyncSessions:
                 "threshold": 0,
                 "type": "server_vad",
             },
-            voice="alloy",
+            voice="ash",
         )
         assert_matches_type(SessionCreateResponse, session, path=["response"])
 
tests/api_resources/chat/test_completions.py
@@ -48,7 +48,7 @@ class TestCompletions:
             model="gpt-4o",
             audio={
                 "format": "wav",
-                "voice": "alloy",
+                "voice": "ash",
             },
             frequency_penalty=-2,
             function_call="none",
@@ -175,7 +175,7 @@ class TestCompletions:
             stream=True,
             audio={
                 "format": "wav",
-                "voice": "alloy",
+                "voice": "ash",
             },
             frequency_penalty=-2,
             function_call="none",
@@ -475,7 +475,7 @@ class TestAsyncCompletions:
             model="gpt-4o",
             audio={
                 "format": "wav",
-                "voice": "alloy",
+                "voice": "ash",
             },
             frequency_penalty=-2,
             function_call="none",
@@ -602,7 +602,7 @@ class TestAsyncCompletions:
             stream=True,
             audio={
                 "format": "wav",
-                "voice": "alloy",
+                "voice": "ash",
             },
             frequency_penalty=-2,
             function_call="none",
tests/api_resources/responses/test_input_items.py
@@ -31,6 +31,7 @@ class TestInputItems:
             response_id="response_id",
             after="after",
             before="before",
+            include=["file_search_call.results"],
             limit=0,
             order="asc",
         )
@@ -84,6 +85,7 @@ class TestAsyncInputItems:
             response_id="response_id",
             after="after",
             before="before",
+            include=["file_search_call.results"],
             limit=0,
             order="asc",
         )
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 82
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml
-openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml
+openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e
 config_hash: d36e491b0afc4f79e3afad4b3c9bec70