Commit 0843a111

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-08-16 03:11:41
feat(api): add new text parameters, expiration options
1 parent 34014ae
src/openai/resources/beta/realtime/realtime.py
@@ -652,8 +652,8 @@ class RealtimeResponseResource(BaseRealtimeConnectionResource):
         """Send this event to cancel an in-progress response.
 
         The server will respond
-        with a `response.cancelled` event or an error if there is no response to
-        cancel.
+        with a `response.done` event where `response.status=cancelled`. If there
+        is no response to cancel, the server will respond with an error.
         """
         self._connection.send(
             cast(
@@ -904,8 +904,8 @@ class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource):
         """Send this event to cancel an in-progress response.
 
         The server will respond
-        with a `response.cancelled` event or an error if there is no response to
-        cancel.
+        with a `response.done` event where `response.status=cancelled`. If there
+        is no response to cancel, the server will respond with an error.
         """
         await self._connection.send(
             cast(
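
A minimal sketch of the revised cancel flow: after `response.cancel()`, the server now signals cancellation through `response.done` rather than a dedicated `response.cancelled` event. The model name and event loop below are illustrative, not part of this diff.

```python
from openai import OpenAI

client = OpenAI()

# Cancel an in-progress response, then wait for the `response.done`
# event and inspect its status (model name is an assumption).
with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    connection.response.create()
    connection.response.cancel()
    for event in connection:
        if event.type == "response.done":
            print(event.response.status)  # "cancelled" if the cancel landed in time
            break
```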
src/openai/resources/beta/realtime/sessions.py
@@ -152,7 +152,7 @@ class Sessions(SyncAPIResource):
               set to `null` to turn off, in which case the client must manually trigger model
               response. Server VAD means that the model will detect the start and end of
               speech based on audio volume and respond at the end of user speech. Semantic VAD
-              is more advanced and uses a turn detection model (in conjuction with VAD) to
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
               semantically estimate whether the user has finished speaking, then dynamically
               sets a timeout based on this probability. For example, if user audio trails off
               with "uhhm", the model will score a low probability of turn end and wait longer
@@ -334,7 +334,7 @@ class AsyncSessions(AsyncAPIResource):
               set to `null` to turn off, in which case the client must manually trigger model
               response. Server VAD means that the model will detect the start and end of
               speech based on audio volume and respond at the end of user speech. Semantic VAD
-              is more advanced and uses a turn detection model (in conjuction with VAD) to
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
               semantically estimate whether the user has finished speaking, then dynamically
               sets a timeout based on this probability. For example, if user audio trails off
               with "uhhm", the model will score a low probability of turn end and wait longer
src/openai/resources/beta/realtime/transcription_sessions.py
@@ -96,7 +96,7 @@ class TranscriptionSessions(SyncAPIResource):
               set to `null` to turn off, in which case the client must manually trigger model
               response. Server VAD means that the model will detect the start and end of
               speech based on audio volume and respond at the end of user speech. Semantic VAD
-              is more advanced and uses a turn detection model (in conjuction with VAD) to
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
               semantically estimate whether the user has finished speaking, then dynamically
               sets a timeout based on this probability. For example, if user audio trails off
               with "uhhm", the model will score a low probability of turn end and wait longer
@@ -209,7 +209,7 @@ class AsyncTranscriptionSessions(AsyncAPIResource):
               set to `null` to turn off, in which case the client must manually trigger model
               response. Server VAD means that the model will detect the start and end of
               speech based on audio volume and respond at the end of user speech. Semantic VAD
-              is more advanced and uses a turn detection model (in conjuction with VAD) to
+              is more advanced and uses a turn detection model (in conjunction with VAD) to
               semantically estimate whether the user has finished speaking, then dynamically
               sets a timeout based on this probability. For example, if user audio trails off
               with "uhhm", the model will score a low probability of turn end and wait longer
src/openai/resources/beta/threads/runs/runs.py
@@ -220,7 +220,7 @@ class Runs(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -370,7 +370,7 @@ class Runs(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -520,7 +520,7 @@ class Runs(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1650,7 +1650,7 @@ class AsyncRuns(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1800,7 +1800,7 @@ class AsyncRuns(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1950,7 +1950,7 @@ class AsyncRuns(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
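A sketch of the `truncation_strategy` control documented above (IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# Limit the run's initial context window to the last 10 thread messages.
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    truncation_strategy={"type": "last_messages", "last_messages": 10},
)
```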
src/openai/resources/beta/threads/threads.py
@@ -393,7 +393,7 @@ class Threads(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -527,7 +527,7 @@ class Threads(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -661,7 +661,7 @@ class Threads(SyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1251,7 +1251,7 @@ class AsyncThreads(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1385,7 +1385,7 @@ class AsyncThreads(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
@@ -1519,7 +1519,7 @@ class AsyncThreads(AsyncAPIResource):
               We generally recommend altering this or temperature but not both.
 
           truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
-              control the intial context window of the run.
+              control the initial context window of the run.
 
           extra_headers: Send extra headers
 
src/openai/resources/chat/completions/completions.py
@@ -103,6 +103,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -203,6 +204,7 @@ class Completions(SyncAPIResource):
                     "prompt_cache_key": prompt_cache_key,
                     "reasoning_effort": reasoning_effort,
                     "response_format": _type_to_response_format(response_format),
+                    "text": text,
                     "safety_identifier": safety_identifier,
                     "seed": seed,
                     "service_tier": service_tier,
@@ -265,6 +267,7 @@ class Completions(SyncAPIResource):
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -438,9 +441,8 @@ class Completions(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -554,6 +556,7 @@ class Completions(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -736,9 +739,8 @@ class Completions(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -843,6 +845,7 @@ class Completions(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1025,9 +1028,8 @@ class Completions(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -1132,6 +1134,7 @@ class Completions(SyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1178,6 +1181,7 @@ class Completions(SyncAPIResource):
                     "stream": stream,
                     "stream_options": stream_options,
                     "temperature": temperature,
+                    "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
                     "top_logprobs": top_logprobs,
@@ -1400,6 +1404,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -1470,6 +1475,7 @@ class Completions(SyncAPIResource):
             presence_penalty=presence_penalty,
             prompt_cache_key=prompt_cache_key,
             reasoning_effort=reasoning_effort,
+            text=text,
             safety_identifier=safety_identifier,
             seed=seed,
             service_tier=service_tier,
@@ -1542,6 +1548,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -1642,6 +1649,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "prompt_cache_key": prompt_cache_key,
                     "reasoning_effort": reasoning_effort,
                     "response_format": _type_to_response_format(response_format),
+                    "text": text,
                     "safety_identifier": safety_identifier,
                     "seed": seed,
                     "service_tier": service_tier,
@@ -1704,6 +1712,7 @@ class AsyncCompletions(AsyncAPIResource):
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1877,9 +1886,8 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -1993,6 +2001,7 @@ class AsyncCompletions(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2175,9 +2184,8 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -2282,6 +2290,7 @@ class AsyncCompletions(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2464,9 +2473,8 @@ class AsyncCompletions(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -2571,6 +2579,7 @@ class AsyncCompletions(AsyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
         tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2617,6 +2626,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "stream": stream,
                     "stream_options": stream_options,
                     "temperature": temperature,
+                    "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
                     "top_logprobs": top_logprobs,
@@ -2839,6 +2849,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
+        text: completion_create_params.Text | NotGiven = NOT_GIVEN,
         safety_identifier: str | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -2910,6 +2921,7 @@ class AsyncCompletions(AsyncAPIResource):
             presence_penalty=presence_penalty,
             prompt_cache_key=prompt_cache_key,
             reasoning_effort=reasoning_effort,
+            text=text,
             safety_identifier=safety_identifier,
             seed=seed,
             service_tier=service_tier,
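
A hedged sketch combining the new `text` parameter with the relinked priority tier. The `verbosity` field is an assumption about the shape of `completion_create_params.Text`; consult that type for the actual fields.

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Summarize this changelog entry."}],
    text={"verbosity": "low"},  # assumed Text shape
    service_tier="priority",    # processed under the priority service tier
)
```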
src/openai/resources/responses/responses.py
@@ -43,7 +43,6 @@ from ...types.shared_params.responses_model import ResponsesModel
 from ...types.responses.response_input_param import ResponseInputParam
 from ...types.responses.response_prompt_param import ResponsePromptParam
 from ...types.responses.response_stream_event import ResponseStreamEvent
-from ...types.responses.response_text_config_param import ResponseTextConfigParam
 
 __all__ = ["Responses", "AsyncResponses"]
 
@@ -95,7 +94,7 @@ class Responses(SyncAPIResource):
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -195,7 +194,7 @@ class Responses(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -214,9 +213,8 @@ class Responses(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -240,12 +238,6 @@ class Responses(SyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -323,7 +315,7 @@ class Responses(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -430,7 +422,7 @@ class Responses(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -449,9 +441,8 @@ class Responses(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -468,12 +459,6 @@ class Responses(SyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -551,7 +536,7 @@ class Responses(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -658,7 +643,7 @@ class Responses(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -677,9 +662,8 @@ class Responses(SyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -696,12 +680,6 @@ class Responses(SyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -778,7 +756,7 @@ class Responses(SyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -869,7 +847,7 @@ class Responses(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -901,7 +879,7 @@ class Responses(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -1030,7 +1008,7 @@ class Responses(SyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1461,7 +1439,7 @@ class AsyncResponses(AsyncAPIResource):
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1561,7 +1539,7 @@ class AsyncResponses(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -1580,9 +1558,8 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -1606,12 +1583,6 @@ class AsyncResponses(AsyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -1689,7 +1660,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1796,7 +1767,7 @@ class AsyncResponses(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -1815,9 +1786,8 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -1834,12 +1804,6 @@ class AsyncResponses(AsyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -1917,7 +1881,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2024,7 +1988,7 @@ class AsyncResponses(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning: **o-series models only**
+          reasoning: **gpt-5 and o-series models only**
 
               Configuration options for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -2043,9 +2007,8 @@ class AsyncResponses(AsyncAPIResource):
               - If set to 'default', then the request will be processed with the standard
                 pricing and performance for the selected model.
               - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-                'priority', then the request will be processed with the corresponding service
-                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-                Priority processing.
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
               - When not set, the default behavior is 'auto'.
 
               When the `service_tier` parameter is set, the response body will include the
@@ -2062,12 +2025,6 @@ class AsyncResponses(AsyncAPIResource):
               focused and deterministic. We generally recommend altering this or `top_p` but
               not both.
 
-          text: Configuration options for a text response from the model. Can be plain text or
-              structured JSON data. Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
           tool_choice: How the model should select which tool (or tools) to use when generating a
               response. See the `tools` parameter to see how to specify which tools the model
               can call.
@@ -2144,7 +2101,7 @@ class AsyncResponses(AsyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2235,7 +2192,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2267,7 +2224,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2400,7 +2357,7 @@ class AsyncResponses(AsyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: response_create_params.Text | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
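
The change here is type-level only: `response_create_params.Text` is passed the same way `ResponseTextConfigParam` was. A minimal sketch (model name illustrative):

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="Write a haiku about type aliases.",
    text={"format": {"type": "text"}},  # plain-text output configuration
)
```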
src/openai/resources/uploads/uploads.py
@@ -170,6 +170,7 @@ class Uploads(SyncAPIResource):
         filename: str,
         mime_type: str,
         purpose: FilePurpose,
+        expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -213,6 +214,9 @@ class Uploads(SyncAPIResource):
               See the
               [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
 
+          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
+              after 30 days and all other files are persisted until they are manually deleted.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -229,6 +233,7 @@ class Uploads(SyncAPIResource):
                     "filename": filename,
                     "mime_type": mime_type,
                     "purpose": purpose,
+                    "expires_after": expires_after,
                 },
                 upload_create_params.UploadCreateParams,
             ),
@@ -473,6 +478,7 @@ class AsyncUploads(AsyncAPIResource):
         filename: str,
         mime_type: str,
         purpose: FilePurpose,
+        expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -516,6 +522,9 @@ class AsyncUploads(AsyncAPIResource):
               See the
               [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
 
+          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
+              after 30 days and all other files are persisted until they are manually deleted.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -532,6 +541,7 @@ class AsyncUploads(AsyncAPIResource):
                     "filename": filename,
                     "mime_type": mime_type,
                     "purpose": purpose,
+                    "expires_after": expires_after,
                 },
                 upload_create_params.UploadCreateParams,
             ),
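
A hedged sketch of the new `expires_after` option on uploads; the `anchor`/`seconds` shape is assumed from the file-expiration wording above, so check `upload_create_params.ExpiresAfter` for the exact fields.

```python
from openai import OpenAI

client = OpenAI()

# Start an upload whose resulting File object expires 7 days after creation.
upload = client.uploads.create(
    bytes=1024,
    filename="batch_input.jsonl",
    mime_type="application/jsonl",
    purpose="batch",
    expires_after={"anchor": "created_at", "seconds": 7 * 86400},
)
```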
src/openai/resources/batches.py
@@ -49,6 +49,7 @@ class Batches(SyncAPIResource):
         endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -85,6 +86,9 @@ class Batches(SyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
+          output_expires_after: The expiration policy for the output and/or error files that are generated for
+              a batch.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -101,6 +105,7 @@ class Batches(SyncAPIResource):
                     "endpoint": endpoint,
                     "input_file_id": input_file_id,
                     "metadata": metadata,
+                    "output_expires_after": output_expires_after,
                 },
                 batch_create_params.BatchCreateParams,
             ),
@@ -259,6 +264,7 @@ class AsyncBatches(AsyncAPIResource):
         endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -295,6 +301,9 @@ class AsyncBatches(AsyncAPIResource):
               Keys are strings with a maximum length of 64 characters. Values are strings with
               a maximum length of 512 characters.
 
+          output_expires_after: The expiration policy for the output and/or error files that are generated for
+              a batch.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -311,6 +320,7 @@ class AsyncBatches(AsyncAPIResource):
                     "endpoint": endpoint,
                     "input_file_id": input_file_id,
                     "metadata": metadata,
+                    "output_expires_after": output_expires_after,
                 },
                 batch_create_params.BatchCreateParams,
             ),
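
A hedged sketch of `output_expires_after`; the `anchor`/`seconds` shape is an assumption, so check `batch_create_params.OutputExpiresAfter` for the exact fields.

```python
from openai import OpenAI

client = OpenAI()

# Expire the batch's output/error files 30 days after they are created.
batch = client.batches.create(
    completion_window="24h",
    endpoint="/v1/chat/completions",
    input_file_id="file-abc123",  # placeholder ID
    output_expires_after={"anchor": "created_at", "seconds": 30 * 86400},
)
```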
src/openai/resources/files.py
@@ -57,6 +57,7 @@ class Files(SyncAPIResource):
         *,
         file: FileTypes,
         purpose: FilePurpose,
+        expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -68,7 +69,7 @@ class Files(SyncAPIResource):
 
         Individual files can be
         up to 512 MB, and the size of all files uploaded by one organization can be up
-        to 100 GB.
+        to 1 TB.
 
         The Assistants API supports files up to 2 million tokens and of specific file
         types. See the
@@ -96,6 +97,9 @@ class Files(SyncAPIResource):
               fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
               Flexible file type for any purpose - `evals`: Used for eval data sets
 
+          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
+              after 30 days and all other files are persisted until they are manually deleted.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -108,6 +112,7 @@ class Files(SyncAPIResource):
             {
                 "file": file,
                 "purpose": purpose,
+                "expires_after": expires_after,
             }
         )
         files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
@@ -369,6 +374,7 @@ class AsyncFiles(AsyncAPIResource):
         *,
         file: FileTypes,
         purpose: FilePurpose,
+        expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -380,7 +386,7 @@ class AsyncFiles(AsyncAPIResource):
 
         Individual files can be
         up to 512 MB, and the size of all files uploaded by one organization can be up
-        to 100 GB.
+        to 1 TB.
 
         The Assistants API supports files up to 2 million tokens and of specific file
         types. See the
@@ -408,6 +414,9 @@ class AsyncFiles(AsyncAPIResource):
               fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
               Flexible file type for any purpose - `evals`: Used for eval data sets
 
+          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
+              after 30 days and all other files are persisted until they are manually deleted.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -420,6 +429,7 @@ class AsyncFiles(AsyncAPIResource):
             {
                 "file": file,
                 "purpose": purpose,
+                "expires_after": expires_after,
             }
         )
         files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
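
A sketch of an explicit expiration overriding the purpose-based default described above (the `anchor`/`seconds` shape is assumed; see `file_create_params.ExpiresAfter`):

```python
from openai import OpenAI

client = OpenAI()

# Upload a file that is deleted automatically 30 days after creation.
with open("training.jsonl", "rb") as f:
    uploaded = client.files.create(
        file=f,
        purpose="fine-tune",
        expires_after={"anchor": "created_at", "seconds": 30 * 86400},
    )
```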
src/openai/types/beta/realtime/session.py
@@ -260,7 +260,7 @@ class Session(BaseModel):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/session_create_params.py
@@ -137,7 +137,7 @@ class SessionCreateParams(TypedDict, total=False):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/session_update_event.py
@@ -282,7 +282,7 @@ class Session(BaseModel):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/session_update_event_param.py
@@ -280,7 +280,7 @@ class Session(TypedDict, total=False):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/transcription_session_create_params.py
@@ -61,7 +61,7 @@ class TranscriptionSessionCreateParams(TypedDict, total=False):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/transcription_session_update.py
@@ -165,7 +165,7 @@ class Session(BaseModel):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/realtime/transcription_session_update_param.py
@@ -165,7 +165,7 @@ class Session(TypedDict, total=False):
     This can be set to `null` to turn off, in which case the client must manually
     trigger model response. Server VAD means that the model will detect the start
     and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
     with VAD) to semantically estimate whether the user has finished speaking, then
     dynamically sets a timeout based on this probability. For example, if user audio
     trails off with "uhhm", the model will score a low probability of turn end and
src/openai/types/beta/threads/run.py
@@ -228,7 +228,7 @@ class Run(BaseModel):
     truncation_strategy: Optional[TruncationStrategy] = None
     """Controls for how a thread will be truncated prior to the run.
 
-    Use this to control the intial context window of the run.
+    Use this to control the initial context window of the run.
     """
 
     usage: Optional[Usage] = None
src/openai/types/beta/threads/run_create_params.py
@@ -176,7 +176,7 @@ class RunCreateParamsBase(TypedDict, total=False):
     truncation_strategy: Optional[TruncationStrategy]
     """Controls for how a thread will be truncated prior to the run.
 
-    Use this to control the intial context window of the run.
+    Use this to control the initial context window of the run.
     """
 
 
src/openai/types/beta/thread_create_and_run_params.py
@@ -169,7 +169,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     truncation_strategy: Optional[TruncationStrategy]
     """Controls for how a thread will be truncated prior to the run.
 
-    Use this to control the intial context window of the run.
+    Use this to control the initial context window of the run.
     """
 
 
src/openai/types/chat/chat_completion.py
@@ -68,9 +68,8 @@ class ChatCompletion(BaseModel):
     - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-      'priority', then the request will be processed with the corresponding service
-      tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-      Priority processing.
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
     When the `service_tier` parameter is set, the response body will include the
src/openai/types/chat/chat_completion_chunk.py
@@ -137,9 +137,8 @@ class ChatCompletionChunk(BaseModel):
     - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-      'priority', then the request will be processed with the corresponding service
-      tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-      Priority processing.
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
     When the `service_tier` parameter is set, the response body will include the
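A sketch of requesting the newly documented `priority` tier on a chat completion; access to priority processing is account-dependent, and the model name is a placeholder:

    from openai import OpenAI

    client = OpenAI()

    completion = client.chat.completions.create(
        model="gpt-4o",  # placeholder model
        messages=[{"role": "user", "content": "Hello"}],
        service_tier="priority",
    )
    # The response echoes the tier that actually processed the request.
    print(completion.service_tier)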
src/openai/types/chat/completion_create_params.py
@@ -25,6 +25,7 @@ __all__ = [
     "FunctionCall",
     "Function",
     "ResponseFormat",
+    "Text",
     "WebSearchOptions",
     "WebSearchOptionsUserLocation",
     "WebSearchOptionsUserLocationApproximate",
@@ -233,9 +234,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-      'priority', then the request will be processed with the corresponding service
-      tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-      Priority processing.
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
     When the `service_tier` parameter is set, the response body will include the
@@ -271,6 +271,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     this or `top_p` but not both.
     """
 
+    text: Text
+
     tool_choice: ChatCompletionToolChoiceOptionParam
     """
     Controls which (if any) tool is called by the model. `none` means the model will
@@ -365,6 +367,16 @@ class Function(TypedDict, total=False):
 ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
 
 
+class Text(TypedDict, total=False):
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+
+
 class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
     city: str
     """Free text input for the city of the user, e.g. `San Francisco`."""
src/openai/types/responses/__init__.py
@@ -42,7 +42,6 @@ from .response_input_image import ResponseInputImage as ResponseInputImage
 from .response_input_param import ResponseInputParam as ResponseInputParam
 from .response_output_item import ResponseOutputItem as ResponseOutputItem
 from .response_output_text import ResponseOutputText as ResponseOutputText
-from .response_text_config import ResponseTextConfig as ResponseTextConfig
 from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
 from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
 from .response_prompt_param import ResponsePromptParam as ResponsePromptParam
@@ -76,7 +75,6 @@ from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudio
 from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
 from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
 from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
-from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
 from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
 from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
src/openai/types/responses/response.py
@@ -18,11 +18,11 @@ from .response_input_item import ResponseInputItem
 from .tool_choice_allowed import ToolChoiceAllowed
 from .tool_choice_options import ToolChoiceOptions
 from .response_output_item import ResponseOutputItem
-from .response_text_config import ResponseTextConfig
 from .tool_choice_function import ToolChoiceFunction
 from ..shared.responses_model import ResponsesModel
+from .response_format_text_config import ResponseFormatTextConfig
 
-__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"]
 
 
 class IncompleteDetails(BaseModel):
@@ -35,6 +35,32 @@ ToolChoice: TypeAlias = Union[
 ]
 
 
+class Text(BaseModel):
+    format: Optional[ResponseFormatTextConfig] = None
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
+
+    verbosity: Optional[Literal["low", "medium", "high"]] = None
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+
+
 class Response(BaseModel):
     id: str
     """Unique identifier for this Response."""
@@ -177,7 +203,7 @@ class Response(BaseModel):
     """
 
     reasoning: Optional[Reasoning] = None
-    """**o-series models only**
+    """**gpt-5 and o-series models only**
 
     Configuration options for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -201,9 +227,8 @@ class Response(BaseModel):
     - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-      'priority', then the request will be processed with the corresponding service
-      tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-      Priority processing.
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
     When the `service_tier` parameter is set, the response body will include the
@@ -219,14 +244,7 @@ class Response(BaseModel):
     `incomplete`.
     """
 
-    text: Optional[ResponseTextConfig] = None
-    """Configuration options for a text response from the model.
-
-    Can be plain text or structured JSON data. Learn more:
-
-    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-    """
+    text: Optional[Text] = None
 
     top_logprobs: Optional[int] = None
     """
src/openai/types/responses/response_create_params.py
@@ -16,13 +16,14 @@ from .tool_choice_types_param import ToolChoiceTypesParam
 from ..shared_params.reasoning import Reasoning
 from .tool_choice_custom_param import ToolChoiceCustomParam
 from .tool_choice_allowed_param import ToolChoiceAllowedParam
-from .response_text_config_param import ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam
 from ..shared_params.responses_model import ResponsesModel
+from .response_format_text_config_param import ResponseFormatTextConfigParam
 
 __all__ = [
     "ResponseCreateParamsBase",
     "StreamOptions",
+    "Text",
     "ToolChoice",
     "ResponseCreateParamsNonStreaming",
     "ResponseCreateParamsStreaming",
@@ -134,7 +135,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     """
 
     reasoning: Optional[Reasoning]
-    """**o-series models only**
+    """**gpt-5 and o-series models only**
 
     Configuration options for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
@@ -158,9 +159,8 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     - If set to 'default', then the request will be processed with the standard
       pricing and performance for the selected model.
     - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-      'priority', then the request will be processed with the corresponding service
-      tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-      Priority processing.
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
     When the `service_tier` parameter is set, the response body will include the
@@ -183,14 +183,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     this or `top_p` but not both.
     """
 
-    text: ResponseTextConfigParam
-    """Configuration options for a text response from the model.
-
-    Can be plain text or structured JSON data. Learn more:
-
-    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-    """
+    text: Text
 
     tool_choice: ToolChoice
     """
@@ -267,6 +260,32 @@ class StreamOptions(TypedDict, total=False):
     """
 
 
+class Text(TypedDict, total=False):
+    format: ResponseFormatTextConfigParam
+    """An object specifying the format that the model must output.
+
+    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+    ensures the model will match your supplied JSON schema. Learn more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+    The default format is `{ "type": "text" }` with no additional options.
+
+    **Not recommended for gpt-4o and newer models:**
+
+    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+    ensures the message the model generates is valid JSON. Using `json_schema` is
+    preferred for models that support it.
+    """
+
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+
+
 ToolChoice: TypeAlias = Union[
     ToolChoiceOptions,
     ToolChoiceAllowedParam,
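On the request side, the inline `Text` param accepts the format config and verbosity together; a sketch using the Responses API's flattened JSON-schema format (the schema name and shape are illustrative):

    from openai import OpenAI

    client = OpenAI()

    response = client.responses.create(
        model="gpt-5",  # placeholder model
        input="List three prime numbers.",
        text={
            "format": {
                "type": "json_schema",
                "name": "primes",  # illustrative schema name
                "schema": {
                    "type": "object",
                    "properties": {"primes": {"type": "array", "items": {"type": "integer"}}},
                    "required": ["primes"],
                    "additionalProperties": False,
                },
            },
            "verbosity": "low",
        },
    )
    print(response.output_text)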
src/openai/types/responses/response_text_config.py
@@ -1,35 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-from .response_format_text_config import ResponseFormatTextConfig
-
-__all__ = ["ResponseTextConfig"]
-
-
-class ResponseTextConfig(BaseModel):
-    format: Optional[ResponseFormatTextConfig] = None
-    """An object specifying the format that the model must output.
-
-    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-    ensures the model will match your supplied JSON schema. Learn more in the
-    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
-    The default format is `{ "type": "text" }` with no additional options.
-
-    **Not recommended for gpt-4o and newer models:**
-
-    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-    ensures the message the model generates is valid JSON. Using `json_schema` is
-    preferred for models that support it.
-    """
-
-    verbosity: Optional[Literal["low", "medium", "high"]] = None
-    """Constrains the verbosity of the model's response.
-
-    Lower values will result in more concise responses, while higher values will
-    result in more verbose responses. Currently supported values are `low`,
-    `medium`, and `high`.
-    """
src/openai/types/responses/response_text_config_param.py
@@ -1,36 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, TypedDict
-
-from .response_format_text_config_param import ResponseFormatTextConfigParam
-
-__all__ = ["ResponseTextConfigParam"]
-
-
-class ResponseTextConfigParam(TypedDict, total=False):
-    format: ResponseFormatTextConfigParam
-    """An object specifying the format that the model must output.
-
-    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-    ensures the model will match your supplied JSON schema. Learn more in the
-    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
-    The default format is `{ "type": "text" }` with no additional options.
-
-    **Not recommended for gpt-4o and newer models:**
-
-    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-    ensures the message the model generates is valid JSON. Using `json_schema` is
-    preferred for models that support it.
-    """
-
-    verbosity: Optional[Literal["low", "medium", "high"]]
-    """Constrains the verbosity of the model's response.
-
-    Lower values will result in more concise responses, while higher values will
-    result in more verbose responses. Currently supported values are `low`,
-    `medium`, and `high`.
-    """
src/openai/types/batch_create_params.py
@@ -7,7 +7,7 @@ from typing_extensions import Literal, Required, TypedDict
 
 from .shared_params.metadata import Metadata
 
-__all__ = ["BatchCreateParams"]
+__all__ = ["BatchCreateParams", "OutputExpiresAfter"]
 
 
 class BatchCreateParams(TypedDict, total=False):
@@ -47,3 +47,24 @@ class BatchCreateParams(TypedDict, total=False):
     Keys are strings with a maximum length of 64 characters. Values are strings with
     a maximum length of 512 characters.
     """
+
+    output_expires_after: OutputExpiresAfter
+    """
+    The expiration policy for the output and/or error file that are generated for a
+    batch.
+    """
+
+
+class OutputExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["created_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `created_at`. Note that the anchor is the file creation time,
+    not the time the batch is created.
+    """
+
+    seconds: Required[int]
+    """The number of seconds after the anchor time that the file will expire.
+
+    Must be between 3600 (1 hour) and 2592000 (30 days).
+    """
src/openai/types/file_create_params.py
@@ -2,12 +2,12 @@
 
 from __future__ import annotations
 
-from typing_extensions import Required, TypedDict
+from typing_extensions import Literal, Required, TypedDict
 
 from .._types import FileTypes
 from .file_purpose import FilePurpose
 
-__all__ = ["FileCreateParams"]
+__all__ = ["FileCreateParams", "ExpiresAfter"]
 
 
 class FileCreateParams(TypedDict, total=False):
@@ -22,3 +22,24 @@ class FileCreateParams(TypedDict, total=False):
     fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used
     for eval data sets
     """
+
+    expires_after: ExpiresAfter
+    """The expiration policy for a file.
+
+    By default, files with `purpose=batch` expire after 30 days and all other files
+    are persisted until they are manually deleted.
+    """
+
+
+class ExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["created_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `created_at`.
+    """
+
+    seconds: Required[int]
+    """The number of seconds after the anchor time that the file will expire.
+
+    Must be between 3600 (1 hour) and 2592000 (30 days).
+    """
src/openai/types/upload_create_params.py
@@ -2,11 +2,11 @@
 
 from __future__ import annotations
 
-from typing_extensions import Required, TypedDict
+from typing_extensions import Literal, Required, TypedDict
 
 from .file_purpose import FilePurpose
 
-__all__ = ["UploadCreateParams"]
+__all__ = ["UploadCreateParams", "ExpiresAfter"]
 
 
 class UploadCreateParams(TypedDict, total=False):
@@ -29,3 +29,24 @@ class UploadCreateParams(TypedDict, total=False):
     See the
     [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
     """
+
+    expires_after: ExpiresAfter
+    """The expiration policy for a file.
+
+    By default, files with `purpose=batch` expire after 30 days and all other files
+    are persisted until they are manually deleted.
+    """
+
+
+class ExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["created_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `created_at`.
+    """
+
+    seconds: Required[int]
+    """The number of seconds after the anchor time that the file will expire.
+
+    Must be between 3600 (1 hour) and 2592000 (30 days).
+    """
tests/api_resources/chat/test_completions.py
@@ -86,6 +86,7 @@ class TestCompletions:
                 "include_usage": True,
             },
             temperature=1,
+            text={"verbosity": "low"},
             tool_choice="none",
             tools=[
                 {
@@ -218,6 +219,7 @@ class TestCompletions:
                 "include_usage": True,
             },
             temperature=1,
+            text={"verbosity": "low"},
             tool_choice="none",
             tools=[
                 {
@@ -527,6 +529,7 @@ class TestAsyncCompletions:
                 "include_usage": True,
             },
             temperature=1,
+            text={"verbosity": "low"},
             tool_choice="none",
             tools=[
                 {
@@ -659,6 +662,7 @@ class TestAsyncCompletions:
                 "include_usage": True,
             },
             temperature=1,
+            text={"verbosity": "low"},
             tool_choice="none",
             tools=[
                 {
tests/api_resources/test_batches.py
@@ -34,6 +34,10 @@ class TestBatches:
             endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
+            output_expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
         )
         assert_matches_type(Batch, batch, path=["response"])
 
@@ -196,6 +200,10 @@ class TestAsyncBatches:
             endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
+            output_expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
         )
         assert_matches_type(Batch, batch, path=["response"])
 
tests/api_resources/test_files.py
@@ -31,6 +31,18 @@ class TestFiles:
         )
         assert_matches_type(FileObject, file, path=["response"])
 
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        file = client.files.create(
+            file=b"raw file contents",
+            purpose="assistants",
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
+        )
+        assert_matches_type(FileObject, file, path=["response"])
+
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.files.with_raw_response.create(
@@ -272,6 +284,18 @@ class TestAsyncFiles:
         )
         assert_matches_type(FileObject, file, path=["response"])
 
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        file = await async_client.files.create(
+            file=b"raw file contents",
+            purpose="assistants",
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
+        )
+        assert_matches_type(FileObject, file, path=["response"])
+
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.files.with_raw_response.create(
tests/api_resources/test_responses.py
@@ -10,9 +10,7 @@ import pytest
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai._utils import assert_signatures_in_sync
-from openai.types.responses import (
-    Response,
-)
+from openai.types.responses import Response
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
tests/api_resources/test_uploads.py
@@ -27,6 +27,20 @@ class TestUploads:
         )
         assert_matches_type(Upload, upload, path=["response"])
 
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        upload = client.uploads.create(
+            bytes=0,
+            filename="filename",
+            mime_type="mime_type",
+            purpose="assistants",
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
+        )
+        assert_matches_type(Upload, upload, path=["response"])
+
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.uploads.with_raw_response.create(
@@ -162,6 +176,20 @@ class TestAsyncUploads:
         )
         assert_matches_type(Upload, upload, path=["response"])
 
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        upload = await async_client.uploads.create(
+            bytes=0,
+            filename="filename",
+            mime_type="mime_type",
+            purpose="assistants",
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 3600,
+            },
+        )
+        assert_matches_type(Upload, upload, path=["response"])
+
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.uploads.with_raw_response.create(
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml
-openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: 68337b532875626269c304372a669f67
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml
+openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15
+config_hash: ed87b9139ac595a04a2162d754df2fed