Commit caf837bb

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-08-08 01:02:31
feat(api): adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5
1 parent 936b2f0
Changed files (76)
src/openai/lib/_parsing/_completions.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import json
+import logging
 from typing import TYPE_CHECKING, Any, Iterable, cast
 from typing_extensions import TypeVar, TypeGuard, assert_never
 
@@ -19,14 +20,15 @@ from ...types.chat import (
     ParsedChatCompletion,
     ChatCompletionMessage,
     ParsedFunctionToolCall,
-    ChatCompletionToolParam,
     ParsedChatCompletionMessage,
+    ChatCompletionFunctionToolParam,
     completion_create_params,
 )
 from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ...types.shared_params import FunctionDefinition
 from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
-from ...types.chat.chat_completion_message_tool_call import Function
+from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ...types.chat.chat_completion_message_function_tool_call import Function
 
 ResponseFormatT = TypeVar(
     "ResponseFormatT",
@@ -35,12 +37,36 @@ ResponseFormatT = TypeVar(
 )
 _default_response_format: None = None
 
+log: logging.Logger = logging.getLogger("openai.lib.parsing")
+
+
+def is_strict_chat_completion_tool_param(
+    tool: ChatCompletionToolParam,
+) -> TypeGuard[ChatCompletionFunctionToolParam]:
+    """Check if the given tool is a strict ChatCompletionFunctionToolParam."""
+    if tool["type"] != "function":
+        return False
+    if tool["function"].get("strict") is not True:
+        return False
+
+    return True
+
+
+def select_strict_chat_completion_tools(
+    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
+    """Select only the strict ChatCompletionFunctionToolParams from the given tools."""
+    if not is_given(tools):
+        return NOT_GIVEN
+
+    return [t for t in tools if is_strict_chat_completion_tool_param(t)]
+
 
 def validate_input_tools(
     tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
-) -> None:
+) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
     if not is_given(tools):
-        return
+        return NOT_GIVEN
 
     for tool in tools:
         if tool["type"] != "function":
@@ -54,6 +80,8 @@ def validate_input_tools(
                 f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed"
             )
 
+    return cast(Iterable[ChatCompletionFunctionToolParam], tools)
+
 
 def parse_chat_completion(
     *,
@@ -95,6 +123,14 @@ def parse_chat_completion(
                             type_=ParsedFunctionToolCall,
                         )
                     )
+                elif tool_call.type == "custom":
+                    # warn user that custom tool calls are not callable here
+                    log.warning(
+                        "Custom tool calls are not callable. Ignoring tool call: %s - %s",
+                        tool_call.id,
+                        tool_call.custom.name,
+                        stacklevel=2,
+                    )
                 elif TYPE_CHECKING:  # type: ignore[unreachable]
                     assert_never(tool_call)
                 else:
@@ -129,13 +165,15 @@ def parse_chat_completion(
     )
 
 
-def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
-    return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)
+def get_input_tool_by_name(
+    *, input_tools: list[ChatCompletionToolParam], name: str
+) -> ChatCompletionFunctionToolParam | None:
+    return next((t for t in input_tools if t["type"] == "function" and t.get("function", {}).get("name") == name), None)
 
 
 def parse_function_tool_arguments(
     *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
-) -> object:
+) -> object | None:
     input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
     if not input_tool:
         return None
@@ -149,7 +187,7 @@ def parse_function_tool_arguments(
     if not input_fn.get("strict"):
         return None
 
-    return json.loads(function.arguments)
+    return json.loads(function.arguments)  # type: ignore[no-any-return]
 
 
 def maybe_parse_content(
@@ -209,6 +247,9 @@ def is_response_format_param(response_format: object) -> TypeGuard[ResponseForma
 
 
 def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+    if input_tool["type"] != "function":
+        return False
+
     input_fn = cast(object, input_tool.get("function"))
     if isinstance(input_fn, PydanticFunctionTool):
         return True
src/openai/lib/_parsing/_responses.py
@@ -110,6 +110,7 @@ def parse_response(
             or output.type == "local_shell_call"
             or output.type == "mcp_list_tools"
             or output.type == "exec"
+            or output.type == "custom_tool_call"
         ):
             output_list.append(output)
         elif TYPE_CHECKING:  # type: ignore
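
`parse_response` now passes `custom_tool_call` output items through instead of tripping the exhaustiveness check. A hedged sketch of how such an item can surface (the `{"type": "custom", ...}` tool shape follows the custom-tools guide linked in this commit and is an assumption here):

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="Use the code_exec tool to print hello world.",
    tools=[
        {
            "type": "custom",  # assumption: custom tool param shape
            "name": "code_exec",
            "description": "Executes arbitrary Python code.",
        }
    ],
)
for output in response.output:
    if output.type == "custom_tool_call":
        print(output.name, output.input)  # free-form input produced by the model
```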
src/openai/lib/streaming/chat/_completions.py
@@ -37,11 +37,12 @@ from ..._parsing import (
     parse_function_tool_arguments,
 )
 from ...._streaming import Stream, AsyncStream
-from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion
 from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ....types.chat.chat_completion import ChoiceLogprobs
 from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
 from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 
 
 class ChatCompletionStream(Generic[ResponseFormatT]):
src/openai/lib/_tools.py
@@ -5,7 +5,7 @@ from typing import Any, Dict, cast
 import pydantic
 
 from ._pydantic import to_strict_json_schema
-from ..types.chat import ChatCompletionToolParam
+from ..types.chat import ChatCompletionFunctionToolParam
 from ..types.shared_params import FunctionDefinition
 from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam
 
@@ -42,7 +42,7 @@ def pydantic_function_tool(
     *,
     name: str | None = None,  # inferred from class name by default
     description: str | None = None,  # inferred from class docstring by default
-) -> ChatCompletionToolParam:
+) -> ChatCompletionFunctionToolParam:
     if description is None:
         # note: we intentionally don't use `.getdoc()` to avoid
         # including pydantic's docstrings
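
`pydantic_function_tool` is now typed as returning a `ChatCompletionFunctionToolParam`, so its result no longer unifies with the custom-tool side of the union. A short usage sketch (the helper is re-exported at the package root):

```python
import pydantic

from openai import pydantic_function_tool


class GetWeather(pydantic.BaseModel):
    """Look up the current weather for a city."""

    city: str


tool = pydantic_function_tool(GetWeather)
# tool["type"] == "function" and tool["function"]["strict"] is True, so the
# result satisfies the narrower ChatCompletionFunctionToolParam type.
```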
src/openai/resources/beta/threads/runs/runs.py
@@ -167,12 +167,11 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -322,12 +321,11 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -473,12 +471,11 @@ class Runs(SyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1600,12 +1597,11 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1755,12 +1751,11 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -1906,12 +1901,11 @@ class AsyncRuns(AsyncAPIResource):
               [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
               during tool use.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
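
The same `reasoning_effort` docstring update repeats across every run-creation overload. A hedged sketch of passing the new `minimal` level through (the IDs are placeholders):

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",  # placeholder IDs for illustration
    assistant_id="asst_abc123",
    reasoning_effort="minimal",  # newly documented; low/medium/high still apply
)
```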
src/openai/resources/beta/assistants.py
@@ -96,12 +96,11 @@ class Assistants(SyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -220,6 +219,12 @@ class Assistants(SyncAPIResource):
         model: Union[
             str,
             Literal[
+                "gpt-5",
+                "gpt-5-mini",
+                "gpt-5-nano",
+                "gpt-5-2025-08-07",
+                "gpt-5-mini-2025-08-07",
+                "gpt-5-nano-2025-08-07",
                 "gpt-4.1",
                 "gpt-4.1-mini",
                 "gpt-4.1-nano",
@@ -298,12 +303,11 @@ class Assistants(SyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -545,12 +549,11 @@ class AsyncAssistants(AsyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -669,6 +672,12 @@ class AsyncAssistants(AsyncAPIResource):
         model: Union[
             str,
             Literal[
+                "gpt-5",
+                "gpt-5-mini",
+                "gpt-5-nano",
+                "gpt-5-2025-08-07",
+                "gpt-5-mini-2025-08-07",
+                "gpt-5-nano-2025-08-07",
                 "gpt-4.1",
                 "gpt-4.1-mini",
                 "gpt-4.1-nano",
@@ -747,12 +756,11 @@ class AsyncAssistants(AsyncAPIResource):
 
           name: The name of the assistant. The maximum length is 256 characters.
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
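
The assistant `model` literal now accepts the GPT-5 family alongside the existing entries. A minimal sketch combining the new literal with the expanded `reasoning_effort` values:

```python
from openai import OpenAI

client = OpenAI()

# "gpt-5-mini", "gpt-5-nano", and the dated 2025-08-07 snapshots type-check too.
assistant = client.beta.assistants.create(
    model="gpt-5",
    name="Report summarizer",  # placeholder name
    reasoning_effort="minimal",
)
```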
src/openai/resources/chat/completions/completions.py
@@ -115,6 +115,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -165,7 +166,7 @@ class Completions(SyncAPIResource):
             print("answer: ", message.parsed.final_answer)
         ```
         """
-        _validate_input_tools(tools)
+        chat_completion_tools = _validate_input_tools(tools)
 
         extra_headers = {
             "X-Stainless-Helper-Method": "chat.completions.parse",
@@ -176,7 +177,7 @@ class Completions(SyncAPIResource):
             return _parse_chat_completion(
                 response_format=response_format,
                 chat_completion=raw_completion,
-                input_tools=tools,
+                input_tools=chat_completion_tools,
             )
 
         return self._post(
@@ -215,6 +216,7 @@ class Completions(SyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "verbosity": verbosity,
                     "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
@@ -268,6 +270,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -398,12 +401,11 @@ class Completions(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -483,9 +485,9 @@ class Completions(SyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -503,6 +505,10 @@ class Completions(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -553,6 +559,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -692,12 +699,11 @@ class Completions(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -768,9 +774,9 @@ class Completions(SyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -788,6 +794,10 @@ class Completions(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -838,6 +848,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -977,12 +988,11 @@ class Completions(SyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -1053,9 +1063,9 @@ class Completions(SyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -1073,6 +1083,10 @@ class Completions(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -1123,6 +1137,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1168,6 +1183,7 @@ class Completions(SyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "verbosity": verbosity,
                     "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParamsStreaming
@@ -1396,6 +1412,7 @@ class Completions(SyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1465,6 +1482,7 @@ class Completions(SyncAPIResource):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            verbosity=verbosity,
             web_search_options=web_search_options,
             extra_headers=extra_headers,
             extra_query=extra_query,
@@ -1536,6 +1554,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1636,6 +1655,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "verbosity": verbosity,
                     "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParams,
@@ -1689,6 +1709,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1819,12 +1840,11 @@ class AsyncCompletions(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -1904,9 +1924,9 @@ class AsyncCompletions(AsyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -1924,6 +1944,10 @@ class AsyncCompletions(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -1974,6 +1998,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2113,12 +2138,11 @@ class AsyncCompletions(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -2189,9 +2213,9 @@ class AsyncCompletions(AsyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -2209,6 +2233,10 @@ class AsyncCompletions(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -2259,6 +2287,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2398,12 +2427,11 @@ class AsyncCompletions(AsyncAPIResource):
               hit rates. Replaces the `user` field.
               [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
 
-          reasoning_effort: **o-series models only**
-
-              Constrains effort on reasoning for
+          reasoning_effort: Constrains effort on reasoning for
               [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-              supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-              result in faster responses and fewer tokens used on reasoning in a response.
+              supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              effort can result in faster responses and fewer tokens used on reasoning in a
+              response.
 
           response_format: An object specifying the format that the model must output.
 
@@ -2474,9 +2502,9 @@ class AsyncCompletions(AsyncAPIResource):
               `none` is the default when no tools are present. `auto` is the default if tools
               are present.
 
-          tools: A list of tools the model may call. Currently, only functions are supported as a
-              tool. Use this to provide a list of functions the model may generate JSON inputs
-              for. A max of 128 functions are supported.
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -2494,6 +2522,10 @@ class AsyncCompletions(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           web_search_options: This tool searches the web for relevant results to use in a response. Learn more
               about the
               [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
@@ -2544,6 +2576,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2589,6 +2622,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "user": user,
+                    "verbosity": verbosity,
                     "web_search_options": web_search_options,
                 },
                 completion_create_params.CompletionCreateParamsStreaming
@@ -2817,6 +2851,7 @@ class AsyncCompletions(AsyncAPIResource):
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2887,11 +2922,12 @@ class AsyncCompletions(AsyncAPIResource):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            verbosity=verbosity,
+            web_search_options=web_search_options,
             extra_headers=extra_headers,
             extra_query=extra_query,
             extra_body=extra_body,
             timeout=timeout,
-            web_search_options=web_search_options,
         )
         return AsyncChatCompletionStreamManager(
             api_request,
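
The `verbosity` parameter threads through every chat-completions overload above, from `parse` to the streaming manager. A minimal usage sketch:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Explain type guards in two sentences."}],
    verbosity="low",  # new parameter: "low" | "medium" | "high"
)
print(completion.choices[0].message.content)
```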
src/openai/resources/responses/responses.py
@@ -93,6 +93,7 @@ class Responses(SyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -101,6 +102,7 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -232,6 +234,8 @@ class Responses(SyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
@@ -259,8 +263,10 @@ class Responses(SyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -285,6 +291,10 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -316,6 +326,7 @@ class Responses(SyncAPIResource):
         safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -324,6 +335,7 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -455,6 +467,8 @@ class Responses(SyncAPIResource):
 
           store: Whether to store the generated model response for later retrieval via API.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
@@ -482,8 +496,10 @@ class Responses(SyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -508,6 +524,10 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -539,6 +559,7 @@ class Responses(SyncAPIResource):
         safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -547,6 +568,7 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -678,6 +700,8 @@ class Responses(SyncAPIResource):
 
           store: Whether to store the generated model response for later retrieval via API.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
@@ -705,8 +729,10 @@ class Responses(SyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -731,6 +757,10 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -761,6 +791,7 @@ class Responses(SyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -769,6 +800,7 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -797,6 +829,7 @@ class Responses(SyncAPIResource):
                     "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
+                    "stream_options": stream_options,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
@@ -805,6 +838,7 @@ class Responses(SyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
+                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
@@ -850,6 +884,7 @@ class Responses(SyncAPIResource):
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -881,6 +916,7 @@ class Responses(SyncAPIResource):
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -906,6 +942,7 @@ class Responses(SyncAPIResource):
             "previous_response_id": previous_response_id,
             "reasoning": reasoning,
             "store": store,
+            "stream_options": stream_options,
             "temperature": temperature,
             "text": text,
             "tool_choice": tool_choice,
@@ -950,6 +987,7 @@ class Responses(SyncAPIResource):
                 parallel_tool_calls=parallel_tool_calls,
                 previous_response_id=previous_response_id,
                 store=store,
+                stream_options=stream_options,
                 stream=True,
                 temperature=temperature,
                 text=text,
@@ -1007,6 +1045,7 @@ class Responses(SyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -1015,6 +1054,7 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1061,6 +1101,7 @@ class Responses(SyncAPIResource):
                     "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
+                    "stream_options": stream_options,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
@@ -1069,6 +1110,7 @@ class Responses(SyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
+                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParams,
             ),
@@ -1090,6 +1132,7 @@ class Responses(SyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1154,6 +1197,13 @@ class Responses(SyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           stream: If set to true, the model response data will be streamed to the client as it is
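
A sketch of resuming a streamed response with obfuscation padding disabled; the response ID and sequence number are placeholders, and disabling obfuscation is only advisable on trusted network links:

```python
from openai import OpenAI

client = OpenAI()

# Re-attach to an in-progress response, skipping the `obfuscation`
# padding fields to save bandwidth on a trusted connection.
stream = client.responses.retrieve(
    "resp_123",  # placeholder response ID
    stream=True,
    include_obfuscation=False,
    starting_after=10,  # resume after event sequence number 10
)
for event in stream:
    print(event.type)
```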
@@ -1180,6 +1230,7 @@ class Responses(SyncAPIResource):
         *,
         stream: Literal[True],
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1202,6 +1253,13 @@ class Responses(SyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
@@ -1221,6 +1279,7 @@ class Responses(SyncAPIResource):
         *,
         stream: bool,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1243,6 +1302,13 @@ class Responses(SyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
@@ -1260,6 +1326,7 @@ class Responses(SyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1281,6 +1348,7 @@ class Responses(SyncAPIResource):
                 query=maybe_transform(
                     {
                         "include": include,
+                        "include_obfuscation": include_obfuscation,
                         "starting_after": starting_after,
                         "stream": stream,
                     },
@@ -1408,6 +1476,7 @@ class AsyncResponses(AsyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -1416,6 +1485,7 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1547,6 +1617,8 @@ class AsyncResponses(AsyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
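
The shape of `response_create_params.StreamOptions` is not shown in this hunk, but given the matching `include_obfuscation` field added to `ChatCompletionStreamOptionsParam` later in this commit, usage presumably looks like the following sketch:

```python
from openai import OpenAI

client = OpenAI()

# `stream_options` is only honored together with `stream=True`.
stream = client.responses.create(
    model="gpt-5",  # assumed model name
    input="Write a haiku about code review.",
    stream=True,
    # Assumed field, mirroring the chat completions stream options.
    stream_options={"include_obfuscation": False},
)
for event in stream:
    print(event.type)
```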
@@ -1574,8 +1646,10 @@ class AsyncResponses(AsyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -1600,6 +1674,10 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1631,6 +1709,7 @@ class AsyncResponses(AsyncAPIResource):
         safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -1639,6 +1718,7 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1770,6 +1850,8 @@ class AsyncResponses(AsyncAPIResource):
 
           store: Whether to store the generated model response for later retrieval via API.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
@@ -1797,8 +1879,10 @@ class AsyncResponses(AsyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -1823,6 +1907,10 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1854,6 +1942,7 @@ class AsyncResponses(AsyncAPIResource):
         safety_identifier: str | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -1862,6 +1951,7 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1993,6 +2083,8 @@ class AsyncResponses(AsyncAPIResource):
 
           store: Whether to store the generated model response for later retrieval via API.
 
+          stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
           temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
               make the output more random, while lower values like 0.2 will make it more
               focused and deterministic. We generally recommend altering this or `top_p` but
@@ -2020,8 +2112,10 @@ class AsyncResponses(AsyncAPIResource):
                 Learn more about
                 [built-in tools](https://platform.openai.com/docs/guides/tools).
               - **Function calls (custom tools)**: Functions that are defined by you, enabling
-                the model to call your own code. Learn more about
+                the model to call your own code with strongly typed arguments and outputs.
+                Learn more about
                 [function calling](https://platform.openai.com/docs/guides/function-calling).
+                You can also use custom tools to call your own code.
 
           top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
               return at each token position, each with an associated log probability.
@@ -2046,6 +2140,10 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -2076,6 +2174,7 @@ class AsyncResponses(AsyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -2084,6 +2183,7 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2112,6 +2212,7 @@ class AsyncResponses(AsyncAPIResource):
                     "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
+                    "stream_options": stream_options,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
@@ -2120,6 +2221,7 @@ class AsyncResponses(AsyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
+                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
@@ -2165,6 +2267,7 @@ class AsyncResponses(AsyncAPIResource):
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -2196,6 +2299,7 @@ class AsyncResponses(AsyncAPIResource):
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -2221,6 +2325,7 @@ class AsyncResponses(AsyncAPIResource):
             "previous_response_id": previous_response_id,
             "reasoning": reasoning,
             "store": store,
+            "stream_options": stream_options,
             "temperature": temperature,
             "text": text,
             "tool_choice": tool_choice,
@@ -2266,6 +2371,7 @@ class AsyncResponses(AsyncAPIResource):
                 parallel_tool_calls=parallel_tool_calls,
                 previous_response_id=previous_response_id,
                 store=store,
+                stream_options=stream_options,
                 temperature=temperature,
                 text=text,
                 tool_choice=tool_choice,
@@ -2326,6 +2432,7 @@ class AsyncResponses(AsyncAPIResource):
         service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
@@ -2334,6 +2441,7 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
+        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2380,6 +2488,7 @@ class AsyncResponses(AsyncAPIResource):
                     "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
+                    "stream_options": stream_options,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
@@ -2388,6 +2497,7 @@ class AsyncResponses(AsyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
+                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParams,
             ),
@@ -2409,6 +2519,7 @@ class AsyncResponses(AsyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         stream: Literal[False] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2473,6 +2584,13 @@ class AsyncResponses(AsyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           stream: If set to true, the model response data will be streamed to the client as it is
@@ -2499,6 +2617,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         stream: Literal[True],
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2521,6 +2640,13 @@ class AsyncResponses(AsyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
@@ -2540,6 +2666,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         stream: bool,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2562,6 +2689,13 @@ class AsyncResponses(AsyncAPIResource):
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
+          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
+              characters to an `obfuscation` field on streaming delta events to normalize
+              payload sizes as a mitigation against certain side-channel attacks. These obfuscation
+              fields are included by default, but add a small amount of overhead to the data
+              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+              you trust the network links between your application and the OpenAI API.
+
           starting_after: The sequence number of the event after which to start streaming.
 
           extra_headers: Send extra headers
@@ -2579,6 +2713,7 @@ class AsyncResponses(AsyncAPIResource):
         response_id: str,
         *,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        include_obfuscation: bool | NotGiven = NOT_GIVEN,
         starting_after: int | NotGiven = NOT_GIVEN,
         stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2600,6 +2735,7 @@ class AsyncResponses(AsyncAPIResource):
                 query=await async_maybe_transform(
                     {
                         "include": include,
+                        "include_obfuscation": include_obfuscation,
                         "starting_after": starting_after,
                         "stream": stream,
                     },
src/openai/types/beta/threads/run_create_params.py
@@ -108,12 +108,12 @@ class RunCreateParamsBase(TypedDict, total=False):
     """
 
     reasoning_effort: Optional[ReasoningEffort]
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/beta/assistant_create_params.py
@@ -58,12 +58,12 @@ class AssistantCreateParams(TypedDict, total=False):
     """The name of the assistant. The maximum length is 256 characters."""
 
     reasoning_effort: Optional[ReasoningEffort]
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/beta/assistant_update_params.py
@@ -36,6 +36,12 @@ class AssistantUpdateParams(TypedDict, total=False):
     model: Union[
         str,
         Literal[
+            "gpt-5",
+            "gpt-5-mini",
+            "gpt-5-nano",
+            "gpt-5-2025-08-07",
+            "gpt-5-mini-2025-08-07",
+            "gpt-5-nano-2025-08-07",
             "gpt-4.1",
             "gpt-4.1-mini",
             "gpt-4.1-nano",
@@ -87,12 +93,12 @@ class AssistantUpdateParams(TypedDict, total=False):
     """The name of the assistant. The maximum length is 256 characters."""
 
     reasoning_effort: Optional[ReasoningEffort]
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/chat/__init__.py
@@ -4,7 +4,6 @@ from __future__ import annotations
 
 from .chat_completion import ChatCompletion as ChatCompletion
 from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
-from .chat_completion_tool import ChatCompletionTool as ChatCompletionTool
 from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
 from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
 from .completion_list_params import CompletionListParams as CompletionListParams
@@ -24,16 +23,20 @@ from .parsed_function_tool_call import (
 )
 from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
 from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
+from .chat_completion_function_tool import ChatCompletionFunctionTool as ChatCompletionFunctionTool
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
 from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
 from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
 from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText
+from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam
 from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
 from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage
 from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
 from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
 from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
+from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam as ChatCompletionAllowedToolsParam
+from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam as ChatCompletionFunctionToolParam
 from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
 from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam
 from .chat_completion_function_message_param import (
@@ -57,18 +60,36 @@ from .chat_completion_named_tool_choice_param import (
 from .chat_completion_content_part_image_param import (
     ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
 )
+from .chat_completion_message_custom_tool_call import (
+    ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall,
+)
 from .chat_completion_prediction_content_param import (
     ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
 )
 from .chat_completion_tool_choice_option_param import (
     ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
 )
+from .chat_completion_allowed_tool_choice_param import (
+    ChatCompletionAllowedToolChoiceParam as ChatCompletionAllowedToolChoiceParam,
+)
 from .chat_completion_content_part_refusal_param import (
     ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
 )
 from .chat_completion_function_call_option_param import (
     ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
 )
+from .chat_completion_message_function_tool_call import (
+    ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall,
+)
 from .chat_completion_content_part_input_audio_param import (
     ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam,
 )
+from .chat_completion_message_custom_tool_call_param import (
+    ChatCompletionMessageCustomToolCallParam as ChatCompletionMessageCustomToolCallParam,
+)
+from .chat_completion_named_tool_choice_custom_param import (
+    ChatCompletionNamedToolChoiceCustomParam as ChatCompletionNamedToolChoiceCustomParam,
+)
+from .chat_completion_message_function_tool_call_param import (
+    ChatCompletionMessageFunctionToolCallParam as ChatCompletionMessageFunctionToolCallParam,
+)
src/openai/types/chat/chat_completion_allowed_tool_choice_param.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam
+
+__all__ = ["ChatCompletionAllowedToolChoiceParam"]
+
+
+class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False):
+    allowed_tools: Required[ChatCompletionAllowedToolsParam]
+    """Constrains the tools available to the model to a pre-defined set."""
+
+    type: Required[Literal["allowed_tools"]]
+    """Allowed tool configuration type. Always `allowed_tools`."""
src/openai/types/chat/chat_completion_allowed_tools_param.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionAllowedToolsParam"]
+
+
+class ChatCompletionAllowedToolsParam(TypedDict, total=False):
+    mode: Required[Literal["auto", "required"]]
+    """Constrains the tools available to the model to a pre-defined set.
+
+    `auto` allows the model to pick from among the allowed tools and generate a
+    message.
+
+    `required` requires the model to call one or more of the allowed tools.
+    """
+
+    tools: Required[Iterable[Dict[str, object]]]
+    """A list of tool definitions that the model should be allowed to call.
+
+    For the Chat Completions API, the list of tool definitions might look like:
+
+    ```json
+    [
+      { "type": "function", "function": { "name": "get_weather" } },
+      { "type": "function", "function": { "name": "get_time" } }
+    ]
+    ```
+    """
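
Combining this with the `allowed_tools` tool choice defined above, a hedged sketch of restricting the model to a subset of the declared tools; the tool definitions are illustrative:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5",  # assumed model name
    messages=[{"role": "user", "content": "What's the weather in Oslo?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
            },
        },
        {
            "type": "function",
            "function": {"name": "get_time"},
        },
    ],
    # "auto" lets the model pick among the allowed subset (or answer in
    # text); "required" would force it to call one of them.
    tool_choice={
        "type": "allowed_tools",
        "allowed_tools": {
            "mode": "auto",
            "tools": [{"type": "function", "function": {"name": "get_weather"}}],
        },
    },
)
print(completion.choices[0].message)
```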
src/openai/types/chat/chat_completion_custom_tool_param.py
@@ -0,0 +1,58 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+    "ChatCompletionCustomToolParam",
+    "Custom",
+    "CustomFormat",
+    "CustomFormatText",
+    "CustomFormatGrammar",
+    "CustomFormatGrammarGrammar",
+]
+
+
+class CustomFormatText(TypedDict, total=False):
+    type: Required[Literal["text"]]
+    """Unconstrained text format. Always `text`."""
+
+
+class CustomFormatGrammarGrammar(TypedDict, total=False):
+    definition: Required[str]
+    """The grammar definition."""
+
+    syntax: Required[Literal["lark", "regex"]]
+    """The syntax of the grammar definition. One of `lark` or `regex`."""
+
+
+class CustomFormatGrammar(TypedDict, total=False):
+    grammar: Required[CustomFormatGrammarGrammar]
+    """Your chosen grammar."""
+
+    type: Required[Literal["grammar"]]
+    """Grammar format. Always `grammar`."""
+
+
+CustomFormat: TypeAlias = Union[CustomFormatText, CustomFormatGrammar]
+
+
+class Custom(TypedDict, total=False):
+    name: Required[str]
+    """The name of the custom tool, used to identify it in tool calls."""
+
+    description: str
+    """Optional description of the custom tool, used to provide more context."""
+
+    format: CustomFormat
+    """The input format for the custom tool. Default is unconstrained text."""
+
+
+class ChatCompletionCustomToolParam(TypedDict, total=False):
+    custom: Required[Custom]
+    """Properties of the custom tool."""
+
+    type: Required[Literal["custom"]]
+    """The type of the custom tool. Always `custom`."""
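
A sketch of declaring a custom tool whose input is constrained by a Lark grammar, following the `ChatCompletionCustomToolParam` shape above; the grammar itself is a toy example:

```python
from openai import OpenAI

client = OpenAI()

# A toy Lark grammar that only admits `add <int> <int>` commands.
CALC_GRAMMAR = r"""
start: "add" " " INT " " INT
%import common.INT
"""

completion = client.chat.completions.create(
    model="gpt-5",  # assumed model name
    messages=[{"role": "user", "content": "Add 2 and 3 with the calculator tool."}],
    tools=[
        {
            "type": "custom",
            "custom": {
                "name": "calculator",
                "description": "Executes simple `add` commands.",
                "format": {
                    "type": "grammar",
                    "grammar": {"syntax": "lark", "definition": CALC_GRAMMAR},
                },
            },
        }
    ],
)
```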
src/openai/types/chat/chat_completion_tool.py → src/openai/types/chat/chat_completion_function_tool.py
@@ -5,10 +5,10 @@ from typing_extensions import Literal
 from ..._models import BaseModel
 from ..shared.function_definition import FunctionDefinition
 
-__all__ = ["ChatCompletionTool"]
+__all__ = ["ChatCompletionFunctionTool"]
 
 
-class ChatCompletionTool(BaseModel):
+class ChatCompletionFunctionTool(BaseModel):
     function: FunctionDefinition
 
     type: Literal["function"]
src/openai/types/chat/chat_completion_function_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["ChatCompletionFunctionToolParam"]
+
+
+class ChatCompletionFunctionToolParam(TypedDict, total=False):
+    function: Required[FunctionDefinition]
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
src/openai/types/chat/chat_completion_message_custom_tool_call.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionMessageCustomToolCall", "Custom"]
+
+
+class Custom(BaseModel):
+    input: str
+    """The input for the custom tool call generated by the model."""
+
+    name: str
+    """The name of the custom tool to call."""
+
+
+class ChatCompletionMessageCustomToolCall(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    custom: Custom
+    """The custom tool that the model called."""
+
+    type: Literal["custom"]
+    """The type of the tool. Always `custom`."""
src/openai/types/chat/chat_completion_message_custom_tool_call_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionMessageCustomToolCallParam", "Custom"]
+
+
+class Custom(TypedDict, total=False):
+    input: Required[str]
+    """The input for the custom tool call generated by the model."""
+
+    name: Required[str]
+    """The name of the custom tool to call."""
+
+
+class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the tool call."""
+
+    custom: Required[Custom]
+    """The custom tool that the model called."""
+
+    type: Required[Literal["custom"]]
+    """The type of the tool. Always `custom`."""
src/openai/types/chat/chat_completion_message_function_tool_call.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionMessageFunctionToolCall", "Function"]
+
+
+class Function(BaseModel):
+    arguments: str
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: str
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageFunctionToolCall(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    function: Function
+    """The function that the model called."""
+
+    type: Literal["function"]
+    """The type of the tool. Currently, only `function` is supported."""
src/openai/types/chat/chat_completion_message_function_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionMessageFunctionToolCallParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+    arguments: Required[str]
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the tool call."""
+
+    function: Required[Function]
+    """The function that the model called."""
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
src/openai/types/chat/chat_completion_message_tool_call.py
@@ -1,31 +1,15 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing_extensions import Literal
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
 
-from ..._models import BaseModel
+from ..._utils import PropertyInfo
+from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall
+from .chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall
 
-__all__ = ["ChatCompletionMessageToolCall", "Function"]
+__all__ = ["ChatCompletionMessageToolCall"]
 
-
-class Function(BaseModel):
-    arguments: str
-    """
-    The arguments to call the function with, as generated by the model in JSON
-    format. Note that the model does not always generate valid JSON, and may
-    hallucinate parameters not defined by your function schema. Validate the
-    arguments in your code before calling your function.
-    """
-
-    name: str
-    """The name of the function to call."""
-
-
-class ChatCompletionMessageToolCall(BaseModel):
-    id: str
-    """The ID of the tool call."""
-
-    function: Function
-    """The function that the model called."""
-
-    type: Literal["function"]
-    """The type of the tool. Currently, only `function` is supported."""
+ChatCompletionMessageToolCall: TypeAlias = Annotated[
+    Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall],
+    PropertyInfo(discriminator="type"),
+]
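
Because `ChatCompletionMessageToolCall` is now a discriminated union, downstream code that previously assumed a `.function` attribute should branch on `type` first; a minimal sketch:

```python
from openai.types.chat import ChatCompletionMessageToolCall


def dispatch(tool_call: ChatCompletionMessageToolCall) -> None:
    # `type` is the discriminator between the two variants.
    if tool_call.type == "function":
        print("function call:", tool_call.function.name, tool_call.function.arguments)
    elif tool_call.type == "custom":
        print("custom call:", tool_call.custom.name, tool_call.custom.input)
```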
src/openai/types/chat/chat_completion_message_tool_call_param.py
@@ -2,30 +2,14 @@
 
 from __future__ import annotations
 
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union
+from typing_extensions import TypeAlias
 
-__all__ = ["ChatCompletionMessageToolCallParam", "Function"]
+from .chat_completion_message_custom_tool_call_param import ChatCompletionMessageCustomToolCallParam
+from .chat_completion_message_function_tool_call_param import ChatCompletionMessageFunctionToolCallParam
 
+__all__ = ["ChatCompletionMessageToolCallParam"]
 
-class Function(TypedDict, total=False):
-    arguments: Required[str]
-    """
-    The arguments to call the function with, as generated by the model in JSON
-    format. Note that the model does not always generate valid JSON, and may
-    hallucinate parameters not defined by your function schema. Validate the
-    arguments in your code before calling your function.
-    """
-
-    name: Required[str]
-    """The name of the function to call."""
-
-
-class ChatCompletionMessageToolCallParam(TypedDict, total=False):
-    id: Required[str]
-    """The ID of the tool call."""
-
-    function: Required[Function]
-    """The function that the model called."""
-
-    type: Required[Literal["function"]]
-    """The type of the tool. Currently, only `function` is supported."""
+ChatCompletionMessageToolCallParam: TypeAlias = Union[
+    ChatCompletionMessageFunctionToolCallParam, ChatCompletionMessageCustomToolCallParam
+]
src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionNamedToolChoiceCustomParam", "Custom"]
+
+
+class Custom(TypedDict, total=False):
+    name: Required[str]
+    """The name of the custom tool to call."""
+
+
+class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False):
+    custom: Required[Custom]
+
+    type: Required[Literal["custom"]]
+    """For custom tool calling, the type is always `custom`."""
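
And the corresponding way to force a specific custom tool via `tool_choice`; a sketch assuming a `code_exec` custom tool is declared in `tools`:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5",  # assumed model name
    messages=[{"role": "user", "content": "Run the snippet."}],
    tools=[{"type": "custom", "custom": {"name": "code_exec"}}],
    # Force the model to call the named custom tool.
    tool_choice={"type": "custom", "custom": {"name": "code_exec"}},
)
```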
src/openai/types/chat/chat_completion_named_tool_choice_param.py
@@ -16,4 +16,4 @@ class ChatCompletionNamedToolChoiceParam(TypedDict, total=False):
     function: Required[Function]
 
     type: Required[Literal["function"]]
-    """The type of the tool. Currently, only `function` is supported."""
+    """For function calling, the type is always `function`."""
src/openai/types/chat/chat_completion_stream_options_param.py
@@ -8,6 +8,17 @@ __all__ = ["ChatCompletionStreamOptionsParam"]
 
 
 class ChatCompletionStreamOptionsParam(TypedDict, total=False):
+    include_obfuscation: bool
+    """When true, stream obfuscation will be enabled.
+
+    Stream obfuscation adds random characters to an `obfuscation` field on streaming
+    delta events to normalize payload sizes as a mitigation against certain
+    side-channel attacks. These obfuscation fields are included by default, but
+    add a small amount of overhead to the data stream. You can set
+    `include_obfuscation` to false to optimize for bandwidth if you trust the
+    network links between your application and the OpenAI API.
+    """
+
     include_usage: bool
     """If set, an additional chunk will be streamed before the `data: [DONE]` message.
 
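
For chat completions the new flag sits alongside `include_usage`; a sketch (the usage chunk arrives with an empty `choices` list, hence the guard):

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-5",  # assumed model name
    messages=[{"role": "user", "content": "Stream a short poem."}],
    stream=True,
    stream_options={
        "include_obfuscation": False,  # skip padding on trusted links
        "include_usage": True,  # final chunk carries token usage
    },
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```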
src/openai/types/chat/chat_completion_tool_choice_option_param.py
@@ -6,9 +6,14 @@ from typing import Union
 from typing_extensions import Literal, TypeAlias
 
 from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam
+from .chat_completion_allowed_tool_choice_param import ChatCompletionAllowedToolChoiceParam
+from .chat_completion_named_tool_choice_custom_param import ChatCompletionNamedToolChoiceCustomParam
 
 __all__ = ["ChatCompletionToolChoiceOptionParam"]
 
 ChatCompletionToolChoiceOptionParam: TypeAlias = Union[
-    Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
+    Literal["none", "auto", "required"],
+    ChatCompletionAllowedToolChoiceParam,
+    ChatCompletionNamedToolChoiceParam,
+    ChatCompletionNamedToolChoiceCustomParam,
 ]
src/openai/types/chat/chat_completion_tool_param.py
@@ -2,15 +2,12 @@
 
 from __future__ import annotations
 
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union
+from typing_extensions import TypeAlias
 
-from ..shared_params.function_definition import FunctionDefinition
+from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam
+from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam
 
 __all__ = ["ChatCompletionToolParam"]
 
-
-class ChatCompletionToolParam(TypedDict, total=False):
-    function: Required[FunctionDefinition]
-
-    type: Required[Literal["function"]]
-    """The type of the tool. Currently, only `function` is supported."""
+ChatCompletionToolParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam]
src/openai/types/chat/completion_create_params.py
@@ -185,12 +185,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     """
 
     reasoning_effort: Optional[ReasoningEffort]
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     response_format: ResponseFormat
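
The newly documented `minimal` value trades reasoning depth for latency; a sketch:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-5",  # assumed reasoning-capable model
    messages=[{"role": "user", "content": "Give three rhymes for 'diff'."}],
    reasoning_effort="minimal",  # fastest of minimal/low/medium/high
)
print(completion.choices[0].message.content)
```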
@@ -287,9 +287,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     tools: Iterable[ChatCompletionToolParam]
     """A list of tools the model may call.
 
-    Currently, only functions are supported as a tool. Use this to provide a list of
-    functions the model may generate JSON inputs for. A max of 128 functions are
-    supported.
+    You can provide either
+    [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+    or [function tools](https://platform.openai.com/docs/guides/function-calling).
     """
 
     top_logprobs: Optional[int]
@@ -317,6 +317,14 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+
     web_search_options: WebSearchOptions
     """
     This tool searches the web for relevant results to use in a response. Learn more
src/openai/types/chat/parsed_function_tool_call.py
@@ -2,7 +2,7 @@
 
 from typing import Optional
 
-from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall
+from .chat_completion_message_function_tool_call import Function, ChatCompletionMessageFunctionToolCall
 
 __all__ = ["ParsedFunctionToolCall", "ParsedFunction"]
 
@@ -24,6 +24,6 @@ class ParsedFunction(Function):
     """
 
 
-class ParsedFunctionToolCall(ChatCompletionMessageToolCall):
+class ParsedFunctionToolCall(ChatCompletionMessageFunctionToolCall):
     function: ParsedFunction
     """The function that the model called."""
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -6,10 +6,10 @@ from typing_extensions import Literal, Annotated, TypeAlias
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from ..shared.metadata import Metadata
-from ..chat.chat_completion_tool import ChatCompletionTool
 from ..shared.response_format_text import ResponseFormatText
 from ..responses.easy_input_message import EasyInputMessage
 from ..responses.response_input_text import ResponseInputText
+from ..chat.chat_completion_function_tool import ChatCompletionFunctionTool
 from ..shared.response_format_json_object import ResponseFormatJSONObject
 from ..shared.response_format_json_schema import ResponseFormatJSONSchema
 
@@ -186,7 +186,7 @@ class SamplingParams(BaseModel):
     temperature: Optional[float] = None
     """A higher temperature increases randomness in the outputs."""
 
-    tools: Optional[List[ChatCompletionTool]] = None
+    tools: Optional[List[ChatCompletionFunctionTool]] = None
     """A list of tools the model may call.
 
     Currently, only functions are supported as a tool. Use this to provide a list of
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -6,10 +6,10 @@ from typing import Dict, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
-from ..chat.chat_completion_tool_param import ChatCompletionToolParam
 from ..responses.easy_input_message_param import EasyInputMessageParam
 from ..shared_params.response_format_text import ResponseFormatText
 from ..responses.response_input_text_param import ResponseInputTextParam
+from ..chat.chat_completion_function_tool_param import ChatCompletionFunctionToolParam
 from ..shared_params.response_format_json_object import ResponseFormatJSONObject
 from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
 
@@ -180,7 +180,7 @@ class SamplingParams(TypedDict, total=False):
     temperature: float
     """A higher temperature increases randomness in the outputs."""
 
-    tools: Iterable[ChatCompletionToolParam]
+    tools: Iterable[ChatCompletionFunctionToolParam]
     """A list of tools the model may call.
 
     Currently, only functions are supported as a tool. Use this to provide a list of
src/openai/types/responses/__init__.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 from .tool import Tool as Tool
 from .response import Response as Response
 from .tool_param import ToolParam as ToolParam
+from .custom_tool import CustomTool as CustomTool
 from .computer_tool import ComputerTool as ComputerTool
 from .function_tool import FunctionTool as FunctionTool
 from .response_item import ResponseItem as ResponseItem
@@ -23,15 +24,18 @@ from .response_status import ResponseStatus as ResponseStatus
 from .tool_choice_mcp import ToolChoiceMcp as ToolChoiceMcp
 from .web_search_tool import WebSearchTool as WebSearchTool
 from .file_search_tool import FileSearchTool as FileSearchTool
+from .custom_tool_param import CustomToolParam as CustomToolParam
 from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
 from .easy_input_message import EasyInputMessage as EasyInputMessage
 from .response_item_list import ResponseItemList as ResponseItemList
+from .tool_choice_custom import ToolChoiceCustom as ToolChoiceCustom
 from .computer_tool_param import ComputerToolParam as ComputerToolParam
 from .function_tool_param import FunctionToolParam as FunctionToolParam
 from .response_includable import ResponseIncludable as ResponseIncludable
 from .response_input_file import ResponseInputFile as ResponseInputFile
 from .response_input_item import ResponseInputItem as ResponseInputItem
 from .response_input_text import ResponseInputText as ResponseInputText
+from .tool_choice_allowed import ToolChoiceAllowed as ToolChoiceAllowed
 from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
 from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
 from .response_input_image import ResponseInputImage as ResponseInputImage
@@ -59,12 +63,15 @@ from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageP
 from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent
 from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams
 from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent
+from .tool_choice_custom_param import ToolChoiceCustomParam as ToolChoiceCustomParam
 from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent
+from .response_custom_tool_call import ResponseCustomToolCall as ResponseCustomToolCall
 from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent
 from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam
 from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam
 from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam
 from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent
+from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam
 from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
 from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
 from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
@@ -84,8 +91,10 @@ from .response_output_refusal_param import ResponseOutputRefusalParam as Respons
 from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
 from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
 from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent
+from .response_custom_tool_call_param import ResponseCustomToolCallParam as ResponseCustomToolCallParam
 from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_custom_tool_call_output import ResponseCustomToolCallOutput as ResponseCustomToolCallOutput
 from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
 from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
 from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
@@ -105,6 +114,9 @@ from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEven
 from .response_audio_transcript_delta_event import (
     ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
 )
+from .response_custom_tool_call_output_param import (
+    ResponseCustomToolCallOutputParam as ResponseCustomToolCallOutputParam,
+)
 from .response_mcp_call_arguments_done_event import (
     ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent,
 )
@@ -153,6 +165,9 @@ from .response_input_message_content_list_param import (
 from .response_mcp_list_tools_in_progress_event import (
     ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent,
 )
+from .response_custom_tool_call_input_done_event import (
+    ResponseCustomToolCallInputDoneEvent as ResponseCustomToolCallInputDoneEvent,
+)
 from .response_reasoning_summary_part_done_event import (
     ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent,
 )
@@ -162,6 +177,9 @@ from .response_reasoning_summary_text_done_event import (
 from .response_web_search_call_in_progress_event import (
     ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
 )
+from .response_custom_tool_call_input_delta_event import (
+    ResponseCustomToolCallInputDeltaEvent as ResponseCustomToolCallInputDeltaEvent,
+)
 from .response_file_search_call_in_progress_event import (
     ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent,
 )
src/openai/types/responses/custom_tool.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..shared.custom_tool_input_format import CustomToolInputFormat
+
+__all__ = ["CustomTool"]
+
+
+class CustomTool(BaseModel):
+    name: str
+    """The name of the custom tool, used to identify it in tool calls."""
+
+    type: Literal["custom"]
+    """The type of the custom tool. Always `custom`."""
+
+    description: Optional[str] = None
+    """Optional description of the custom tool, used to provide more context."""
+
+    format: Optional[CustomToolInputFormat] = None
+    """The input format for the custom tool. Default is unconstrained text."""
src/openai/types/responses/custom_tool_param.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.custom_tool_input_format import CustomToolInputFormat
+
+__all__ = ["CustomToolParam"]
+
+
+class CustomToolParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the custom tool, used to identify it in tool calls."""
+
+    type: Required[Literal["custom"]]
+    """The type of the custom tool. Always `custom`."""
+
+    description: str
+    """Optional description of the custom tool, used to provide more context."""
+
+    format: CustomToolInputFormat
+    """The input format for the custom tool. Default is unconstrained text."""
src/openai/types/responses/parsed_response.py
@@ -19,6 +19,7 @@ from .response_output_text import ResponseOutputText
 from .response_output_message import ResponseOutputMessage
 from .response_output_refusal import ResponseOutputRefusal
 from .response_reasoning_item import ResponseReasoningItem
+from .response_custom_tool_call import ResponseCustomToolCall
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
@@ -73,6 +74,7 @@ ParsedResponseOutputItem: TypeAlias = Annotated[
         LocalShellCallAction,
         McpListTools,
         ResponseCodeInterpreterToolCall,
+        ResponseCustomToolCall,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/response.py
@@ -13,7 +13,9 @@ from .tool_choice_mcp import ToolChoiceMcp
 from ..shared.metadata import Metadata
 from ..shared.reasoning import Reasoning
 from .tool_choice_types import ToolChoiceTypes
+from .tool_choice_custom import ToolChoiceCustom
 from .response_input_item import ResponseInputItem
+from .tool_choice_allowed import ToolChoiceAllowed
 from .tool_choice_options import ToolChoiceOptions
 from .response_output_item import ResponseOutputItem
 from .response_text_config import ResponseTextConfig
@@ -28,7 +30,9 @@ class IncompleteDetails(BaseModel):
     """The reason why the response is incomplete."""
 
 
-ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp]
+ToolChoice: TypeAlias = Union[
+    ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom
+]
 
 
 class Response(BaseModel):
@@ -116,8 +120,10 @@ class Response(BaseModel):
       Learn more about
       [built-in tools](https://platform.openai.com/docs/guides/tools).
     - **Function calls (custom tools)**: Functions that are defined by you, enabling
-      the model to call your own code. Learn more about
+      the model to call your own code with strongly typed arguments and outputs.
+      Learn more about
       [function calling](https://platform.openai.com/docs/guides/function-calling).
+      You can also use custom tools to call your own code.
     """
 
     top_p: Optional[float] = None
@@ -130,8 +136,8 @@ class Response(BaseModel):
     """
 
     background: Optional[bool] = None
-    """Whether to run the model response in the background.
-
+    """
+    Whether to run the model response in the background.
     [Learn more](https://platform.openai.com/docs/guides/background).
     """
 
@@ -253,18 +259,3 @@ class Response(BaseModel):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
-    @property
-    def output_text(self) -> str:
-        """Convenience property that aggregates all `output_text` items from the `output`
-        list.
-
-        If no `output_text` content blocks exist, then an empty string is returned.
-        """
-        texts: List[str] = []
-        for output in self.output:
-            if output.type == "message":
-                for content in output.content:
-                    if content.type == "output_text":
-                        texts.append(content.text)
-
-        return "".join(texts)
src/openai/types/responses/response_create_params.py
@@ -14,12 +14,15 @@ from .tool_choice_mcp_param import ToolChoiceMcpParam
 from ..shared_params.metadata import Metadata
 from .tool_choice_types_param import ToolChoiceTypesParam
 from ..shared_params.reasoning import Reasoning
+from .tool_choice_custom_param import ToolChoiceCustomParam
+from .tool_choice_allowed_param import ToolChoiceAllowedParam
 from .response_text_config_param import ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam
 from ..shared_params.responses_model import ResponsesModel
 
 __all__ = [
     "ResponseCreateParamsBase",
+    "StreamOptions",
     "ToolChoice",
     "ResponseCreateParamsNonStreaming",
     "ResponseCreateParamsStreaming",
@@ -28,8 +31,8 @@ __all__ = [
 
 class ResponseCreateParamsBase(TypedDict, total=False):
     background: Optional[bool]
-    """Whether to run the model response in the background.
-
+    """
+    Whether to run the model response in the background.
     [Learn more](https://platform.openai.com/docs/guides/background).
     """
 
@@ -169,6 +172,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     store: Optional[bool]
     """Whether to store the generated model response for later retrieval via API."""
 
+    stream_options: Optional[StreamOptions]
+    """Options for streaming responses. Only set this when you set `stream: true`."""
+
     temperature: Optional[float]
     """What sampling temperature to use, between 0 and 2.
 
@@ -207,8 +213,10 @@ class ResponseCreateParamsBase(TypedDict, total=False):
       Learn more about
       [built-in tools](https://platform.openai.com/docs/guides/tools).
     - **Function calls (custom tools)**: Functions that are defined by you, enabling
-      the model to call your own code. Learn more about
+      the model to call your own code with strongly typed arguments and outputs.
+      Learn more about
       [function calling](https://platform.openai.com/docs/guides/function-calling).
+      You can also use custom tools to call your own code.
     """
 
     top_logprobs: Optional[int]
@@ -245,8 +253,36 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
+
+
+class StreamOptions(TypedDict, total=False):
+    include_obfuscation: bool
+    """When true, stream obfuscation will be enabled.
+
+    Stream obfuscation adds random characters to an `obfuscation` field on streaming
+    delta events to normalize payload sizes as a mitigation to certain side-channel
+    attacks. These obfuscation fields are included by default, but add a small
+    amount of overhead to the data stream. You can set `include_obfuscation` to
+    false to optimize for bandwidth if you trust the network links between your
+    application and the OpenAI API.
+    """
+
 
-ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam, ToolChoiceMcpParam]
+ToolChoice: TypeAlias = Union[
+    ToolChoiceOptions,
+    ToolChoiceAllowedParam,
+    ToolChoiceTypesParam,
+    ToolChoiceFunctionParam,
+    ToolChoiceMcpParam,
+    ToolChoiceCustomParam,
+]
 
 
 class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
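
The new `stream_options` and `verbosity` parameters surface directly on `responses.create`, alongside the widened `ToolChoice` union. A hedged sketch combining them, matching the shapes the test changes below exercise (the prompt is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-5",
    input="Summarize the plot of Hamlet in two sentences.",
    stream=True,
    stream_options={"include_obfuscation": False},  # trusted network: skip padding
    verbosity="low",  # terse output; new parameter in this commit
)
for event in stream:
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
```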
src/openai/types/responses/response_custom_tool_call.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCall"]
+
+
+class ResponseCustomToolCall(BaseModel):
+    call_id: str
+    """An identifier used to map this custom tool call to a tool call output."""
+
+    input: str
+    """The input for the custom tool call generated by the model."""
+
+    name: str
+    """The name of the custom tool being called."""
+
+    type: Literal["custom_tool_call"]
+    """The type of the custom tool call. Always `custom_tool_call`."""
+
+    id: Optional[str] = None
+    """The unique ID of the custom tool call in the OpenAI platform."""
src/openai/types/responses/response_custom_tool_call_input_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallInputDeltaEvent"]
+
+
+class ResponseCustomToolCallInputDeltaEvent(BaseModel):
+    delta: str
+    """The incremental input data (delta) for the custom tool call."""
+
+    item_id: str
+    """Unique identifier for the API item associated with this event."""
+
+    output_index: int
+    """The index of the output this delta applies to."""
+
+    sequence_number: int
+    """The sequence number of this event."""
+
+    type: Literal["response.custom_tool_call_input.delta"]
+    """The event type identifier."""
src/openai/types/responses/response_custom_tool_call_input_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallInputDoneEvent"]
+
+
+class ResponseCustomToolCallInputDoneEvent(BaseModel):
+    input: str
+    """The complete input data for the custom tool call."""
+
+    item_id: str
+    """Unique identifier for the API item associated with this event."""
+
+    output_index: int
+    """The index of the output this event applies to."""
+
+    sequence_number: int
+    """The sequence number of this event."""
+
+    type: Literal["response.custom_tool_call_input.done"]
+    """The event type identifier."""
src/openai/types/responses/response_custom_tool_call_output.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallOutput"]
+
+
+class ResponseCustomToolCallOutput(BaseModel):
+    call_id: str
+    """The call ID, used to map this custom tool call output to a custom tool call."""
+
+    output: str
+    """The output from the custom tool call generated by your code."""
+
+    type: Literal["custom_tool_call_output"]
+    """The type of the custom tool call output. Always `custom_tool_call_output`."""
+
+    id: Optional[str] = None
+    """The unique ID of the custom tool call output in the OpenAI platform."""
src/openai/types/responses/response_custom_tool_call_output_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCustomToolCallOutputParam"]
+
+
+class ResponseCustomToolCallOutputParam(TypedDict, total=False):
+    call_id: Required[str]
+    """The call ID, used to map this custom tool call output to a custom tool call."""
+
+    output: Required[str]
+    """The output from the custom tool call generated by your code."""
+
+    type: Required[Literal["custom_tool_call_output"]]
+    """The type of the custom tool call output. Always `custom_tool_call_output`."""
+
+    id: str
+    """The unique ID of the custom tool call output in the OpenAI platform."""
src/openai/types/responses/response_custom_tool_call_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCustomToolCallParam"]
+
+
+class ResponseCustomToolCallParam(TypedDict, total=False):
+    call_id: Required[str]
+    """An identifier used to map this custom tool call to a tool call output."""
+
+    input: Required[str]
+    """The input for the custom tool call generated by the model."""
+
+    name: Required[str]
+    """The name of the custom tool being called."""
+
+    type: Required[Literal["custom_tool_call"]]
+    """The type of the custom tool call. Always `custom_tool_call`."""
+
+    id: str
+    """The unique ID of the custom tool call in the OpenAI platform."""
src/openai/types/responses/response_input_item.py
@@ -8,10 +8,12 @@ from ..._models import BaseModel
 from .easy_input_message import EasyInputMessage
 from .response_output_message import ResponseOutputMessage
 from .response_reasoning_item import ResponseReasoningItem
+from .response_custom_tool_call import ResponseCustomToolCall
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
 from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_custom_tool_call_output import ResponseCustomToolCallOutput
 from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
 from .response_input_message_content_list import ResponseInputMessageContentList
 from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
@@ -299,6 +301,8 @@ ResponseInputItem: TypeAlias = Annotated[
         McpApprovalRequest,
         McpApprovalResponse,
         McpCall,
+        ResponseCustomToolCallOutput,
+        ResponseCustomToolCall,
         ItemReference,
     ],
     PropertyInfo(discriminator="type"),
src/openai/types/responses/response_input_item_param.py
@@ -8,10 +8,12 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
 from .easy_input_message_param import EasyInputMessageParam
 from .response_output_message_param import ResponseOutputMessageParam
 from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_custom_tool_call_param import ResponseCustomToolCallParam
 from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
 from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -298,5 +300,7 @@ ResponseInputItemParam: TypeAlias = Union[
     McpApprovalRequest,
     McpApprovalResponse,
     McpCall,
+    ResponseCustomToolCallOutputParam,
+    ResponseCustomToolCallParam,
     ItemReference,
 ]
src/openai/types/responses/response_input_param.py
@@ -8,10 +8,12 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
 from .easy_input_message_param import EasyInputMessageParam
 from .response_output_message_param import ResponseOutputMessageParam
 from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_custom_tool_call_param import ResponseCustomToolCallParam
 from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
 from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -299,6 +301,8 @@ ResponseInputItemParam: TypeAlias = Union[
     McpApprovalRequest,
     McpApprovalResponse,
     McpCall,
+    ResponseCustomToolCallOutputParam,
+    ResponseCustomToolCallParam,
     ItemReference,
 ]
 
src/openai/types/responses/response_output_item.py
@@ -7,6 +7,7 @@ from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from .response_output_message import ResponseOutputMessage
 from .response_reasoning_item import ResponseReasoningItem
+from .response_custom_tool_call import ResponseCustomToolCall
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
@@ -161,6 +162,7 @@ ResponseOutputItem: TypeAlias = Annotated[
         McpCall,
         McpListTools,
         McpApprovalRequest,
+        ResponseCustomToolCall,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/response_retrieve_params.py
@@ -17,6 +17,17 @@ class ResponseRetrieveParamsBase(TypedDict, total=False):
     See the `include` parameter for Response creation above for more information.
     """
 
+    include_obfuscation: bool
+    """When true, stream obfuscation will be enabled.
+
+    Stream obfuscation adds random characters to an `obfuscation` field on streaming
+    delta events to normalize payload sizes as a mitigation to certain side-channel
+    attacks. These obfuscation fields are included by default, but add a small
+    amount of overhead to the data stream. You can set `include_obfuscation` to
+    false to optimize for bandwidth if you trust the network links between your
+    application and the OpenAI API.
+    """
+
     starting_after: int
     """The sequence number of the event after which to start streaming."""
 
src/openai/types/responses/response_stream_event.py
@@ -40,9 +40,11 @@ from .response_file_search_call_completed_event import ResponseFileSearchCallCom
 from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
 from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent
 from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent
+from .response_custom_tool_call_input_done_event import ResponseCustomToolCallInputDoneEvent
 from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
 from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
 from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
+from .response_custom_tool_call_input_delta_event import ResponseCustomToolCallInputDeltaEvent
 from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
 from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
 from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent
@@ -111,6 +113,8 @@ ResponseStreamEvent: TypeAlias = Annotated[
         ResponseMcpListToolsInProgressEvent,
         ResponseOutputTextAnnotationAddedEvent,
         ResponseQueuedEvent,
+        ResponseCustomToolCallInputDeltaEvent,
+        ResponseCustomToolCallInputDoneEvent,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/tool.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
+from .custom_tool import CustomTool
 from .computer_tool import ComputerTool
 from .function_tool import FunctionTool
 from .web_search_tool import WebSearchTool
@@ -177,6 +178,16 @@ class LocalShell(BaseModel):
 
 
 Tool: TypeAlias = Annotated[
-    Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell],
+    Union[
+        FunctionTool,
+        FileSearchTool,
+        WebSearchTool,
+        ComputerTool,
+        Mcp,
+        CodeInterpreter,
+        ImageGeneration,
+        LocalShell,
+        CustomTool,
+    ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/tool_choice_allowed.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceAllowed"]
+
+
+class ToolChoiceAllowed(BaseModel):
+    mode: Literal["auto", "required"]
+    """Constrains the tools available to the model to a pre-defined set.
+
+    `auto` allows the model to pick from among the allowed tools and generate a
+    message.
+
+    `required` requires the model to call one or more of the allowed tools.
+    """
+
+    tools: List[Dict[str, object]]
+    """A list of tool definitions that the model should be allowed to call.
+
+    For the Responses API, the list of tool definitions might look like:
+
+    ```json
+    [
+      { "type": "function", "name": "get_weather" },
+      { "type": "mcp", "server_label": "deepwiki" },
+      { "type": "image_generation" }
+    ]
+    ```
+    """
+
+    type: Literal["allowed_tools"]
+    """Allowed tool configuration type. Always `allowed_tools`."""
src/openai/types/responses/tool_choice_allowed_param.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceAllowedParam"]
+
+
+class ToolChoiceAllowedParam(TypedDict, total=False):
+    mode: Required[Literal["auto", "required"]]
+    """Constrains the tools available to the model to a pre-defined set.
+
+    `auto` allows the model to pick from among the allowed tools and generate a
+    message.
+
+    `required` requires the model to call one or more of the allowed tools.
+    """
+
+    tools: Required[Iterable[Dict[str, object]]]
+    """A list of tool definitions that the model should be allowed to call.
+
+    For the Responses API, the list of tool definitions might look like:
+
+    ```json
+    [
+      { "type": "function", "name": "get_weather" },
+      { "type": "mcp", "server_label": "deepwiki" },
+      { "type": "image_generation" }
+    ]
+    ```
+    """
+
+    type: Required[Literal["allowed_tools"]]
+    """Allowed tool configuration type. Always `allowed_tools`."""
src/openai/types/responses/tool_choice_custom.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ToolChoiceCustom"]
+
+
+class ToolChoiceCustom(BaseModel):
+    name: str
+    """The name of the custom tool to call."""
+
+    type: Literal["custom"]
+    """For custom tool calling, the type is always `custom`."""
src/openai/types/responses/tool_choice_custom_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ToolChoiceCustomParam"]
+
+
+class ToolChoiceCustomParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the custom tool to call."""
+
+    type: Required[Literal["custom"]]
+    """For custom tool calling, the type is always `custom`."""
src/openai/types/responses/tool_param.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
+from .custom_tool_param import CustomToolParam
 from .computer_tool_param import ComputerToolParam
 from .function_tool_param import FunctionToolParam
 from .web_search_tool_param import WebSearchToolParam
@@ -186,6 +187,7 @@ ToolParam: TypeAlias = Union[
     CodeInterpreter,
     ImageGeneration,
     LocalShell,
+    CustomToolParam,
 ]
 
 
src/openai/types/shared/__init__.py
@@ -12,5 +12,8 @@ from .comparison_filter import ComparisonFilter as ComparisonFilter
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
 from .response_format_text import ResponseFormatText as ResponseFormatText
+from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat
 from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
 from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
+from .response_format_text_python import ResponseFormatTextPython as ResponseFormatTextPython
+from .response_format_text_grammar import ResponseFormatTextGrammar as ResponseFormatTextGrammar
src/openai/types/shared/chat_model.py
@@ -5,6 +5,13 @@ from typing_extensions import Literal, TypeAlias
 __all__ = ["ChatModel"]
 
 ChatModel: TypeAlias = Literal[
+    "gpt-5",
+    "gpt-5-mini",
+    "gpt-5-nano",
+    "gpt-5-2025-08-07",
+    "gpt-5-mini-2025-08-07",
+    "gpt-5-nano-2025-08-07",
+    "gpt-5-chat-latest",
     "gpt-4.1",
     "gpt-4.1-mini",
     "gpt-4.1-nano",
src/openai/types/shared/custom_tool_input_format.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["CustomToolInputFormat", "Text", "Grammar"]
+
+
+class Text(BaseModel):
+    type: Literal["text"]
+    """Unconstrained text format. Always `text`."""
+
+
+class Grammar(BaseModel):
+    definition: str
+    """The grammar definition."""
+
+    syntax: Literal["lark", "regex"]
+    """The syntax of the grammar definition. One of `lark` or `regex`."""
+
+    type: Literal["grammar"]
+    """Grammar format. Always `grammar`."""
+
+
+CustomToolInputFormat: TypeAlias = Annotated[Union[Text, Grammar], PropertyInfo(discriminator="type")]
src/openai/types/shared/reasoning.py
@@ -11,12 +11,12 @@ __all__ = ["Reasoning"]
 
 class Reasoning(BaseModel):
     effort: Optional[ReasoningEffort] = None
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None
src/openai/types/shared/reasoning_effort.py
@@ -5,4 +5,4 @@ from typing_extensions import Literal, TypeAlias
 
 __all__ = ["ReasoningEffort"]
 
-ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
+ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]]
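
On the Responses side the same new value flows through the `reasoning` object, matching the test changes below:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5-nano",
    input="What is 2 + 2?",
    reasoning={"effort": "minimal"},  # skip most deliberation on trivial asks
)
print(response.output)
```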
src/openai/types/shared/response_format_text_grammar.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatTextGrammar"]
+
+
+class ResponseFormatTextGrammar(BaseModel):
+    grammar: str
+    """The custom grammar for the model to follow."""
+
+    type: Literal["grammar"]
+    """The type of response format being defined. Always `grammar`."""
src/openai/types/shared/response_format_text_python.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatTextPython"]
+
+
+class ResponseFormatTextPython(BaseModel):
+    type: Literal["python"]
+    """The type of response format being defined. Always `python`."""
src/openai/types/shared_params/__init__.py
@@ -10,5 +10,6 @@ from .comparison_filter import ComparisonFilter as ComparisonFilter
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
 from .response_format_text import ResponseFormatText as ResponseFormatText
+from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat
 from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
 from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
src/openai/types/shared_params/chat_model.py
@@ -7,6 +7,13 @@ from typing_extensions import Literal, TypeAlias
 __all__ = ["ChatModel"]
 
 ChatModel: TypeAlias = Literal[
+    "gpt-5",
+    "gpt-5-mini",
+    "gpt-5-nano",
+    "gpt-5-2025-08-07",
+    "gpt-5-mini-2025-08-07",
+    "gpt-5-nano-2025-08-07",
+    "gpt-5-chat-latest",
     "gpt-4.1",
     "gpt-4.1-mini",
     "gpt-4.1-nano",
src/openai/types/shared_params/custom_tool_input_format.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["CustomToolInputFormat", "Text", "Grammar"]
+
+
+class Text(TypedDict, total=False):
+    type: Required[Literal["text"]]
+    """Unconstrained text format. Always `text`."""
+
+
+class Grammar(TypedDict, total=False):
+    definition: Required[str]
+    """The grammar definition."""
+
+    syntax: Required[Literal["lark", "regex"]]
+    """The syntax of the grammar definition. One of `lark` or `regex`."""
+
+    type: Required[Literal["grammar"]]
+    """Grammar format. Always `grammar`."""
+
+
+CustomToolInputFormat: TypeAlias = Union[Text, Grammar]
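
A grammar format constrains the raw text a custom tool receives. A hedged sketch using the `regex` syntax (the tool name and pattern are illustrative; `lark` grammars follow the same shape with a grammar definition string):

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="Pick a number between 1 and 100 and call the guess tool with it.",
    tools=[
        {
            "type": "custom",
            "name": "guess",  # illustrative
            "description": "Receives a single integer guess.",
            "format": {
                "type": "grammar",
                "syntax": "regex",
                "definition": r"\d{1,3}",  # constrain input to 1-3 digits
            },
        }
    ],
)
```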
src/openai/types/shared_params/reasoning.py
@@ -12,12 +12,12 @@ __all__ = ["Reasoning"]
 
 class Reasoning(TypedDict, total=False):
     effort: Optional[ReasoningEffort]
-    """**o-series models only**
-
+    """
     Constrains effort on reasoning for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-    supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-    result in faster responses and fewer tokens used on reasoning in a response.
+    supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+    effort can result in faster responses and fewer tokens used on reasoning in a
+    response.
     """
 
     generate_summary: Optional[Literal["auto", "concise", "detailed"]]
src/openai/types/shared_params/reasoning_effort.py
@@ -7,4 +7,4 @@ from typing_extensions import Literal, TypeAlias
 
 __all__ = ["ReasoningEffort"]
 
-ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]]
+ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]]
src/openai/types/__init__.py
@@ -18,8 +18,11 @@ from .shared import (
     FunctionDefinition as FunctionDefinition,
     FunctionParameters as FunctionParameters,
     ResponseFormatText as ResponseFormatText,
+    CustomToolInputFormat as CustomToolInputFormat,
     ResponseFormatJSONObject as ResponseFormatJSONObject,
     ResponseFormatJSONSchema as ResponseFormatJSONSchema,
+    ResponseFormatTextPython as ResponseFormatTextPython,
+    ResponseFormatTextGrammar as ResponseFormatTextGrammar,
 )
 from .upload import Upload as Upload
 from .embedding import Embedding as Embedding
tests/api_resources/beta/threads/test_runs.py
@@ -59,7 +59,7 @@ class TestRuns:
                 metadata={"foo": "string"},
                 model="string",
                 parallel_tool_calls=True,
-                reasoning_effort="low",
+                reasoning_effort="minimal",
                 response_format="auto",
                 stream=False,
                 temperature=1,
@@ -150,7 +150,7 @@ class TestRuns:
                 metadata={"foo": "string"},
                 model="string",
                 parallel_tool_calls=True,
-                reasoning_effort="low",
+                reasoning_effort="minimal",
                 response_format="auto",
                 temperature=1,
                 tool_choice="none",
@@ -609,7 +609,7 @@ class TestAsyncRuns:
                 metadata={"foo": "string"},
                 model="string",
                 parallel_tool_calls=True,
-                reasoning_effort="low",
+                reasoning_effort="minimal",
                 response_format="auto",
                 stream=False,
                 temperature=1,
@@ -700,7 +700,7 @@ class TestAsyncRuns:
                 metadata={"foo": "string"},
                 model="string",
                 parallel_tool_calls=True,
-                reasoning_effort="low",
+                reasoning_effort="minimal",
                 response_format="auto",
                 temperature=1,
                 tool_choice="none",
tests/api_resources/beta/test_assistants.py
@@ -36,7 +36,7 @@ class TestAssistants:
             instructions="instructions",
             metadata={"foo": "string"},
             name="name",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format="auto",
             temperature=1,
             tool_resources={
@@ -135,7 +135,7 @@ class TestAssistants:
             metadata={"foo": "string"},
             model="string",
             name="name",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format="auto",
             temperature=1,
             tool_resources={
@@ -272,7 +272,7 @@ class TestAsyncAssistants:
             instructions="instructions",
             metadata={"foo": "string"},
             name="name",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format="auto",
             temperature=1,
             tool_resources={
@@ -371,7 +371,7 @@ class TestAsyncAssistants:
             metadata={"foo": "string"},
             model="string",
             name="name",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format="auto",
             temperature=1,
             tool_resources={
tests/api_resources/chat/test_completions.py
@@ -73,7 +73,7 @@ class TestCompletions:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
             seed=-9007199254740991,
@@ -81,7 +81,10 @@ class TestCompletions:
             stop="\n",
             store=True,
             stream=False,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             temperature=1,
             tool_choice="none",
             tools=[
@@ -98,6 +101,7 @@ class TestCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            verbosity="low",
             web_search_options={
                 "search_context_size": "low",
                 "user_location": {
@@ -202,14 +206,17 @@ class TestCompletions:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
             seed=-9007199254740991,
             service_tier="auto",
             stop="\n",
             store=True,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             temperature=1,
             tool_choice="none",
             tools=[
@@ -226,6 +233,7 @@ class TestCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            verbosity="low",
             web_search_options={
                 "search_context_size": "low",
                 "user_location": {
@@ -506,7 +514,7 @@ class TestAsyncCompletions:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
             seed=-9007199254740991,
@@ -514,7 +522,10 @@ class TestAsyncCompletions:
             stop="\n",
             store=True,
             stream=False,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             temperature=1,
             tool_choice="none",
             tools=[
@@ -531,6 +542,7 @@ class TestAsyncCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            verbosity="low",
             web_search_options={
                 "search_context_size": "low",
                 "user_location": {
@@ -635,14 +647,17 @@ class TestAsyncCompletions:
             },
             presence_penalty=-2,
             prompt_cache_key="prompt-cache-key-1234",
-            reasoning_effort="low",
+            reasoning_effort="minimal",
             response_format={"type": "text"},
             safety_identifier="safety-identifier-1234",
             seed=-9007199254740991,
             service_tier="auto",
             stop="\n",
             store=True,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             temperature=1,
             tool_choice="none",
             tools=[
@@ -659,6 +674,7 @@ class TestAsyncCompletions:
             top_logprobs=0,
             top_p=1,
             user="user-1234",
+            verbosity="low",
             web_search_options={
                 "search_context_size": "low",
                 "user_location": {
tests/api_resources/test_completions.py
@@ -41,7 +41,10 @@ class TestCompletions:
             seed=0,
             stop="\n",
             stream=False,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             suffix="test.",
             temperature=1,
             top_p=1,
@@ -100,7 +103,10 @@ class TestCompletions:
             presence_penalty=-2,
             seed=0,
             stop="\n",
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             suffix="test.",
             temperature=1,
             top_p=1,
@@ -165,7 +171,10 @@ class TestAsyncCompletions:
             seed=0,
             stop="\n",
             stream=False,
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             suffix="test.",
             temperature=1,
             top_p=1,
@@ -224,7 +233,10 @@ class TestAsyncCompletions:
             presence_penalty=-2,
             seed=0,
             stop="\n",
-            stream_options={"include_usage": True},
+            stream_options={
+                "include_obfuscation": True,
+                "include_usage": True,
+            },
             suffix="test.",
             temperature=1,
             top_p=1,
tests/api_resources/test_responses.py
@@ -45,7 +45,7 @@ class TestResponses:
             },
             prompt_cache_key="prompt-cache-key-1234",
             reasoning={
-                "effort": "low",
+                "effort": "minimal",
                 "generate_summary": "auto",
                 "summary": "auto",
             },
@@ -53,6 +53,7 @@ class TestResponses:
             service_tier="auto",
             store=True,
             stream=False,
+            stream_options={"include_obfuscation": True},
             temperature=1,
             text={"format": {"type": "text"}},
             tool_choice="none",
@@ -69,6 +70,7 @@ class TestResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
+            verbosity="low",
         )
         assert_matches_type(Response, response, path=["response"])
 
@@ -120,13 +122,14 @@ class TestResponses:
             },
             prompt_cache_key="prompt-cache-key-1234",
             reasoning={
-                "effort": "low",
+                "effort": "minimal",
                 "generate_summary": "auto",
                 "summary": "auto",
             },
             safety_identifier="safety-identifier-1234",
             service_tier="auto",
             store=True,
+            stream_options={"include_obfuscation": True},
             temperature=1,
             text={"format": {"type": "text"}},
             tool_choice="none",
@@ -143,6 +146,7 @@ class TestResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
+            verbosity="low",
         )
         response_stream.response.close()
 
@@ -181,6 +185,7 @@ class TestResponses:
         response = client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             include=["code_interpreter_call.outputs"],
+            include_obfuscation=True,
             starting_after=0,
             stream=False,
         )
@@ -231,6 +236,7 @@ class TestResponses:
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             stream=True,
             include=["code_interpreter_call.outputs"],
+            include_obfuscation=True,
             starting_after=0,
         )
         response_stream.response.close()
@@ -386,7 +392,7 @@ class TestAsyncResponses:
             },
             prompt_cache_key="prompt-cache-key-1234",
             reasoning={
-                "effort": "low",
+                "effort": "minimal",
                 "generate_summary": "auto",
                 "summary": "auto",
             },
@@ -394,6 +400,7 @@ class TestAsyncResponses:
             service_tier="auto",
             store=True,
             stream=False,
+            stream_options={"include_obfuscation": True},
             temperature=1,
             text={"format": {"type": "text"}},
             tool_choice="none",
@@ -410,6 +417,7 @@ class TestAsyncResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
+            verbosity="low",
         )
         assert_matches_type(Response, response, path=["response"])
 
@@ -461,13 +469,14 @@ class TestAsyncResponses:
             },
             prompt_cache_key="prompt-cache-key-1234",
             reasoning={
-                "effort": "low",
+                "effort": "minimal",
                 "generate_summary": "auto",
                 "summary": "auto",
             },
             safety_identifier="safety-identifier-1234",
             service_tier="auto",
             store=True,
+            stream_options={"include_obfuscation": True},
             temperature=1,
             text={"format": {"type": "text"}},
             tool_choice="none",
@@ -484,6 +493,7 @@ class TestAsyncResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
+            verbosity="low",
         )
         await response_stream.response.aclose()
 
@@ -522,6 +532,7 @@ class TestAsyncResponses:
         response = await async_client.responses.retrieve(
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             include=["code_interpreter_call.outputs"],
+            include_obfuscation=True,
             starting_after=0,
             stream=False,
         )
@@ -572,6 +583,7 @@ class TestAsyncResponses:
             response_id="resp_677efb5139a88190b512bc3fef8e535d",
             stream=True,
             include=["code_interpreter_call.outputs"],
+            include_obfuscation=True,
             starting_after=0,
         )
         await response_stream.response.aclose()
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml
-openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728
-config_hash: aeff9289bd7f8c8482e4d738c3c2fde1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml
+openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
+config_hash: 9a64321968e21ed72f5c0e02164ea00d
api.md
@@ -6,6 +6,7 @@ from openai.types import (
     ChatModel,
     ComparisonFilter,
     CompoundFilter,
+    CustomToolInputFormat,
     ErrorObject,
     FunctionDefinition,
     FunctionParameters,
@@ -15,6 +16,8 @@ from openai.types import (
     ResponseFormatJSONObject,
     ResponseFormatJSONSchema,
     ResponseFormatText,
+    ResponseFormatTextGrammar,
+    ResponseFormatTextPython,
     ResponsesModel,
 )
 ```
@@ -46,6 +49,7 @@ Types:
 ```python
 from openai.types.chat import (
     ChatCompletion,
+    ChatCompletionAllowedToolChoice,
     ChatCompletionAssistantMessageParam,
     ChatCompletionAudio,
     ChatCompletionAudioParam,
@@ -55,15 +59,20 @@ from openai.types.chat import (
     ChatCompletionContentPartInputAudio,
     ChatCompletionContentPartRefusal,
     ChatCompletionContentPartText,
+    ChatCompletionCustomTool,
     ChatCompletionDeleted,
     ChatCompletionDeveloperMessageParam,
     ChatCompletionFunctionCallOption,
     ChatCompletionFunctionMessageParam,
+    ChatCompletionFunctionTool,
     ChatCompletionMessage,
+    ChatCompletionMessageCustomToolCall,
+    ChatCompletionMessageFunctionToolCall,
     ChatCompletionMessageParam,
     ChatCompletionMessageToolCall,
     ChatCompletionModality,
     ChatCompletionNamedToolChoice,
+    ChatCompletionNamedToolChoiceCustom,
     ChatCompletionPredictionContent,
     ChatCompletionRole,
     ChatCompletionStoreMessage,
@@ -74,6 +83,7 @@ from openai.types.chat import (
     ChatCompletionToolChoiceOption,
     ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
+    ChatCompletionAllowedTools,
     ChatCompletionReasoningEffort,
 )
 ```
@@ -719,6 +729,7 @@ Types:
 ```python
 from openai.types.responses import (
     ComputerTool,
+    CustomTool,
     EasyInputMessage,
     FileSearchTool,
     FunctionTool,
@@ -741,6 +752,10 @@ from openai.types.responses import (
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
     ResponseCreatedEvent,
+    ResponseCustomToolCall,
+    ResponseCustomToolCallInputDeltaEvent,
+    ResponseCustomToolCallInputDoneEvent,
+    ResponseCustomToolCallOutput,
     ResponseError,
     ResponseErrorEvent,
     ResponseFailedEvent,
@@ -810,6 +825,8 @@ from openai.types.responses import (
     ResponseWebSearchCallInProgressEvent,
     ResponseWebSearchCallSearchingEvent,
     Tool,
+    ToolChoiceAllowed,
+    ToolChoiceCustom,
     ToolChoiceFunction,
     ToolChoiceMcp,
     ToolChoiceOptions,