Commit f4e41b87

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-08-08 22:56:34
fix(client): fix verbosity parameter location in Responses
Fixes an error caused by the unsupported top-level `verbosity` parameter by correctly placing it inside the `text` parameter.
1 parent 09f98ac
Changed files (6)
src/openai/resources/responses/responses.py
@@ -102,7 +102,6 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -291,10 +290,6 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -335,7 +330,6 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -524,10 +518,6 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -568,7 +558,6 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -757,10 +746,6 @@ class Responses(SyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -800,7 +785,6 @@ class Responses(SyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -838,7 +822,6 @@ class Responses(SyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
-                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
@@ -1485,7 +1468,6 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1674,10 +1656,6 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1718,7 +1696,6 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1907,10 +1884,6 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1951,7 +1924,6 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2140,10 +2112,6 @@ class AsyncResponses(AsyncAPIResource):
               similar requests and to help OpenAI detect and prevent abuse.
               [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -2183,7 +2151,6 @@ class AsyncResponses(AsyncAPIResource):
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2221,7 +2188,6 @@ class AsyncResponses(AsyncAPIResource):
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
-                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
src/openai/types/responses/response_create_params.py
@@ -253,14 +253,6 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
-    verbosity: Optional[Literal["low", "medium", "high"]]
-    """Constrains the verbosity of the model's response.
-
-    Lower values will result in more concise responses, while higher values will
-    result in more verbose responses. Currently supported values are `low`,
-    `medium`, and `high`.
-    """
-
 
 class StreamOptions(TypedDict, total=False):
     include_obfuscation: bool
src/openai/types/responses/response_text_config.py
@@ -1,6 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from typing import Optional
+from typing_extensions import Literal
 
 from ..._models import BaseModel
 from .response_format_text_config import ResponseFormatTextConfig
@@ -24,3 +25,11 @@ class ResponseTextConfig(BaseModel):
     ensures the message the model generates is valid JSON. Using `json_schema` is
     preferred for models that support it.
     """
+
+    verbosity: Optional[Literal["low", "medium", "high"]] = None
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
src/openai/types/responses/response_text_config_param.py
@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing_extensions import TypedDict
+from typing import Optional
+from typing_extensions import Literal, TypedDict
 
 from .response_format_text_config_param import ResponseFormatTextConfigParam
 
@@ -25,3 +26,11 @@ class ResponseTextConfigParam(TypedDict, total=False):
     ensures the message the model generates is valid JSON. Using `json_schema` is
     preferred for models that support it.
     """
+
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
tests/api_resources/test_responses.py
@@ -55,7 +55,10 @@ class TestResponses:
             stream=False,
             stream_options={"include_obfuscation": True},
             temperature=1,
-            text={"format": {"type": "text"}},
+            text={
+                "format": {"type": "text"},
+                "verbosity": "low",
+            },
             tool_choice="none",
             tools=[
                 {
@@ -70,7 +73,6 @@ class TestResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
-            verbosity="low",
         )
         assert_matches_type(Response, response, path=["response"])
 
@@ -131,7 +133,10 @@ class TestResponses:
             store=True,
             stream_options={"include_obfuscation": True},
             temperature=1,
-            text={"format": {"type": "text"}},
+            text={
+                "format": {"type": "text"},
+                "verbosity": "low",
+            },
             tool_choice="none",
             tools=[
                 {
@@ -146,7 +151,6 @@ class TestResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
-            verbosity="low",
         )
         response_stream.response.close()
 
@@ -402,7 +406,10 @@ class TestAsyncResponses:
             stream=False,
             stream_options={"include_obfuscation": True},
             temperature=1,
-            text={"format": {"type": "text"}},
+            text={
+                "format": {"type": "text"},
+                "verbosity": "low",
+            },
             tool_choice="none",
             tools=[
                 {
@@ -417,7 +424,6 @@ class TestAsyncResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
-            verbosity="low",
         )
         assert_matches_type(Response, response, path=["response"])
 
@@ -478,7 +484,10 @@ class TestAsyncResponses:
             store=True,
             stream_options={"include_obfuscation": True},
             temperature=1,
-            text={"format": {"type": "text"}},
+            text={
+                "format": {"type": "text"},
+                "verbosity": "low",
+            },
             tool_choice="none",
             tools=[
                 {
@@ -493,7 +502,6 @@ class TestAsyncResponses:
             top_p=1,
             truncation="auto",
             user="user-1234",
-            verbosity="low",
         )
         await response_stream.response.aclose()
 
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-56d3a72a5caa187aebcf9de169a6a28a9dc3f70a79d7467a03a9e22595936066.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml
 openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: 7e18239879286d68a48ac5487a649aa6
+config_hash: a67c5e195a59855fe8a5db0dc61a3e7f