Commit b3547d66
Changed files (3)
src/openai/types/responses/__init__.py
@@ -42,6 +42,7 @@ from .response_input_image import ResponseInputImage as ResponseInputImage
from .response_input_param import ResponseInputParam as ResponseInputParam
from .response_output_item import ResponseOutputItem as ResponseOutputItem
from .response_output_text import ResponseOutputText as ResponseOutputText
+from .response_text_config import ResponseTextConfig as ResponseTextConfig
from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
from .response_prompt_param import ResponsePromptParam as ResponsePromptParam
@@ -75,6 +76,7 @@ from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudio
from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
+from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
src/openai/types/responses/response_text_config.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_format_text_config import ResponseFormatTextConfig
+
+__all__ = ["ResponseTextConfig"]
+
+
+class ResponseTextConfig(BaseModel):
+ format: Optional[ResponseFormatTextConfig] = None
+ """An object specifying the format that the model must output.
+
+ Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ ensures the model will match your supplied JSON schema. Learn more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ The default format is `{ "type": "text" }` with no additional options.
+
+ **Not recommended for gpt-4o and newer models:**
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
+
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
+ """Constrains the verbosity of the model's response.
+
+ Lower values will result in more concise responses, while higher values will
+ result in more verbose responses. Currently supported values are `low`,
+ `medium`, and `high`.
+ """
src/openai/types/responses/response_text_config_param.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, TypedDict
+
+from .response_format_text_config_param import ResponseFormatTextConfigParam
+
+__all__ = ["ResponseTextConfigParam"]
+
+
+class ResponseTextConfigParam(TypedDict, total=False):
+ format: ResponseFormatTextConfigParam
+ """An object specifying the format that the model must output.
+
+ Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ ensures the model will match your supplied JSON schema. Learn more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+ The default format is `{ "type": "text" }` with no additional options.
+
+ **Not recommended for gpt-4o and newer models:**
+
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ ensures the message the model generates is valid JSON. Using `json_schema` is
+ preferred for models that support it.
+ """
+
+ verbosity: Optional[Literal["low", "medium", "high"]]
+ """Constrains the verbosity of the model's response.
+
+ Lower values will result in more concise responses, while higher values will
+ result in more verbose responses. Currently supported values are `low`,
+ `medium`, and `high`.
+ """