Commit a94bd5b2
Changed files (11):
- src/openai/resources/chat/completions/completions.py
- src/openai/resources/responses/responses.py
- src/openai/types/chat/completion_create_params.py
- src/openai/types/graders/text_similarity_grader.py
- src/openai/types/graders/text_similarity_grader_param.py
- src/openai/types/responses/response.py
- src/openai/types/responses/response_create_params.py
- tests/api_resources/chat/test_completions.py
- tests/api_resources/test_responses.py
- tests/lib/chat/test_completions.py
- .stats.yml
src/openai/resources/chat/completions/completions.py
@@ -103,7 +103,6 @@ class Completions(SyncAPIResource):
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
safety_identifier: str | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -204,7 +203,6 @@ class Completions(SyncAPIResource):
"prompt_cache_key": prompt_cache_key,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
- "text": text,
"safety_identifier": safety_identifier,
"seed": seed,
"service_tier": service_tier,
@@ -267,7 +265,6 @@ class Completions(SyncAPIResource):
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -459,7 +456,7 @@ class Completions(SyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
@@ -556,7 +553,6 @@ class Completions(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -757,7 +753,7 @@ class Completions(SyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -845,7 +841,6 @@ class Completions(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1046,7 +1041,7 @@ class Completions(SyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -1134,7 +1129,6 @@ class Completions(SyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1181,7 +1175,6 @@ class Completions(SyncAPIResource):
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
- "text": text,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
@@ -1404,7 +1397,6 @@ class Completions(SyncAPIResource):
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
safety_identifier: str | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -1475,7 +1467,6 @@ class Completions(SyncAPIResource):
presence_penalty=presence_penalty,
prompt_cache_key=prompt_cache_key,
reasoning_effort=reasoning_effort,
- text=text,
safety_identifier=safety_identifier,
seed=seed,
service_tier=service_tier,
@@ -1548,7 +1539,6 @@ class AsyncCompletions(AsyncAPIResource):
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
safety_identifier: str | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -1649,7 +1639,6 @@ class AsyncCompletions(AsyncAPIResource):
"prompt_cache_key": prompt_cache_key,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
- "text": text,
"safety_identifier": safety_identifier,
"seed": seed,
"service_tier": service_tier,
@@ -1712,7 +1701,6 @@ class AsyncCompletions(AsyncAPIResource):
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1904,7 +1892,7 @@ class AsyncCompletions(AsyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
@@ -2001,7 +1989,6 @@ class AsyncCompletions(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2202,7 +2189,7 @@ class AsyncCompletions(AsyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -2290,7 +2277,6 @@ class AsyncCompletions(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2491,7 +2477,7 @@ class AsyncCompletions(AsyncAPIResource):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -2579,7 +2565,6 @@ class AsyncCompletions(AsyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2626,7 +2611,6 @@ class AsyncCompletions(AsyncAPIResource):
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
- "text": text,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
@@ -2849,7 +2833,6 @@ class AsyncCompletions(AsyncAPIResource):
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
- text: completion_create_params.Text | NotGiven = NOT_GIVEN,
safety_identifier: str | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
@@ -2921,7 +2904,6 @@ class AsyncCompletions(AsyncAPIResource):
presence_penalty=presence_penalty,
prompt_cache_key=prompt_cache_key,
reasoning_effort=reasoning_effort,
- text=text,
safety_identifier=safety_identifier,
seed=seed,
service_tier=service_tier,
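With the `text` parameter removed from Chat Completions throughout this file, output shape on this endpoint is controlled by `response_format` alone; the `text={"verbosity": ...}` option now exists only on the Responses API. A minimal sketch of the updated call (model name is illustrative):

```python
from openai import OpenAI

client = OpenAI()

# After this commit, chat.completions.create() no longer accepts `text=`;
# structured output is requested via `response_format` instead.
completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model choice
    messages=[{"role": "user", "content": "Reply with a JSON object."}],
    response_format={"type": "json_object"},
)
print(completion.choices[0].message.content)
```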
src/openai/resources/responses/responses.py
@@ -43,6 +43,7 @@ from ...types.shared_params.responses_model import ResponsesModel
from ...types.responses.response_input_param import ResponseInputParam
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.responses.response_stream_event import ResponseStreamEvent
+from ...types.responses.response_text_config_param import ResponseTextConfigParam
__all__ = ["Responses", "AsyncResponses"]
@@ -94,7 +95,7 @@ class Responses(SyncAPIResource):
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -238,6 +239,12 @@ class Responses(SyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -315,7 +322,7 @@ class Responses(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -459,6 +466,12 @@ class Responses(SyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -536,7 +549,7 @@ class Responses(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -680,6 +693,12 @@ class Responses(SyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -756,7 +775,7 @@ class Responses(SyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -847,7 +866,7 @@ class Responses(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -879,7 +898,7 @@ class Responses(SyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -1008,7 +1027,7 @@ class Responses(SyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1439,7 +1458,7 @@ class AsyncResponses(AsyncAPIResource):
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1583,6 +1602,12 @@ class AsyncResponses(AsyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -1660,7 +1685,7 @@ class AsyncResponses(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1804,6 +1829,12 @@ class AsyncResponses(AsyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -1881,7 +1912,7 @@ class AsyncResponses(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2025,6 +2056,12 @@ class AsyncResponses(AsyncAPIResource):
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
+ text: Configuration options for a text response from the model. Can be plain text or
+ structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
@@ -2101,7 +2138,7 @@ class AsyncResponses(AsyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2192,7 +2229,7 @@ class AsyncResponses(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2224,7 +2261,7 @@ class AsyncResponses(AsyncAPIResource):
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2357,7 +2394,7 @@ class AsyncResponses(AsyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: response_create_params.Text | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
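Every `text:` annotation above now points at the shared `ResponseTextConfigParam` type instead of the inline `response_create_params.Text` alias, so one config object covers both plain-text and Structured Outputs requests. A minimal sketch, assuming an illustrative model and schema:

```python
from openai import OpenAI

client = OpenAI()

# `text` takes a ResponseTextConfigParam: a `format` for Structured Outputs
# and/or a `verbosity` hint ("low" | "medium" | "high").
response = client.responses.create(
    model="gpt-4o",  # illustrative model choice
    input="List three prime numbers as JSON.",
    text={
        "format": {
            "type": "json_schema",
            "name": "primes",  # hypothetical schema name
            "schema": {
                "type": "object",
                "properties": {
                    "primes": {"type": "array", "items": {"type": "integer"}},
                },
                "required": ["primes"],
                "additionalProperties": False,
            },
            "strict": True,
        },
        "verbosity": "low",
    },
)
print(response.output_text)
```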
src/openai/types/chat/completion_create_params.py
@@ -25,7 +25,6 @@ __all__ = [
"FunctionCall",
"Function",
"ResponseFormat",
- "Text",
"WebSearchOptions",
"WebSearchOptionsUserLocation",
"WebSearchOptionsUserLocationApproximate",
@@ -257,7 +256,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
- Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
"""
stream_options: Optional[ChatCompletionStreamOptionsParam]
@@ -271,8 +270,6 @@ class CompletionCreateParamsBase(TypedDict, total=False):
this or `top_p` but not both.
"""
- text: Text
-
tool_choice: ChatCompletionToolChoiceOptionParam
"""
Controls which (if any) tool is called by the model. `none` means the model will
@@ -367,16 +364,6 @@ class Function(TypedDict, total=False):
ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject]
-class Text(TypedDict, total=False):
- verbosity: Optional[Literal["low", "medium", "high"]]
- """Constrains the verbosity of the model's response.
-
- Lower values will result in more concise responses, while higher values will
- result in more verbose responses. Currently supported values are `low`,
- `medium`, and `high`.
- """
-
-
class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
city: str
"""Free text input for the city of the user, e.g. `San Francisco`."""
src/openai/types/graders/text_similarity_grader.py
@@ -9,12 +9,22 @@ __all__ = ["TextSimilarityGrader"]
class TextSimilarityGrader(BaseModel):
evaluation_metric: Literal[
- "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
+ "cosine",
+ "fuzzy_match",
+ "bleu",
+ "gleu",
+ "meteor",
+ "rouge_1",
+ "rouge_2",
+ "rouge_3",
+ "rouge_4",
+ "rouge_5",
+ "rouge_l",
]
"""The evaluation metric to use.
- One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
- `rouge_4`, `rouge_5`, or `rouge_l`.
+ One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
+ `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
"""
input: str
src/openai/types/graders/text_similarity_grader_param.py
@@ -10,13 +10,23 @@ __all__ = ["TextSimilarityGraderParam"]
class TextSimilarityGraderParam(TypedDict, total=False):
evaluation_metric: Required[
Literal[
- "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
+ "cosine",
+ "fuzzy_match",
+ "bleu",
+ "gleu",
+ "meteor",
+ "rouge_1",
+ "rouge_2",
+ "rouge_3",
+ "rouge_4",
+ "rouge_5",
+ "rouge_l",
]
]
"""The evaluation metric to use.
- One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
- `rouge_4`, `rouge_5`, or `rouge_l`.
+ One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
+ `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
"""
input: Required[str]
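Both the model and the TypedDict now accept the new `cosine` metric alongside the existing options. A sketch of a grader payload using it (name and templated field values are illustrative):

```python
from openai.types.graders import TextSimilarityGraderParam

# The new "cosine" metric is accepted like any other evaluation_metric.
grader: TextSimilarityGraderParam = {
    "type": "text_similarity",
    "name": "semantic-similarity",  # illustrative grader name
    "evaluation_metric": "cosine",
    "input": "{{sample.output_text}}",        # illustrative eval template
    "reference": "{{item.expected_output}}",  # illustrative eval template
}
```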
src/openai/types/responses/response.py
@@ -18,11 +18,11 @@ from .response_input_item import ResponseInputItem
from .tool_choice_allowed import ToolChoiceAllowed
from .tool_choice_options import ToolChoiceOptions
from .response_output_item import ResponseOutputItem
+from .response_text_config import ResponseTextConfig
from .tool_choice_function import ToolChoiceFunction
from ..shared.responses_model import ResponsesModel
-from .response_format_text_config import ResponseFormatTextConfig
-__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"]
+__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
class IncompleteDetails(BaseModel):
@@ -35,32 +35,6 @@ ToolChoice: TypeAlias = Union[
]
-class Text(BaseModel):
- format: Optional[ResponseFormatTextConfig] = None
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
- verbosity: Optional[Literal["low", "medium", "high"]] = None
- """Constrains the verbosity of the model's response.
-
- Lower values will result in more concise responses, while higher values will
- result in more verbose responses. Currently supported values are `low`,
- `medium`, and `high`.
- """
-
-
class Response(BaseModel):
id: str
"""Unique identifier for this Response."""
@@ -244,7 +218,14 @@ class Response(BaseModel):
`incomplete`.
"""
- text: Optional[Text] = None
+ text: Optional[ResponseTextConfig] = None
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
top_logprobs: Optional[int] = None
"""
src/openai/types/responses/response_create_params.py
@@ -16,14 +16,13 @@ from .tool_choice_types_param import ToolChoiceTypesParam
from ..shared_params.reasoning import Reasoning
from .tool_choice_custom_param import ToolChoiceCustomParam
from .tool_choice_allowed_param import ToolChoiceAllowedParam
+from .response_text_config_param import ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam
from ..shared_params.responses_model import ResponsesModel
-from .response_format_text_config_param import ResponseFormatTextConfigParam
__all__ = [
"ResponseCreateParamsBase",
"StreamOptions",
- "Text",
"ToolChoice",
"ResponseCreateParamsNonStreaming",
"ResponseCreateParamsStreaming",
@@ -183,7 +182,14 @@ class ResponseCreateParamsBase(TypedDict, total=False):
this or `top_p` but not both.
"""
- text: Text
+ text: ResponseTextConfigParam
+ """Configuration options for a text response from the model.
+
+ Can be plain text or structured JSON data. Learn more:
+
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ """
tool_choice: ToolChoice
"""
@@ -260,32 +266,6 @@ class StreamOptions(TypedDict, total=False):
"""
-class Text(TypedDict, total=False):
- format: ResponseFormatTextConfigParam
- """An object specifying the format that the model must output.
-
- Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
- ensures the model will match your supplied JSON schema. Learn more in the
- [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
- The default format is `{ "type": "text" }` with no additional options.
-
- **Not recommended for gpt-4o and newer models:**
-
- Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- ensures the message the model generates is valid JSON. Using `json_schema` is
- preferred for models that support it.
- """
-
- verbosity: Optional[Literal["low", "medium", "high"]]
- """Constrains the verbosity of the model's response.
-
- Lower values will result in more concise responses, while higher values will
- result in more verbose responses. Currently supported values are `low`,
- `medium`, and `high`.
- """
-
-
ToolChoice: TypeAlias = Union[
ToolChoiceOptions,
ToolChoiceAllowedParam,
tests/api_resources/chat/test_completions.py
@@ -86,7 +86,6 @@ class TestCompletions:
"include_usage": True,
},
temperature=1,
- text={"verbosity": "low"},
tool_choice="none",
tools=[
{
@@ -219,7 +218,6 @@ class TestCompletions:
"include_usage": True,
},
temperature=1,
- text={"verbosity": "low"},
tool_choice="none",
tools=[
{
@@ -529,7 +527,6 @@ class TestAsyncCompletions:
"include_usage": True,
},
temperature=1,
- text={"verbosity": "low"},
tool_choice="none",
tools=[
{
@@ -662,7 +659,6 @@ class TestAsyncCompletions:
"include_usage": True,
},
temperature=1,
- text={"verbosity": "low"},
tool_choice="none",
tools=[
{
tests/api_resources/test_responses.py
@@ -10,7 +10,9 @@ import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai._utils import assert_signatures_in_sync
-from openai.types.responses import Response
+from openai.types.responses import (
+ Response,
+)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
tests/lib/chat/test_completions.py
@@ -541,7 +541,7 @@ def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> Non
content_snapshot=snapshot(
'{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
),
- path="/chat/completions",
+ path="/chat/completions",
mock_client=client,
respx_mock=respx_mock,
)
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml
-openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15
-config_hash: ed87b9139ac595a04a2162d754df2fed
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml
+openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063
+config_hash: 76afa3236f36854a8705f1281b1990b8