Commit fdd52476
Changed files (55)
src/
  openai/
    resources/
      audio/
      beta/
        realtime/
        threads/
        vector_stores/
      chat/
    types/
      beta/
        realtime/
        threads/
      shared/
      shared_params/
tests/
  api_resources/
src/openai/resources/audio/transcriptions.py
@@ -138,8 +138,8 @@ class Transcriptions(SyncAPIResource):
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
@@ -302,8 +302,8 @@ class AsyncTranscriptions(AsyncAPIResource):
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
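For context, a minimal sketch of how the ISO-639-1 `language` hint described above is passed from this SDK. The file path is a placeholder and the client assumes OPENAI_API_KEY is set in the environment; the parameter semantics are as documented in the diff.

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # "speech.mp3" is a placeholder path; any supported audio file works.
    with open("speech.mp3", "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            language="en",  # ISO-639-1 code (e.g. `en`) to improve accuracy and latency
        )

    print(transcript.text)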
src/openai/resources/beta/realtime/sessions.py
@@ -89,8 +89,11 @@ class Sessions(SyncAPIResource):
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription, these fields will be passed to the Whisper API.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
@@ -232,8 +235,11 @@ class AsyncSessions(AsyncAPIResource):
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+ for transcription, these fields will be passed to the Whisper API.
instructions: The default system instructions (i.e. system message) prepended to model calls.
This field allows the client to guide the model on desired responses. The model
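A sketch of configuring input audio transcription when creating a Realtime session, assuming the session params accept optional `language` and `prompt` fields alongside `model` as the updated docstring suggests; the realtime model name below is illustrative.

    from openai import OpenAI

    client = OpenAI()

    session = client.beta.realtime.sessions.create(
        model="gpt-4o-realtime-preview",  # illustrative realtime model name
        input_audio_transcription={
            "model": "whisper-1",
            # Assumed optional fields passed through to the Whisper API:
            "language": "en",
            "prompt": "Expect technical vocabulary about the OpenAI API.",
        },
    )

    print(session.id)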
src/openai/resources/beta/threads/runs/runs.py
@@ -47,6 +47,7 @@ from .....types.beta.threads import (
run_submit_tool_outputs_params,
)
from .....types.beta.threads.run import Run
+from .....types.shared_params.metadata import Metadata
from .....types.beta.assistant_tool_param import AssistantToolParam
from .....types.beta.assistant_stream_event import AssistantStreamEvent
from .....types.beta.threads.runs.run_step_include import RunStepInclude
@@ -92,7 +93,7 @@ class Runs(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -148,9 +149,11 @@ class Runs(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -233,7 +236,7 @@ class Runs(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -292,9 +295,11 @@ class Runs(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -373,7 +378,7 @@ class Runs(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -432,9 +437,11 @@ class Runs(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -512,7 +519,7 @@ class Runs(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -609,7 +616,7 @@ class Runs(SyncAPIResource):
run_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -622,9 +629,11 @@ class Runs(SyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -1457,7 +1466,7 @@ class AsyncRuns(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1513,9 +1522,11 @@ class AsyncRuns(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1598,7 +1609,7 @@ class AsyncRuns(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1657,9 +1668,11 @@ class AsyncRuns(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1738,7 +1751,7 @@ class AsyncRuns(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1797,9 +1810,11 @@ class AsyncRuns(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1877,7 +1892,7 @@ class AsyncRuns(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1974,7 +1989,7 @@ class AsyncRuns(AsyncAPIResource):
run_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1987,9 +2002,11 @@ class AsyncRuns(AsyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
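The practical effect of the `Optional[object]` to `Optional[Metadata]` change is a stricter parameter type: `Metadata` is expected to be a mapping of string keys to string values (up to 16 pairs, with the 64/512-character limits documented above). A sketch with placeholder IDs:

    from openai import OpenAI

    client = OpenAI()

    # "thread_abc123" and "asst_abc123" are placeholder IDs.
    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",
        assistant_id="asst_abc123",
        metadata={"user_id": "u_42", "source": "docs-example"},  # str -> str only
    )

    run = client.beta.threads.runs.update(
        run_id=run.id,
        thread_id="thread_abc123",
        metadata={"reviewed": "true"},  # values must be strings, not booleans
    )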
src/openai/resources/beta/threads/messages.py
@@ -23,6 +23,7 @@ from ...._base_client import (
)
from ....types.beta.threads import message_list_params, message_create_params, message_update_params
from ....types.beta.threads.message import Message
+from ....types.shared_params.metadata import Metadata
from ....types.beta.threads.message_deleted import MessageDeleted
from ....types.beta.threads.message_content_part_param import MessageContentPartParam
@@ -56,7 +57,7 @@ class Messages(SyncAPIResource):
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -81,9 +82,11 @@ class Messages(SyncAPIResource):
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -155,7 +158,7 @@ class Messages(SyncAPIResource):
message_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -168,9 +171,11 @@ class Messages(SyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -330,7 +335,7 @@ class AsyncMessages(AsyncAPIResource):
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -355,9 +360,11 @@ class AsyncMessages(AsyncAPIResource):
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -429,7 +436,7 @@ class AsyncMessages(AsyncAPIResource):
message_id: str,
*,
thread_id: str,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -442,9 +449,11 @@ class AsyncMessages(AsyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
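The same typed `metadata` parameter applies to message creation and updates; a brief sketch with a placeholder thread ID:

    from openai import OpenAI

    client = OpenAI()

    message = client.beta.threads.messages.create(
        thread_id="thread_abc123",  # placeholder ID
        role="user",
        content="How do I attach metadata to a message?",
        metadata={"locale": "en-US"},
    )

    client.beta.threads.messages.update(
        message_id=message.id,
        thread_id="thread_abc123",
        metadata={"locale": "en-GB"},
    )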
src/openai/resources/beta/threads/threads.py
@@ -53,6 +53,7 @@ from ....types.chat_model import ChatModel
from ....types.beta.thread import Thread
from ....types.beta.threads.run import Run
from ....types.beta.thread_deleted import ThreadDeleted
+from ....types.shared_params.metadata import Metadata
from ....types.beta.assistant_stream_event import AssistantStreamEvent
from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -92,7 +93,7 @@ class Threads(SyncAPIResource):
self,
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -109,9 +110,11 @@ class Threads(SyncAPIResource):
start the thread with.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -181,7 +184,7 @@ class Threads(SyncAPIResource):
self,
thread_id: str,
*,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -195,9 +198,11 @@ class Threads(SyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -272,7 +277,7 @@ class Threads(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -315,9 +320,11 @@ class Threads(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -357,7 +364,8 @@ class Threads(SyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -403,7 +411,7 @@ class Threads(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -449,9 +457,11 @@ class Threads(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -487,7 +497,8 @@ class Threads(SyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -533,7 +544,7 @@ class Threads(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -579,9 +590,11 @@ class Threads(SyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -617,7 +630,8 @@ class Threads(SyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -662,7 +676,7 @@ class Threads(SyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -926,7 +940,7 @@ class AsyncThreads(AsyncAPIResource):
self,
*,
messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -943,9 +957,11 @@ class AsyncThreads(AsyncAPIResource):
start the thread with.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -1015,7 +1031,7 @@ class AsyncThreads(AsyncAPIResource):
self,
thread_id: str,
*,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -1029,9 +1045,11 @@ class AsyncThreads(AsyncAPIResource):
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
tool_resources: A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
@@ -1106,7 +1124,7 @@ class AsyncThreads(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1149,9 +1167,11 @@ class AsyncThreads(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1191,7 +1211,8 @@ class AsyncThreads(AsyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1237,7 +1258,7 @@ class AsyncThreads(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1283,9 +1304,11 @@ class AsyncThreads(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1321,7 +1344,8 @@ class AsyncThreads(AsyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1367,7 +1391,7 @@ class AsyncThreads(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -1413,9 +1437,11 @@ class AsyncThreads(AsyncAPIResource):
`incomplete_details` for more info.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
be used to execute this run. If a value is provided here, it will override the
@@ -1451,7 +1477,8 @@ class AsyncThreads(AsyncAPIResource):
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
- thread: If no thread is provided, an empty thread will be created.
+ thread: Options to create a new thread. If no thread is provided when running a request,
+ an empty thread will be created.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tools and instead generates a message. `auto` is the default value
@@ -1496,7 +1523,7 @@ class AsyncThreads(AsyncAPIResource):
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
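The reworded `thread` description is easier to see in context: `create_and_run` can inline the options for a new thread, and an empty thread is created when none is given. A sketch with a placeholder assistant ID:

    from openai import OpenAI

    client = OpenAI()

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc123",  # placeholder ID
        # Options for the new thread; omit `thread` to start from an empty thread.
        thread={
            "messages": [{"role": "user", "content": "Summarize our Q3 results."}],
            "metadata": {"ticket": "Q3-summary"},
        },
        metadata={"requested_by": "u_42"},  # run-level metadata, str -> str
    )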
src/openai/resources/beta/vector_stores/vector_stores.py
@@ -41,6 +41,7 @@ from ....types.beta import (
)
from ...._base_client import AsyncPaginator, make_request_options
from ....types.beta.vector_store import VectorStore
+from ....types.shared_params.metadata import Metadata
from ....types.beta.vector_store_deleted import VectorStoreDeleted
from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam
@@ -81,7 +82,7 @@ class VectorStores(SyncAPIResource):
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -104,9 +105,11 @@ class VectorStores(SyncAPIResource):
files.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -176,7 +179,7 @@ class VectorStores(SyncAPIResource):
vector_store_id: str,
*,
expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -192,9 +195,11 @@ class VectorStores(SyncAPIResource):
expires_after: The expiration policy for a vector store.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -359,7 +364,7 @@ class AsyncVectorStores(AsyncAPIResource):
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -382,9 +387,11 @@ class AsyncVectorStores(AsyncAPIResource):
files.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
@@ -454,7 +461,7 @@ class AsyncVectorStores(AsyncAPIResource):
vector_store_id: str,
*,
expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -470,9 +477,11 @@ class AsyncVectorStores(AsyncAPIResource):
expires_after: The expiration policy for a vector store.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the vector store.
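Vector stores pick up the same typed `metadata` parameter on create and update; a minimal sketch (the store name and tag values are illustrative):

    from openai import OpenAI

    client = OpenAI()

    vector_store = client.beta.vector_stores.create(
        name="Support FAQ",
        metadata={"team": "support", "env": "staging"},
    )

    client.beta.vector_stores.update(
        vector_store_id=vector_store.id,
        metadata={"team": "support", "env": "production"},
    )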
src/openai/resources/beta/assistants.py
@@ -26,6 +26,7 @@ from ..._base_client import AsyncPaginator, make_request_options
from ...types.chat_model import ChatModel
from ...types.beta.assistant import Assistant
from ...types.beta.assistant_deleted import AssistantDeleted
+from ...types.shared_params.metadata import Metadata
from ...types.beta.assistant_tool_param import AssistantToolParam
from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -58,7 +59,7 @@ class Assistants(SyncAPIResource):
model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -88,9 +89,11 @@ class Assistants(SyncAPIResource):
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
@@ -206,7 +209,7 @@ class Assistants(SyncAPIResource):
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -232,9 +235,11 @@ class Assistants(SyncAPIResource):
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -444,7 +449,7 @@ class AsyncAssistants(AsyncAPIResource):
model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -474,9 +479,11 @@ class AsyncAssistants(AsyncAPIResource):
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
@@ -592,7 +599,7 @@ class AsyncAssistants(AsyncAPIResource):
*,
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
- metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
model: str | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
@@ -618,9 +625,11 @@ class AsyncAssistants(AsyncAPIResource):
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
- for storing additional information about the object in a structured format. Keys
- can be a maximum of 64 characters long and values can be a maximum of 512
- characters long.
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
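Assistants accept the same typed `metadata` on create and update; a brief sketch (model name, instructions, and tags are illustrative):

    from openai import OpenAI

    client = OpenAI()

    assistant = client.beta.assistants.create(
        model="gpt-4o",
        instructions="You answer questions about internal runbooks.",
        metadata={"owner": "platform-team"},
    )

    client.beta.assistants.update(
        assistant_id=assistant.id,
        metadata={"owner": "platform-team", "tier": "internal"},
    )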
src/openai/resources/chat/completions.py
@@ -28,6 +28,7 @@ from ...types.chat import (
from ..._base_client import make_request_options
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
+from ...types.shared_params.metadata import Metadata
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.chat_completion_modality import ChatCompletionModality
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
@@ -75,7 +76,7 @@ class Completions(SyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -179,8 +180,12 @@ class Completions(SyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -246,9 +251,9 @@ class Completions(SyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -324,7 +329,7 @@ class Completions(SyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -434,8 +439,12 @@ class Completions(SyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -501,9 +510,9 @@ class Completions(SyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -572,7 +581,7 @@ class Completions(SyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -682,8 +691,12 @@ class Completions(SyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -749,9 +762,9 @@ class Completions(SyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -819,7 +832,7 @@ class Completions(SyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -927,7 +940,7 @@ class AsyncCompletions(AsyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1031,8 +1044,12 @@ class AsyncCompletions(AsyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1098,9 +1115,9 @@ class AsyncCompletions(AsyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1176,7 +1193,7 @@ class AsyncCompletions(AsyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1286,8 +1303,12 @@ class AsyncCompletions(AsyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1353,9 +1374,9 @@ class AsyncCompletions(AsyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1424,7 +1445,7 @@ class AsyncCompletions(AsyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
@@ -1534,8 +1555,12 @@ class AsyncCompletions(AsyncAPIResource):
compatible with
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
- metadata: Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
modalities: Output types that you would like the model to generate for this request. Most
models are capable of generating text, which is the default:
@@ -1601,9 +1626,9 @@ class AsyncCompletions(AsyncAPIResource):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
stop: Up to 4 sequences where the API will stop generating further tokens.
@@ -1671,7 +1696,7 @@ class AsyncCompletions(AsyncAPIResource):
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
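For illustration, a minimal sketch of passing the retyped `metadata` parameter from application code, assuming a standard `OpenAI()` client; the keys and values are placeholders:

    from openai import OpenAI

    client = OpenAI()

    # Up to 16 pairs; keys <= 64 characters, values <= 512 characters.
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
        metadata={"project": "demo", "env": "staging"},
    )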
src/openai/resources/batches.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Optional
from typing_extensions import Literal
import httpx
@@ -19,10 +19,8 @@ from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from ..types.batch import Batch
-from .._base_client import (
- AsyncPaginator,
- make_request_options,
-)
+from .._base_client import AsyncPaginator, make_request_options
+from ..types.shared_params.metadata import Metadata
__all__ = ["Batches", "AsyncBatches"]
@@ -53,7 +51,7 @@ class Batches(SyncAPIResource):
completion_window: Literal["24h"],
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -83,7 +81,12 @@ class Batches(SyncAPIResource):
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
- metadata: Optional custom metadata for the batch.
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
@@ -258,7 +261,7 @@ class AsyncBatches(AsyncAPIResource):
completion_window: Literal["24h"],
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
input_file_id: str,
- metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -288,7 +291,12 @@ class AsyncBatches(AsyncAPIResource):
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 200 MB in size.
- metadata: Optional custom metadata for the batch.
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
extra_headers: Send extra headers
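The same `metadata` shape now applies to batch creation; a sketch in which `input_file_id` is a hypothetical ID of a file uploaded with purpose `batch`:

    # Assumes: from openai import OpenAI; client = OpenAI()
    batch = client.batches.create(
        completion_window="24h",
        endpoint="/v1/chat/completions",
        input_file_id="file-abc123",  # placeholder file ID
        metadata={"team": "search", "ticket": "1234"},
    )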
src/openai/types/audio/transcription_create_params.py
@@ -30,8 +30,8 @@ class TranscriptionCreateParams(TypedDict, total=False):
"""The language of the input audio.
Supplying the input language in
- [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
- improve accuracy and latency.
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
"""
prompt: str
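A sketch of supplying the ISO-639-1 language hint on a transcription request; the audio file name is a placeholder:

    # Assumes: from openai import OpenAI; client = OpenAI()
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=open("meeting.mp3", "rb"),  # placeholder audio file
        language="en",  # ISO-639-1 code, e.g. `en`
    )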
src/openai/types/beta/realtime/conversation_item_create_event.py
@@ -20,10 +20,10 @@ class ConversationItemCreateEvent(BaseModel):
"""Optional client-generated ID used to identify this event."""
previous_item_id: Optional[str] = None
- """
- The ID of the preceding item after which the new item will be inserted. If not
- set, the new item will be appended to the end of the conversation. If set to
- `root`, the new item will be added to the beginning of the conversation. If set
- to an existing ID, it allows an item to be inserted mid-conversation. If the ID
- cannot be found, an error will be returned and the item will not be added.
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
"""
src/openai/types/beta/realtime/conversation_item_create_event_param.py
@@ -20,10 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False):
"""Optional client-generated ID used to identify this event."""
previous_item_id: str
- """
- The ID of the preceding item after which the new item will be inserted. If not
- set, the new item will be appended to the end of the conversation. If set to
- `root`, the new item will be added to the beginning of the conversation. If set
- to an existing ID, it allows an item to be inserted mid-conversation. If the ID
- cannot be found, an error will be returned and the item will not be added.
+ """The ID of the preceding item after which the new item will be inserted.
+
+ If not set, the new item will be appended to the end of the conversation. If set
+ to `root`, the new item will be added to the beginning of the conversation. If
+ set to an existing ID, it allows an item to be inserted mid-conversation. If the
+ ID cannot be found, an error will be returned and the item will not be added.
"""
src/openai/types/beta/realtime/realtime_response.py
@@ -1,9 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import List, Union, Optional
from typing_extensions import Literal
from ...._models import BaseModel
+from ...shared.metadata import Metadata
from .conversation_item import ConversationItem
from .realtime_response_usage import RealtimeResponseUsage
from .realtime_response_status import RealtimeResponseStatus
@@ -15,8 +16,40 @@ class RealtimeResponse(BaseModel):
id: Optional[str] = None
"""The unique ID of the response."""
- metadata: Optional[object] = None
- """Developer-provided string key-value pairs associated with this response."""
+ conversation_id: Optional[str] = None
+ """
+ Which conversation the response is added to, determined by the `conversation`
+ field in the `response.create` event. If `auto`, the response will be added to
+ the default conversation and the value of `conversation_id` will be an id like
+ `conv_1234`. If `none`, the response will not be added to any conversation and
+ the value of `conversation_id` will be `null`. If responses are being triggered
+ by server VAD, the response will be added to the default conversation, thus the
+ `conversation_id` will be an id like `conv_1234`.
+ """
+
+ max_output_tokens: Union[int, Literal["inf"], None] = None
+ """
+ Maximum number of output tokens for a single assistant response, inclusive of
+ tool calls, that was used in this response.
+ """
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+ modalities: Optional[List[Literal["text", "audio"]]] = None
+ """The set of modalities the model used to respond.
+
+    If there are multiple modalities, the model will pick one; for example, if
+ `modalities` is `["text", "audio"]`, the model could be responding in either
+ text or audio.
+ """
object: Optional[Literal["realtime.response"]] = None
"""The object type, must be `realtime.response`."""
@@ -24,6 +57,9 @@ class RealtimeResponse(BaseModel):
output: Optional[List[ConversationItem]] = None
"""The list of output items generated by the response."""
+ output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
+ """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
+
status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None
"""
The final status of the response (`completed`, `cancelled`, `failed`, or
@@ -33,6 +69,9 @@ class RealtimeResponse(BaseModel):
status_details: Optional[RealtimeResponseStatus] = None
"""Additional details about the status."""
+ temperature: Optional[float] = None
+ """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
+
usage: Optional[RealtimeResponseUsage] = None
"""Usage statistics for the Response, this will correspond to billing.
@@ -40,3 +79,9 @@ class RealtimeResponse(BaseModel):
to the Conversation, thus output from previous turns (text and audio tokens)
will become the input for later turns.
"""
+
+ voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None
+ """
+ The voice the model used to respond. Current voice options are `alloy`, `ash`,
+    `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.
+ """
src/openai/types/beta/realtime/response_create_event.py
@@ -4,6 +4,7 @@ from typing import List, Union, Optional
from typing_extensions import Literal
from ...._models import BaseModel
+from ...shared.metadata import Metadata
from .conversation_item import ConversationItem
__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"]
@@ -66,12 +67,14 @@ class Response(BaseModel):
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: Optional[List[Literal["text", "audio"]]] = None
src/openai/types/beta/realtime/response_create_event_param.py
@@ -6,6 +6,7 @@ from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
from .conversation_item_param import ConversationItemParam
+from ...shared_params.metadata import Metadata
__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"]
@@ -67,12 +68,14 @@ class Response(TypedDict, total=False):
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: List[Literal["text", "audio"]]
src/openai/types/beta/realtime/session_create_params.py
@@ -22,8 +22,11 @@ class SessionCreateParams(TypedDict, total=False):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+    for transcription; these fields will be passed to the Whisper API.
"""
instructions: str
@@ -101,12 +104,28 @@ class SessionCreateParams(TypedDict, total=False):
class InputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: str
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class Tool(TypedDict, total=False):
description: str
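A sketch of enabling input audio transcription with the new `language` and `prompt` fields (mirroring the updated test suite); other session parameters are omitted and the values are illustrative:

    # Assumes: from openai import OpenAI; client = OpenAI()
    session = client.beta.realtime.sessions.create(
        input_audio_transcription={
            "model": "whisper-1",
            "language": "en",  # ISO-639-1 code
            "prompt": "Expect product names such as Acme.",  # should match the audio language
        },
    )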
src/openai/types/beta/realtime/session_create_response.py
@@ -9,13 +9,13 @@ __all__ = ["SessionCreateResponse", "ClientSecret", "InputAudioTranscription", "
class ClientSecret(BaseModel):
- expires_at: Optional[int] = None
+ expires_at: int
"""Timestamp for when the token expires.
Currently, all tokens expire after one minute.
"""
- value: Optional[str] = None
+ value: str
"""
Ephemeral key usable in client environments to authenticate connections to the
Realtime API. Use this in client-side environments rather than a standard API
@@ -74,7 +74,7 @@ class TurnDetection(BaseModel):
class SessionCreateResponse(BaseModel):
- client_secret: Optional[ClientSecret] = None
+ client_secret: ClientSecret
"""Ephemeral key returned by the API."""
input_audio_format: Optional[str] = None
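Since `client_secret` and its fields are now required rather than `Optional`, callers can read them without a `None` check; a sketch, assuming the session was just created:

    # Assumes: from openai import OpenAI; client = OpenAI()
    session = client.beta.realtime.sessions.create()
    ephemeral_key = session.client_secret.value    # no None check needed anymore
    expires_at = session.client_secret.expires_at  # Unix timestamp, roughly one minute out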
src/openai/types/beta/realtime/session_update_event.py
@@ -9,12 +9,28 @@ __all__ = ["SessionUpdateEvent", "Session", "SessionInputAudioTranscription", "S
class SessionInputAudioTranscription(BaseModel):
+ language: Optional[str] = None
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: Optional[str] = None
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: Optional[str] = None
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class SessionTool(BaseModel):
description: Optional[str] = None
@@ -78,8 +94,11 @@ class Session(BaseModel):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+    for transcription; these fields will be passed to the Whisper API.
"""
instructions: Optional[str] = None
src/openai/types/beta/realtime/session_update_event_param.py
@@ -15,12 +15,28 @@ __all__ = [
class SessionInputAudioTranscription(TypedDict, total=False):
+ language: str
+ """The language of the input audio.
+
+ Supplying the input language in
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ format will improve accuracy and latency.
+ """
+
model: str
"""
The model to use for transcription, `whisper-1` is the only currently supported
model.
"""
+ prompt: str
+ """An optional text to guide the model's style or continue a previous audio
+ segment.
+
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ should match the audio language.
+ """
+
class SessionTool(TypedDict, total=False):
description: str
@@ -84,8 +100,11 @@ class Session(TypedDict, total=False):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously through
+ [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ and should be treated as rough guidance rather than the representation
+ understood by the model. The client can optionally set the language and prompt
+    for transcription; these fields will be passed to the Whisper API.
"""
instructions: str
src/openai/types/beta/threads/runs/run_step.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal, Annotated, TypeAlias
from ....._utils import PropertyInfo
from ....._models import BaseModel
+from ....shared.metadata import Metadata
from .tool_calls_step_details import ToolCallsStepDetails
from .message_creation_step_details import MessageCreationStepDetails
@@ -70,12 +71,14 @@ class RunStep(BaseModel):
Will be `null` if there are no errors.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread.run.step"]
src/openai/types/beta/threads/message.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal, TypeAlias
from ...._models import BaseModel
from .message_content import MessageContent
+from ...shared.metadata import Metadata
from ..code_interpreter_tool import CodeInterpreterTool
__all__ = [
@@ -66,12 +67,14 @@ class Message(BaseModel):
incomplete_details: Optional[IncompleteDetails] = None
"""On an incomplete message, details about why the message is incomplete."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread.message"]
src/openai/types/beta/threads/message_create_params.py
@@ -5,6 +5,7 @@ from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from ...shared_params.metadata import Metadata
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
@@ -27,12 +28,14 @@ class MessageCreateParams(TypedDict, total=False):
attachments: Optional[Iterable[Attachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/threads/message_update_params.py
@@ -5,16 +5,20 @@ from __future__ import annotations
from typing import Optional
from typing_extensions import Required, TypedDict
+from ...shared_params.metadata import Metadata
+
__all__ = ["MessageUpdateParams"]
class MessageUpdateParams(TypedDict, total=False):
thread_id: Required[str]
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/threads/run.py
@@ -6,6 +6,7 @@ from typing_extensions import Literal
from ...._models import BaseModel
from .run_status import RunStatus
from ..assistant_tool import AssistantTool
+from ...shared.metadata import Metadata
from ..assistant_tool_choice_option import AssistantToolChoiceOption
from ..assistant_response_format_option import AssistantResponseFormatOption
from .required_action_function_tool_call import RequiredActionFunctionToolCall
@@ -133,12 +134,14 @@ class Run(BaseModel):
of the run.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
src/openai/types/beta/threads/run_create_params.py
@@ -8,6 +8,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ...chat_model import ChatModel
from ..assistant_tool_param import AssistantToolParam
from .runs.run_step_include import RunStepInclude
+from ...shared_params.metadata import Metadata
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -80,12 +81,14 @@ class RunCreateParamsBase(TypedDict, total=False):
`incomplete_details` for more info.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: Union[str, ChatModel, None]
@@ -199,12 +202,14 @@ class AdditionalMessage(TypedDict, total=False):
attachments: Optional[Iterable[AdditionalMessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/threads/run_update_params.py
@@ -5,16 +5,20 @@ from __future__ import annotations
from typing import Optional
from typing_extensions import Required, TypedDict
+from ...shared_params.metadata import Metadata
+
__all__ = ["RunUpdateParams"]
class RunUpdateParams(TypedDict, total=False):
thread_id: Required[str]
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/assistant.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal
from ..._models import BaseModel
from .assistant_tool import AssistantTool
+from ..shared.metadata import Metadata
from .assistant_response_format_option import AssistantResponseFormatOption
__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -51,12 +52,14 @@ class Assistant(BaseModel):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
src/openai/types/beta/assistant_create_params.py
@@ -7,6 +7,7 @@ from typing_extensions import Required, TypedDict
from ..chat_model import ChatModel
from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -39,12 +40,14 @@ class AssistantCreateParams(TypedDict, total=False):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: Optional[str]
@@ -130,12 +133,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/assistant_update_params.py
@@ -6,6 +6,7 @@ from typing import List, Iterable, Optional
from typing_extensions import TypedDict
from .assistant_tool_param import AssistantToolParam
+from ..shared_params.metadata import Metadata
from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -21,12 +22,14 @@ class AssistantUpdateParams(TypedDict, total=False):
The maximum length is 256,000 characters.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: str
src/openai/types/beta/thread.py
@@ -4,6 +4,7 @@ from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
+from ..shared.metadata import Metadata
__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
@@ -40,12 +41,14 @@ class Thread(BaseModel):
created_at: int
"""The Unix timestamp (in seconds) for when the thread was created."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
object: Literal["thread"]
src/openai/types/beta/thread_create_and_run_params.py
@@ -8,6 +8,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..chat_model import ChatModel
from .function_tool_param import FunctionToolParam
from .file_search_tool_param import FileSearchToolParam
+from ..shared_params.metadata import Metadata
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -67,12 +68,14 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
`incomplete_details` for more info.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
model: Union[str, ChatModel, None]
@@ -122,7 +125,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
"""
thread: Thread
- """If no thread is provided, an empty thread will be created."""
+ """Options to create a new thread.
+
+ If no thread is provided when running a request, an empty thread will be
+ created.
+ """
tool_choice: Optional[AssistantToolChoiceOptionParam]
"""
@@ -197,12 +204,14 @@ class ThreadMessage(TypedDict, total=False):
attachments: Optional[Iterable[ThreadMessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -230,12 +239,14 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -270,12 +281,14 @@ class Thread(TypedDict, total=False):
start the thread with.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ThreadToolResources]
src/openai/types/beta/thread_create_params.py
@@ -5,6 +5,7 @@ from __future__ import annotations
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from ..shared_params.metadata import Metadata
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .file_chunking_strategy_param import FileChunkingStrategyParam
from .threads.message_content_part_param import MessageContentPartParam
@@ -29,12 +30,14 @@ class ThreadCreateParams(TypedDict, total=False):
start the thread with.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ToolResources]
@@ -78,12 +81,14 @@ class Message(TypedDict, total=False):
attachments: Optional[Iterable[MessageAttachment]]
"""A list of files attached to the message, and the tools they should be added to."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
@@ -111,12 +116,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
store.
"""
- metadata: object
- """Set of 16 key-value pairs that can be attached to a vector store.
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
- This can be useful for storing additional information about the vector store in
- a structured format. Keys can be a maximum of 64 characters long and values can
- be a maximum of 512 characters long.
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
src/openai/types/beta/thread_update_params.py
@@ -5,16 +5,20 @@ from __future__ import annotations
from typing import List, Optional
from typing_extensions import TypedDict
+from ..shared_params.metadata import Metadata
+
__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
class ThreadUpdateParams(TypedDict, total=False):
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
tool_resources: Optional[ToolResources]
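The corresponding call site, as a sketch mirroring the updated tests; the thread ID is a placeholder:

    # Assumes: from openai import OpenAI; client = OpenAI()
    thread = client.beta.threads.update(
        thread_id="thread_id",
        metadata={"foo": "string"},
    )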
src/openai/types/beta/vector_store.py
@@ -4,6 +4,7 @@ from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
+from ..shared.metadata import Metadata
__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
@@ -48,12 +49,14 @@ class VectorStore(BaseModel):
last_active_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the vector store was last active."""
- metadata: Optional[object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: str
src/openai/types/beta/vector_store_create_params.py
@@ -5,6 +5,7 @@ from __future__ import annotations
from typing import List, Optional
from typing_extensions import Literal, Required, TypedDict
+from ..shared_params.metadata import Metadata
from .file_chunking_strategy_param import FileChunkingStrategyParam
__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
@@ -28,12 +29,14 @@ class VectorStoreCreateParams(TypedDict, total=False):
files.
"""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: str
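A sketch of creating a vector store with typed metadata and an expiration policy, mirroring the updated tests; names and IDs are placeholders:

    # Assumes: from openai import OpenAI; client = OpenAI()
    vector_store = client.beta.vector_stores.create(
        name="name",
        file_ids=["string"],
        expires_after={"anchor": "last_active_at", "days": 1},
        metadata={"foo": "string"},
    )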
src/openai/types/beta/vector_store_update_params.py
@@ -5,6 +5,8 @@ from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
+from ..shared_params.metadata import Metadata
+
__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
@@ -12,12 +14,14 @@ class VectorStoreUpdateParams(TypedDict, total=False):
expires_after: Optional[ExpiresAfter]
"""The expiration policy for a vector store."""
- metadata: Optional[object]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
name: Optional[str]
src/openai/types/chat/chat_completion_assistant_message_param.py
@@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
"""The role of the messages author, in this case `assistant`."""
audio: Optional[Audio]
- """
- Data about a previous audio response from the model.
+ """Data about a previous audio response from the model.
+
[Learn more](https://platform.openai.com/docs/guides/audio).
"""
src/openai/types/chat/completion_create_params.py
@@ -6,6 +6,7 @@ from typing import Dict, List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..chat_model import ChatModel
+from ..shared_params.metadata import Metadata
from .chat_completion_modality import ChatCompletionModality
from .chat_completion_tool_param import ChatCompletionToolParam
from .chat_completion_audio_param import ChatCompletionAudioParam
@@ -122,10 +123,14 @@ class CompletionCreateParamsBase(TypedDict, total=False):
[o1 series models](https://platform.openai.com/docs/guides/reasoning).
"""
- metadata: Optional[Dict[str, str]]
- """
- Developer-defined tags and values used for filtering completions in the
- [dashboard](https://platform.openai.com/chat-completions).
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
modalities: Optional[List[ChatCompletionModality]]
@@ -216,9 +221,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
utilize scale tier credits until they are exhausted.
- If set to 'auto', and the Project is not Scale tier enabled, the request will
be processed using the default service tier with a lower uptime SLA and no
- latency guarentee.
+ latency guarantee.
- If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarentee.
+ tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
"""
src/openai/types/__init__.py
@@ -6,6 +6,7 @@ from .batch import Batch as Batch
from .image import Image as Image
from .model import Model as Model
from .shared import (
+ Metadata as Metadata,
ErrorObject as ErrorObject,
FunctionDefinition as FunctionDefinition,
FunctionParameters as FunctionParameters,
src/openai/types/batch.py
@@ -1,11 +1,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import builtins
from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
from .batch_error import BatchError
+from .shared.metadata import Metadata
from .batch_request_counts import BatchRequestCounts
__all__ = ["Batch", "Errors"]
@@ -70,12 +70,14 @@ class Batch(BaseModel):
in_progress_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the batch started processing."""
- metadata: Optional[builtins.object] = None
+ metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
- structured format. Keys can be a maximum of 64 characters long and values can be
- a maximum of 512 characters long.
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
"""
output_file_id: Optional[str] = None
src/openai/types/batch_create_params.py
@@ -2,9 +2,11 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Optional
from typing_extensions import Literal, Required, TypedDict
+from .shared_params.metadata import Metadata
+
__all__ = ["BatchCreateParams"]
@@ -35,5 +37,12 @@ class BatchCreateParams(TypedDict, total=False):
requests, and can be up to 200 MB in size.
"""
- metadata: Optional[Dict[str, str]]
- """Optional custom metadata for the batch."""
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
src/openai/types/chat_model.py
@@ -5,6 +5,8 @@ from typing_extensions import Literal, TypeAlias
__all__ = ["ChatModel"]
ChatModel: TypeAlias = Literal[
+ "o3-mini",
+ "o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"o1-preview",
src/openai/types/upload.py
@@ -39,4 +39,4 @@ class Upload(BaseModel):
"""The status of the Upload."""
file: Optional[FileObject] = None
- """The ready File object after the Upload is completed."""
+ """The `File` object represents a document that has been uploaded to OpenAI."""
tests/api_resources/beta/realtime/test_sessions.py
@@ -26,7 +26,11 @@ class TestSessions:
def test_method_create_with_all_params(self, client: OpenAI) -> None:
session = client.beta.realtime.sessions.create(
input_audio_format="pcm16",
- input_audio_transcription={"model": "model"},
+ input_audio_transcription={
+ "language": "language",
+ "model": "model",
+ "prompt": "prompt",
+ },
instructions="instructions",
max_response_output_tokens=0,
modalities=["text"],
@@ -86,7 +90,11 @@ class TestAsyncSessions:
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.realtime.sessions.create(
input_audio_format="pcm16",
- input_audio_transcription={"model": "model"},
+ input_audio_transcription={
+ "language": "language",
+ "model": "model",
+ "prompt": "prompt",
+ },
instructions="instructions",
max_response_output_tokens=0,
modalities=["text"],
tests/api_resources/beta/threads/test_messages.py
@@ -42,7 +42,7 @@ class TestMessages:
"tools": [{"type": "code_interpreter"}],
}
],
- metadata={},
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -142,9 +142,9 @@ class TestMessages:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
message = client.beta.threads.messages.update(
- "string",
- thread_id="string",
- metadata={},
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -311,7 +311,7 @@ class TestAsyncMessages:
"tools": [{"type": "code_interpreter"}],
}
],
- metadata={},
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@@ -411,9 +411,9 @@ class TestAsyncMessages:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
message = await async_client.beta.threads.messages.update(
- "string",
- thread_id="string",
- metadata={},
+ message_id="message_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
tests/api_resources/beta/threads/test_runs.py
@@ -47,13 +47,13 @@ class TestRuns:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -130,13 +130,13 @@ class TestRuns:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -246,9 +246,9 @@ class TestRuns:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
run = client.beta.threads.runs.update(
- "string",
- thread_id="string",
- metadata={},
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Run, run, path=["response"])
@@ -543,13 +543,13 @@ class TestAsyncRuns:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -626,13 +626,13 @@ class TestAsyncRuns:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -742,9 +742,9 @@ class TestAsyncRuns:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
run = await async_client.beta.threads.runs.update(
- "string",
- thread_id="string",
- metadata={},
+ run_id="run_id",
+ thread_id="thread_id",
+ metadata={"foo": "string"},
)
assert_matches_type(Run, run, path=["response"])
tests/api_resources/beta/test_assistants.py
@@ -34,7 +34,7 @@ class TestAssistants:
model="gpt-4o",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
name="name",
response_format="auto",
temperature=1,
@@ -46,7 +46,7 @@ class TestAssistants:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -131,7 +131,7 @@ class TestAssistants:
assistant_id="assistant_id",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
model="model",
name="name",
response_format="auto",
@@ -266,7 +266,7 @@ class TestAsyncAssistants:
model="gpt-4o",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
name="name",
response_format="auto",
temperature=1,
@@ -278,7 +278,7 @@ class TestAsyncAssistants:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -363,7 +363,7 @@ class TestAsyncAssistants:
assistant_id="assistant_id",
description="description",
instructions="instructions",
- metadata={},
+ metadata={"foo": "string"},
model="model",
name="name",
response_format="auto",
tests/api_resources/beta/test_threads.py
@@ -39,10 +39,10 @@ class TestThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- metadata={},
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -51,7 +51,7 @@ class TestThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -127,8 +127,8 @@ class TestThreads:
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
thread = client.beta.threads.update(
- "string",
- metadata={},
+ thread_id="thread_id",
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
@@ -219,7 +219,7 @@ class TestThreads:
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -236,10 +236,10 @@ class TestThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -248,7 +248,7 @@ class TestThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -308,7 +308,7 @@ class TestThreads:
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -324,10 +324,10 @@ class TestThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -336,7 +336,7 @@ class TestThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -403,10 +403,10 @@ class TestAsyncThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- metadata={},
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -415,7 +415,7 @@ class TestAsyncThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -491,8 +491,8 @@ class TestAsyncThreads:
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
thread = await async_client.beta.threads.update(
- "string",
- metadata={},
+ thread_id="thread_id",
+ metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
@@ -583,7 +583,7 @@ class TestAsyncThreads:
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -600,10 +600,10 @@ class TestAsyncThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -612,7 +612,7 @@ class TestAsyncThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
@@ -672,7 +672,7 @@ class TestAsyncThreads:
instructions="string",
max_completion_tokens=256,
max_prompt_tokens=256,
- metadata={},
+ metadata={"foo": "string"},
model="gpt-4o",
parallel_tool_calls=True,
response_format="auto",
@@ -688,10 +688,10 @@ class TestAsyncThreads:
"tools": [{"type": "code_interpreter"}],
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
- "metadata": {},
+ "metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
@@ -700,7 +700,7 @@ class TestAsyncThreads:
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
- "metadata": {},
+ "metadata": {"foo": "string"},
}
],
},
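For orientation (not part of the diff): a minimal sketch of the thread-update call shape that the revised test_threads.py cases exercise, with thread_id passed as a keyword argument and metadata as a non-empty string-to-string dict. The client setup and placeholder values below are illustrative only, assuming OPENAI_API_KEY is set in the environment.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Same call shape as test_method_update_with_all_params above
thread = client.beta.threads.update(
    thread_id="thread_id",          # keyword argument, matching the updated tests
    metadata={"foo": "string"},     # free-form string-to-string metadata
    tool_resources={
        "code_interpreter": {"file_ids": ["string"]},
        "file_search": {"vector_store_ids": ["string"]},
    },
)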
tests/api_resources/beta/test_vector_stores.py
@@ -35,8 +35,8 @@ class TestVectorStores:
"days": 1,
},
file_ids=["string"],
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -113,8 +113,8 @@ class TestVectorStores:
"anchor": "last_active_at",
"days": 1,
},
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -240,8 +240,8 @@ class TestAsyncVectorStores:
"days": 1,
},
file_ids=["string"],
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@@ -318,8 +318,8 @@ class TestAsyncVectorStores:
"anchor": "last_active_at",
"days": 1,
},
- metadata={},
- name="string",
+ metadata={"foo": "string"},
+ name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
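Likewise, a hedged sketch (illustrative, not taken from the diff) of the vector store creation call covered by the updated test_vector_stores.py cases, again using the placeholder values from the tests:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Same call shape as test_method_create_with_all_params above
vector_store = client.beta.vector_stores.create(
    expires_after={"anchor": "last_active_at", "days": 1},
    file_ids=["string"],
    metadata={"foo": "string"},   # free-form string-to-string metadata
    name="name",                  # human-readable name for the vector store
)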
.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 69
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml
api.md
@@ -5,6 +5,7 @@ from openai.types import (
ErrorObject,
FunctionDefinition,
FunctionParameters,
+ Metadata,
ResponseFormatJSONObject,
ResponseFormatJSONSchema,
ResponseFormatText,