Commit 3c1f5543
Changed files (22)
src/openai/resources/responses/responses.py
@@ -269,7 +269,7 @@ class Responses(SyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
@@ -508,7 +508,7 @@ class Responses(SyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
@@ -747,7 +747,7 @@ class Responses(SyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
@@ -1700,7 +1700,7 @@ class AsyncResponses(AsyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
@@ -1939,7 +1939,7 @@ class AsyncResponses(AsyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
@@ -2178,7 +2178,7 @@ class AsyncResponses(AsyncAPIResource):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
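The docstrings above now list SharePoint instead of Notion among the predefined connectors. A minimal sketch of pointing `responses.create` at a connector-backed MCP tool, assuming the `connector_id` value and the OAuth `authorization` field follow the MCP guide linked in the docstring (neither appears in this diff):

# Example (not part of this commit): connector-backed MCP tool, per the linked guide.
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1",
    tools=[
        {
            "type": "mcp",
            "server_label": "sharepoint",
            "connector_id": "connector_sharepoint",  # assumed connector ID; see the MCP guide
            "authorization": "<oauth-access-token>",  # user-supplied OAuth token for the connector
            "require_approval": "never",
        }
    ],
    input="Summarize the most recent design document on our SharePoint site.",
)
print(response.output_text)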
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -9,6 +9,7 @@ from ..shared.metadata import Metadata
from ..shared.response_format_text import ResponseFormatText
from ..responses.easy_input_message import EasyInputMessage
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
from ..chat.chat_completion_function_tool import ChatCompletionFunctionTool
from ..shared.response_format_json_object import ResponseFormatJSONObject
from ..shared.response_format_json_schema import ResponseFormatJSONSchema
@@ -114,6 +115,7 @@ InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
ResponseInputText,
InputMessagesTemplateTemplateEvalItemContentOutputText,
InputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudio,
List[object],
]
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -9,6 +9,7 @@ from ..shared_params.metadata import Metadata
from ..responses.easy_input_message_param import EasyInputMessageParam
from ..shared_params.response_format_text import ResponseFormatText
from ..responses.response_input_text_param import ResponseInputTextParam
+from ..responses.response_input_audio_param import ResponseInputAudioParam
from ..chat.chat_completion_function_tool_param import ChatCompletionFunctionToolParam
from ..shared_params.response_format_json_object import ResponseFormatJSONObject
from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
@@ -112,6 +113,7 @@ InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
ResponseInputTextParam,
InputMessagesTemplateTemplateEvalItemContentOutputText,
InputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudioParam,
Iterable[object],
]
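With `ResponseInputAudioParam` added to the template eval-item content union, an audio part now type-checks as message content in a completions run data source. A small sketch, assuming base64-encoded WAV bytes from a hypothetical local file; only the audio part shape comes from this commit:

# Example (not part of this commit): an audio part as template eval-item content.
import base64

from openai.types.evals.create_eval_completions_run_data_source_param import (
    InputMessagesTemplateTemplateEvalItemContent,
)

with open("sample.wav", "rb") as f:  # hypothetical local file
    encoded = base64.b64encode(f.read()).decode("ascii")

audio_content: InputMessagesTemplateTemplateEvalItemContent = {
    "type": "input_audio",
    "input_audio": {"data": encoded, "format": "wav"},
}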
src/openai/types/evals/run_cancel_response.py
@@ -12,6 +12,7 @@ from ..responses.tool import Tool
from ..shared.metadata import Metadata
from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from ..responses.response_format_text_config import ResponseFormatTextConfig
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
@@ -158,6 +159,7 @@ DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Uni
ResponseInputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudio,
List[object],
]
src/openai/types/evals/run_create_params.py
@@ -10,6 +10,7 @@ from ..responses.tool_param import ToolParam
from ..shared_params.metadata import Metadata
from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text_param import ResponseInputTextParam
+from ..responses.response_input_audio_param import ResponseInputAudioParam
from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam
from ..responses.response_format_text_config_param import ResponseFormatTextConfigParam
from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam
@@ -176,6 +177,7 @@ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemC
ResponseInputTextParam,
DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText,
DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudioParam,
Iterable[object],
]
src/openai/types/evals/run_create_response.py
@@ -12,6 +12,7 @@ from ..responses.tool import Tool
from ..shared.metadata import Metadata
from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from ..responses.response_format_text_config import ResponseFormatTextConfig
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
@@ -158,6 +159,7 @@ DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Uni
ResponseInputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudio,
List[object],
]
src/openai/types/evals/run_list_response.py
@@ -12,6 +12,7 @@ from ..responses.tool import Tool
from ..shared.metadata import Metadata
from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from ..responses.response_format_text_config import ResponseFormatTextConfig
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
@@ -158,6 +159,7 @@ DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Uni
ResponseInputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudio,
List[object],
]
src/openai/types/evals/run_retrieve_response.py
@@ -12,6 +12,7 @@ from ..responses.tool import Tool
from ..shared.metadata import Metadata
from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from ..responses.response_format_text_config import ResponseFormatTextConfig
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
@@ -158,6 +159,7 @@ DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Uni
ResponseInputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText,
DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage,
+ ResponseInputAudio,
List[object],
]
src/openai/types/graders/label_model_grader.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
@@ -31,7 +32,9 @@ class InputContentInputImage(BaseModel):
"""
-InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]]
+InputContent: TypeAlias = Union[
+ str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object]
+]
class Input(BaseModel):
src/openai/types/graders/label_model_grader_param.py
@@ -7,6 +7,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
from ..responses.response_input_text_param import ResponseInputTextParam
+from ..responses.response_input_audio_param import ResponseInputAudioParam
__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
@@ -34,7 +35,12 @@ class InputContentInputImage(TypedDict, total=False):
InputContent: TypeAlias = Union[
- str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object]
+ str,
+ ResponseInputTextParam,
+ InputContentOutputText,
+ InputContentInputImage,
+ ResponseInputAudioParam,
+ Iterable[object],
]
src/openai/types/graders/score_model_grader.py
@@ -5,6 +5,7 @@ from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_audio import ResponseInputAudio
__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
@@ -31,7 +32,9 @@ class InputContentInputImage(BaseModel):
"""
-InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]]
+InputContent: TypeAlias = Union[
+ str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object]
+]
class Input(BaseModel):
src/openai/types/graders/score_model_grader_param.py
@@ -6,6 +6,7 @@ from typing import Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..responses.response_input_text_param import ResponseInputTextParam
+from ..responses.response_input_audio_param import ResponseInputAudioParam
__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
@@ -33,7 +34,12 @@ class InputContentInputImage(TypedDict, total=False):
InputContent: TypeAlias = Union[
- str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object]
+ str,
+ ResponseInputTextParam,
+ InputContentOutputText,
+ InputContentInputImage,
+ ResponseInputAudioParam,
+ Iterable[object],
]
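The grader `InputContent` unions gain the same audio member, so a grader prompt can carry an audio part. A sketch for a score-model grader; the grader name and grading model are placeholders, not values from this commit:

# Example (not part of this commit): audio content inside a score-model grader input.
from openai.types.graders.score_model_grader_param import InputContent, ScoreModelGraderParam

audio_part: InputContent = {
    "type": "input_audio",
    "input_audio": {"data": "<base64-mp3-bytes>", "format": "mp3"},
}

grader: ScoreModelGraderParam = {
    "type": "score_model",
    "name": "audio_quality",   # illustrative name
    "model": "gpt-4o-mini",    # assumed grading model
    "input": [{"role": "user", "content": audio_part}],
}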
src/openai/types/responses/__init__.py
@@ -38,6 +38,7 @@ from .response_input_text import ResponseInputText as ResponseInputText
from .tool_choice_allowed import ToolChoiceAllowed as ToolChoiceAllowed
from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions
from .response_error_event import ResponseErrorEvent as ResponseErrorEvent
+from .response_input_audio import ResponseInputAudio as ResponseInputAudio
from .response_input_image import ResponseInputImage as ResponseInputImage
from .response_input_param import ResponseInputParam as ResponseInputParam
from .response_output_item import ResponseOutputItem as ResponseOutputItem
@@ -75,6 +76,7 @@ from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDel
from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam
from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent
from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent
+from .response_input_audio_param import ResponseInputAudioParam as ResponseInputAudioParam
from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam
from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam
from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
src/openai/types/responses/response.py
@@ -125,7 +125,7 @@ class Response(BaseModel):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
src/openai/types/responses/response_create_params.py
@@ -225,7 +225,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
Learn more about
[built-in tools](https://platform.openai.com/docs/guides/tools).
- **MCP Tools**: Integrations with third-party systems via custom MCP servers or
- predefined connectors such as Google Drive and Notion. Learn more about
+ predefined connectors such as Google Drive and SharePoint. Learn more about
[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
- **Function calls (custom tools)**: Functions that are defined by you, enabling
the model to call your own code with strongly typed arguments and outputs.
src/openai/types/responses/response_input_audio.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputAudio", "InputAudio"]
+
+
+class InputAudio(BaseModel):
+ data: str
+ """Base64-encoded audio data."""
+
+ format: Literal["mp3", "wav"]
+ """The format of the audio data. Currently supported formats are `mp3` and `wav`."""
+
+
+class ResponseInputAudio(BaseModel):
+ input_audio: InputAudio
+
+ type: Literal["input_audio"]
+ """The type of the input item. Always `input_audio`."""
src/openai/types/responses/response_input_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputAudioParam", "InputAudio"]
+
+
+class InputAudio(TypedDict, total=False):
+ data: Required[str]
+ """Base64-encoded audio data."""
+
+ format: Required[Literal["mp3", "wav"]]
+ """The format of the audio data. Currently supported formats are `mp3` and `wav`."""
+
+
+class ResponseInputAudioParam(TypedDict, total=False):
+ input_audio: Required[InputAudio]
+
+ type: Required[Literal["input_audio"]]
+ """The type of the input item. Always `input_audio`."""
src/openai/types/responses/response_input_content.py
@@ -6,10 +6,12 @@ from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .response_input_file import ResponseInputFile
from .response_input_text import ResponseInputText
+from .response_input_audio import ResponseInputAudio
from .response_input_image import ResponseInputImage
__all__ = ["ResponseInputContent"]
ResponseInputContent: TypeAlias = Annotated[
- Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
+ Union[ResponseInputText, ResponseInputImage, ResponseInputFile, ResponseInputAudio],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/responses/response_input_content_param.py
@@ -7,8 +7,11 @@ from typing_extensions import TypeAlias
from .response_input_file_param import ResponseInputFileParam
from .response_input_text_param import ResponseInputTextParam
+from .response_input_audio_param import ResponseInputAudioParam
from .response_input_image_param import ResponseInputImageParam
__all__ = ["ResponseInputContentParam"]
-ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+ResponseInputContentParam: TypeAlias = Union[
+ ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam, ResponseInputAudioParam
+]
src/openai/types/responses/response_input_message_content_list_param.py
@@ -7,10 +7,13 @@ from typing_extensions import TypeAlias
from .response_input_file_param import ResponseInputFileParam
from .response_input_text_param import ResponseInputTextParam
+from .response_input_audio_param import ResponseInputAudioParam
from .response_input_image_param import ResponseInputImageParam
__all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"]
-ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+ResponseInputContentParam: TypeAlias = Union[
+ ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam, ResponseInputAudioParam
+]
ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam]
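Because `ResponseInputContentParam` and the message content list now accept the audio part, it can sit alongside text parts in a `responses.create` input message. A sketch only: the typing change is from this commit, while the model name and whether it accepts audio input through the Responses API are assumptions:

# Example (not part of this commit): an audio part inside an input message content list.
from openai import OpenAI

client = OpenAI()

encoded = "<base64-encoded wav bytes>"  # placeholder

response = client.responses.create(
    model="gpt-4o-audio-preview",  # placeholder; an audio-capable model is assumed
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "Transcribe this clip and summarize it."},
                {"type": "input_audio", "input_audio": {"data": encoded, "format": "wav"}},
            ],
        }
    ],
)
print(response.output_text)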
src/openai/types/eval_create_params.py
@@ -12,6 +12,7 @@ from .graders.score_model_grader_param import ScoreModelGraderParam
from .graders.string_check_grader_param import StringCheckGraderParam
from .responses.response_input_text_param import ResponseInputTextParam
from .graders.text_similarity_grader_param import TextSimilarityGraderParam
+from .responses.response_input_audio_param import ResponseInputAudioParam
__all__ = [
"EvalCreateParams",
@@ -130,6 +131,7 @@ TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[
ResponseInputTextParam,
TestingCriterionLabelModelInputEvalItemContentOutputText,
TestingCriterionLabelModelInputEvalItemContentInputImage,
+ ResponseInputAudioParam,
Iterable[object],
]
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 118
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-356b4364203ff36d7724074cd04f6e684253bfcc3c9d969122d730aa7bc51b46.yml
-openapi_spec_hash: 4ab8e96f52699bc3d2b0c4432aa92af8
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f312a661d9dd6b5d6d676e449c357f6414afd1fdaaf4d982d44ad86cba5c5f6e.yml
+openapi_spec_hash: b62fd3d3fb98e37b1da0a2e22af51d40
config_hash: b854932c0ea24b400bdd64e4376936bd