Commit db5c3504
Changed files (17)
src/openai/types/audio/transcription.py
@@ -46,7 +46,7 @@ class UsageTokens(BaseModel):
class UsageDuration(BaseModel):
- duration: float
+ seconds: float
"""Duration of the input audio in seconds."""
type: Literal["duration"]
src/openai/types/audio/transcription_verbose.py
@@ -11,7 +11,7 @@ __all__ = ["TranscriptionVerbose", "Usage"]
class Usage(BaseModel):
- duration: float
+ seconds: float
"""Duration of the input audio in seconds."""
type: Literal["duration"]
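Both transcription usage models now report the billed duration under `seconds` instead of `duration`. A minimal sketch of reading the renamed field, assuming a local `speech.mp3` file and that `usage` is populated on verbose transcription responses:

```python
from openai import OpenAI

client = OpenAI()

# Hypothetical local audio file; any supported format works.
with open("speech.mp3", "rb") as audio_file:
    transcription = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
        response_format="verbose_json",
    )

# Duration-based usage is now exposed as `seconds` rather than `duration`.
if transcription.usage is not None and transcription.usage.type == "duration":
    print(f"Billed for {transcription.usage.seconds} seconds of audio")
```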
src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py
@@ -1,11 +1,54 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
-from typing_extensions import Literal
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
from ...._models import BaseModel
-__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent", "Logprob"]
+__all__ = [
+ "ConversationItemInputAudioTranscriptionCompletedEvent",
+ "Usage",
+ "UsageTranscriptTextUsageTokens",
+ "UsageTranscriptTextUsageTokensInputTokenDetails",
+ "UsageTranscriptTextUsageDuration",
+ "Logprob",
+]
+
+
+class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """Number of audio tokens billed for this request."""
+
+ text_tokens: Optional[int] = None
+ """Number of text tokens billed for this request."""
+
+
+class UsageTranscriptTextUsageTokens(BaseModel):
+ input_tokens: int
+ """Number of input tokens billed for this request."""
+
+ output_tokens: int
+ """Number of output tokens generated."""
+
+ total_tokens: int
+ """Total number of tokens used (input + output)."""
+
+ type: Literal["tokens"]
+ """The type of the usage object. Always `tokens` for this variant."""
+
+ input_token_details: Optional[UsageTranscriptTextUsageTokensInputTokenDetails] = None
+ """Details about the input tokens billed for this request."""
+
+
+class UsageTranscriptTextUsageDuration(BaseModel):
+ seconds: float
+ """Duration of the input audio in seconds."""
+
+ type: Literal["duration"]
+ """The type of the usage object. Always `duration` for this variant."""
+
+
+Usage: TypeAlias = Union[UsageTranscriptTextUsageTokens, UsageTranscriptTextUsageDuration]
class Logprob(BaseModel):
@@ -37,5 +80,8 @@ class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel):
The event type, must be `conversation.item.input_audio_transcription.completed`.
"""
+ usage: Usage
+ """Usage statistics for the transcription."""
+
logprobs: Optional[List[Logprob]] = None
"""The log probabilities of the transcription."""
src/openai/types/responses/response_input_file.py
@@ -18,5 +18,8 @@ class ResponseInputFile(BaseModel):
file_id: Optional[str] = None
"""The ID of the file to be sent to the model."""
+ file_url: Optional[str] = None
+ """The URL of the file to be sent to the model."""
+
filename: Optional[str] = None
"""The name of the file to be sent to the model."""
src/openai/types/responses/response_input_file_param.py
@@ -18,5 +18,8 @@ class ResponseInputFileParam(TypedDict, total=False):
file_id: Optional[str]
"""The ID of the file to be sent to the model."""
+ file_url: str
+ """The URL of the file to be sent to the model."""
+
filename: str
"""The name of the file to be sent to the model."""
src/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -20,5 +20,5 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
sequence_number: int
"""The sequence number of this event."""
- type: Literal["response.mcp_call.arguments_delta"]
- """The type of the event. Always 'response.mcp_call.arguments_delta'."""
+ type: Literal["response.mcp_call_arguments.delta"]
+ """The type of the event. Always 'response.mcp_call_arguments.delta'."""
src/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -20,5 +20,5 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel):
sequence_number: int
"""The sequence number of this event."""
- type: Literal["response.mcp_call.arguments_done"]
- """The type of the event. Always 'response.mcp_call.arguments_done'."""
+ type: Literal["response.mcp_call_arguments.done"]
+ """The type of the event. Always 'response.mcp_call_arguments.done'."""
src/openai/types/responses/response_output_text_annotation_added_event.py
@@ -26,5 +26,5 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel):
sequence_number: int
"""The sequence number of this event."""
- type: Literal["response.output_text_annotation.added"]
- """The type of the event. Always 'response.output_text_annotation.added'."""
+ type: Literal["response.output_text.annotation.added"]
+ """The type of the event. Always 'response.output_text.annotation.added'."""
src/openai/types/responses/tool.py
@@ -79,6 +79,9 @@ class Mcp(BaseModel):
require_approval: Optional[McpRequireApproval] = None
"""Specify which of the MCP server's tools require approval."""
+ server_description: Optional[str] = None
+ """Optional description of the MCP server, used to provide more context."""
+
class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
type: Literal["auto"]
src/openai/types/responses/tool_param.py
@@ -80,6 +80,9 @@ class Mcp(TypedDict, total=False):
require_approval: Optional[McpRequireApproval]
"""Specify which of the MCP server's tools require approval."""
+ server_description: str
+ """Optional description of the MCP server, used to provide more context."""
+
class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
type: Required[Literal["auto"]]
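`server_description` gives the model extra context about an MCP server when deciding whether and how to call its tools. A sketch of passing it in a Responses request; the server label, URL, and model are illustrative:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1",
    input="List the documentation pages this server can fetch.",
    tools=[
        {
            "type": "mcp",
            "server_label": "internal_docs",
            "server_url": "https://example.com/mcp",
            # New optional field added in this commit.
            "server_description": "Read-only access to internal API documentation.",
            "require_approval": "never",
        }
    ],
)
print(response.output_text)
```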
src/openai/types/file_object.py
@@ -25,12 +25,19 @@ class FileObject(BaseModel):
"""The object type, which is always `file`."""
purpose: Literal[
- "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision"
+ "assistants",
+ "assistants_output",
+ "batch",
+ "batch_output",
+ "fine-tune",
+ "fine-tune-results",
+ "vision",
+ "user_data",
]
"""The intended purpose of the file.
Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`,
- `fine-tune`, `fine-tune-results` and `vision`.
+ `fine-tune`, `fine-tune-results`, `vision`, and `user_data`.
"""
status: Literal["uploaded", "processed", "error"]
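`user_data` is now listed among the supported file purposes. A minimal upload sketch, assuming a local `notes.txt` and that the upload endpoint accepts `user_data` as well as reporting it on `FileObject`:

```python
from openai import OpenAI

client = OpenAI()

with open("notes.txt", "rb") as f:
    file_obj = client.files.create(file=f, purpose="user_data")

print(file_obj.id, file_obj.purpose)  # purpose will be "user_data"
```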
src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.94.0" # x-release-please-version
+__version__ = "1.95.0" # x-release-please-version
.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.94.0"
+ ".": "1.95.0"
}
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a473967d1766dc155994d932fbc4a5bcbd1c140a37c20d0a4065e1bf0640536d.yml
-openapi_spec_hash: 67cdc62b0d6c8b1de29b7dc54b265749
-config_hash: 7b53f96f897ca1b3407a5341a6f820db
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2d116cda53321baa3479e628512def723207a81eb1cdaebb542bd0555e563bda.yml
+openapi_spec_hash: 809d958fec261a32004a4b026b718793
+config_hash: e74d6791681e3af1b548748ff47a22c2
CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## 1.95.0 (2025-07-10)
+
+Full Changelog: [v1.94.0...v1.95.0](https://github.com/openai/openai-python/compare/v1.94.0...v1.95.0)
+
+### Features
+
+* **api:** add file_url, fix event ID ([265e216](https://github.com/openai/openai-python/commit/265e216396196d66cdfb5f92c5ef1a2a6ff27b5b))
+
+
+### Chores
+
+* **readme:** fix version rendering on pypi ([1eee5ca](https://github.com/openai/openai-python/commit/1eee5cabf2fd93877cd3ba85d0c6ed2ffd5f159f))
+
## 1.94.0 (2025-07-10)
Full Changelog: [v1.93.3...v1.94.0](https://github.com/openai/openai-python/compare/v1.93.3...v1.94.0)
pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.94.0"
+version = "1.95.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
README.md
@@ -1,6 +1,7 @@
# OpenAI Python API library
-[![PyPI version](…)](https://pypi.org/project/openai/)
+<!-- prettier-ignore -->
+[![PyPI version](…)](https://pypi.org/project/openai/)
The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+
application. The library includes type definitions for all request params and response fields,