Commit 489dadfb
Changed files (5)
src
openai
resources
audio
chat
types
tests
api_resources
src/openai/resources/audio/transcriptions.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Mapping, cast
+from typing import List, Union, Mapping, cast
from typing_extensions import Literal
import httpx
@@ -39,6 +39,7 @@ class Transcriptions(SyncAPIResource):
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -74,6 +75,10 @@ class Transcriptions(SyncAPIResource):
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
+ timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
+ options: `word` or `segment`. Note: There is no additional latency for segment
+ timestamps, but generating word timestamps incurs additional latency.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -90,6 +95,7 @@ class Transcriptions(SyncAPIResource):
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
+ "timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
@@ -127,6 +133,7 @@ class AsyncTranscriptions(AsyncAPIResource):
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -162,6 +169,10 @@ class AsyncTranscriptions(AsyncAPIResource):
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
+ timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
+ options: `word` or `segment`. Note: There is no additional latency for segment
+ timestamps, but generating word timestamps incurs additional latency.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -178,6 +189,7 @@ class AsyncTranscriptions(AsyncAPIResource):
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
+ "timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
src/openai/resources/chat/completions.py
@@ -61,6 +61,7 @@ class Completions(SyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -155,7 +156,7 @@ class Completions(SyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -250,6 +251,7 @@ class Completions(SyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -351,7 +353,7 @@ class Completions(SyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -439,6 +441,7 @@ class Completions(SyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -540,7 +543,7 @@ class Completions(SyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -628,6 +631,7 @@ class Completions(SyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -724,6 +728,7 @@ class AsyncCompletions(AsyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -818,7 +823,7 @@ class AsyncCompletions(AsyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -913,6 +918,7 @@ class AsyncCompletions(AsyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -1014,7 +1020,7 @@ class AsyncCompletions(AsyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1102,6 +1108,7 @@ class AsyncCompletions(AsyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
@@ -1203,7 +1210,7 @@ class AsyncCompletions(AsyncAPIResource):
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1291,6 +1298,7 @@ class AsyncCompletions(AsyncAPIResource):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
src/openai/types/audio/transcription_create_params.py
@@ -2,10 +2,11 @@
from __future__ import annotations
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
+from typing import List, Union
+from typing_extensions import Literal, Required, Annotated, TypedDict
from ..._types import FileTypes
+from ..._utils import PropertyInfo
__all__ = ["TranscriptionCreateParams"]
@@ -50,3 +51,13 @@ class TranscriptionCreateParams(TypedDict, total=False):
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
"""
+
+ timestamp_granularities: Annotated[
+ List[Literal["word", "segment"]], PropertyInfo(alias="timestamp_granularities[]")
+ ]
+ """The timestamp granularities to populate for this transcription.
+
+ Any of these options: `word` or `segment`. Note: There is no additional latency
+ for segment timestamps, but generating word timestamps incurs additional
+ latency.
+ """
src/openai/types/chat/completion_create_params.py
@@ -47,6 +47,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
]
@@ -137,7 +138,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
- `gpt-3.5-turbo-1106`.
+ all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
tests/api_resources/audio/test_transcriptions.py
@@ -34,6 +34,7 @@ class TestTranscriptions:
prompt="string",
response_format="json",
temperature=0,
+ timestamp_granularities=["word", "segment"],
)
assert_matches_type(Transcription, transcription, path=["response"])
@@ -84,6 +85,7 @@ class TestAsyncTranscriptions:
prompt="string",
response_format="json",
temperature=0,
+ timestamp_granularities=["word", "segment"],
)
assert_matches_type(Transcription, transcription, path=["response"])