Commit dfdcf571
Changed files (55)
src
openai
resources
beta
threads
vector_stores
chat
fine_tuning
jobs
uploads
types
beta
threads
vector_stores
chat
fine_tuning
tests
api_resources
src/openai/resources/audio/speech.py
@@ -70,13 +70,13 @@ class Speech(SyncAPIResource):
input: The text to generate audio for. The maximum length is 4096 characters.
model:
- One of the available [TTS models](https://platform.openai.com/docs/models/tts):
+ One of the available [TTS models](https://platform.openai.com/docs/models#tts):
`tts-1` or `tts-1-hd`
voice: The voice to use when generating the audio. Supported voices are `alloy`,
`echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
available in the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
+ [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
@@ -154,13 +154,13 @@ class AsyncSpeech(AsyncAPIResource):
input: The text to generate audio for. The maximum length is 4096 characters.
model:
- One of the available [TTS models](https://platform.openai.com/docs/models/tts):
+ One of the available [TTS models](https://platform.openai.com/docs/models#tts):
`tts-1` or `tts-1-hd`
voice: The voice to use when generating the audio. Supported voices are `alloy`,
`echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
available in the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
+ [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
src/openai/resources/audio/transcriptions.py
@@ -143,7 +143,7 @@ class Transcriptions(SyncAPIResource):
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
- [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
@@ -307,7 +307,7 @@ class AsyncTranscriptions(AsyncAPIResource):
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
- [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
src/openai/resources/audio/translations.py
@@ -130,7 +130,7 @@ class Translations(SyncAPIResource):
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
- [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
@@ -273,7 +273,7 @@ class AsyncTranslations(AsyncAPIResource):
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
- [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
src/openai/resources/beta/threads/runs/runs.py
@@ -122,7 +122,7 @@ class Runs(SyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -158,12 +158,12 @@ class Runs(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -266,7 +266,7 @@ class Runs(SyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -302,12 +302,12 @@ class Runs(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -406,7 +406,7 @@ class Runs(SyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -442,12 +442,12 @@ class Runs(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -674,8 +674,8 @@ class Runs(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -1484,7 +1484,7 @@ class AsyncRuns(AsyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -1520,12 +1520,12 @@ class AsyncRuns(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1628,7 +1628,7 @@ class AsyncRuns(AsyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -1664,12 +1664,12 @@ class AsyncRuns(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1768,7 +1768,7 @@ class AsyncRuns(AsyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
@@ -1804,12 +1804,12 @@ class AsyncRuns(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -2037,8 +2037,8 @@ class AsyncRuns(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
src/openai/resources/beta/threads/runs/steps.py
@@ -68,7 +68,7 @@ class Steps(SyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
extra_headers: Send extra headers
@@ -126,15 +126,15 @@ class Steps(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
@@ -222,7 +222,7 @@ class AsyncSteps(AsyncAPIResource):
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
extra_headers: Send extra headers
@@ -280,15 +280,15 @@ class AsyncSteps(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
include: A list of additional fields to include in the response. Currently the only
supported value is `step_details.tool_calls[*].file_search.results[*].content`
to fetch the file search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
src/openai/resources/beta/threads/messages.py
@@ -221,8 +221,8 @@ class Messages(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -495,8 +495,8 @@ class AsyncMessages(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
src/openai/resources/beta/threads/threads.py
@@ -326,12 +326,12 @@ class Threads(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -460,12 +460,12 @@ class Threads(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -590,12 +590,12 @@ class Threads(SyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1160,12 +1160,12 @@ class AsyncThreads(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1294,12 +1294,12 @@ class AsyncThreads(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1424,12 +1424,12 @@ class AsyncThreads(AsyncAPIResource):
assistant will be used.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/resources/beta/vector_stores/file_batches.py
@@ -227,8 +227,8 @@ class FileBatches(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
@@ -556,8 +556,8 @@ class AsyncFileBatches(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
src/openai/resources/beta/vector_stores/files.py
@@ -164,8 +164,8 @@ class Files(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
@@ -476,8 +476,8 @@ class AsyncFiles(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
src/openai/resources/beta/vector_stores/vector_stores.py
@@ -251,8 +251,8 @@ class VectorStores(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -529,8 +529,8 @@ class AsyncVectorStores(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
src/openai/resources/beta/assistants.py
@@ -79,8 +79,8 @@ class Assistants(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
description: The description of the assistant. The maximum length is 512 characters.
@@ -95,8 +95,8 @@ class Assistants(SyncAPIResource):
name: The name of the assistant. The maximum length is 256 characters.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -239,14 +239,14 @@ class Assistants(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
name: The name of the assistant. The maximum length is 256 characters.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -344,8 +344,8 @@ class Assistants(SyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
@@ -465,8 +465,8 @@ class AsyncAssistants(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
description: The description of the assistant. The maximum length is 512 characters.
@@ -481,8 +481,8 @@ class AsyncAssistants(AsyncAPIResource):
name: The name of the assistant. The maximum length is 256 characters.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -625,14 +625,14 @@ class AsyncAssistants(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
name: The name of the assistant. The maximum length is 256 characters.
response_format: Specifies the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -730,8 +730,8 @@ class AsyncAssistants(AsyncAPIResource):
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
- ending with obj_foo, your subsequent call can include before=obj_foo in order to
- fetch the previous page of the list.
+ starting with obj_foo, your subsequent call can include before=obj_foo in order
+ to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
src/openai/resources/chat/completions.py
@@ -33,6 +33,7 @@ from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
__all__ = ["Completions", "AsyncCompletions"]
@@ -76,6 +77,7 @@ class Completions(SyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -113,7 +115,7 @@ class Completions(SyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
audio: Parameters for audio output. Required when audio output is requested with
@@ -124,7 +126,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -185,19 +187,22 @@ class Completions(SyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -284,7 +289,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -315,6 +320,7 @@ class Completions(SyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -351,7 +357,7 @@ class Completions(SyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -369,7 +375,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -430,19 +436,22 @@ class Completions(SyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -522,7 +531,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -553,6 +562,7 @@ class Completions(SyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -589,7 +599,7 @@ class Completions(SyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -607,7 +617,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -668,19 +678,22 @@ class Completions(SyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -760,7 +773,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -790,6 +803,7 @@ class Completions(SyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -830,6 +844,7 @@ class Completions(SyncAPIResource):
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
+ "prediction": prediction,
"presence_penalty": presence_penalty,
"response_format": response_format,
"seed": seed,
@@ -894,6 +909,7 @@ class AsyncCompletions(AsyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -931,7 +947,7 @@ class AsyncCompletions(AsyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
audio: Parameters for audio output. Required when audio output is requested with
@@ -942,7 +958,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -1003,19 +1019,22 @@ class AsyncCompletions(AsyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1102,7 +1121,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -1133,6 +1152,7 @@ class AsyncCompletions(AsyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1169,7 +1189,7 @@ class AsyncCompletions(AsyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -1187,7 +1207,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -1248,19 +1268,22 @@ class AsyncCompletions(AsyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1340,7 +1363,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -1371,6 +1394,7 @@ class AsyncCompletions(AsyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1407,7 +1431,7 @@ class AsyncCompletions(AsyncAPIResource):
[audio](https://platform.openai.com/docs/guides/audio).
model: ID of the model to use. See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -1425,7 +1449,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
function_call: Deprecated in favor of `tool_choice`.
@@ -1486,19 +1510,22 @@ class AsyncCompletions(AsyncAPIResource):
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
+ prediction: Static predicted output content, such as the content of a text file that is
+ being regenerated.
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
response_format: An object specifying the format that the model must output. Compatible with
- [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1578,7 +1605,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -1608,6 +1635,7 @@ class AsyncCompletions(AsyncAPIResource):
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1648,6 +1676,7 @@ class AsyncCompletions(AsyncAPIResource):
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
+ "prediction": prediction,
"presence_penalty": presence_penalty,
"response_format": response_format,
"seed": seed,
src/openai/resources/fine_tuning/jobs/jobs.py
@@ -88,7 +88,7 @@ class Jobs(SyncAPIResource):
Args:
model: The name of the model to fine-tune. You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
@@ -379,7 +379,7 @@ class AsyncJobs(AsyncAPIResource):
Args:
model: The name of the model to fine-tune. You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
src/openai/resources/uploads/uploads.py
@@ -196,7 +196,7 @@ class Uploads(SyncAPIResource):
For certain `purpose`s, the correct `mime_type` must be specified. Please refer
to documentation for the supported MIME types for your use case:
- - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)
+ - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
For guidance on the proper filename extensions for each purpose, please follow
the documentation on
@@ -500,7 +500,7 @@ class AsyncUploads(AsyncAPIResource):
For certain `purpose`s, the correct `mime_type` must be specified. Please refer
to documentation for the supported MIME types for your use case:
- - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)
+ - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
For guidance on the proper filename extensions for each purpose, please follow
the documentation on
src/openai/resources/completions.py
@@ -84,8 +84,8 @@ class Completions(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -110,7 +110,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -150,7 +150,7 @@ class Completions(SyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -189,7 +189,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -237,8 +237,8 @@ class Completions(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -270,7 +270,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -310,7 +310,7 @@ class Completions(SyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -342,7 +342,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -390,8 +390,8 @@ class Completions(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -423,7 +423,7 @@ class Completions(SyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -463,7 +463,7 @@ class Completions(SyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -495,7 +495,7 @@ class Completions(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -626,8 +626,8 @@ class AsyncCompletions(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -652,7 +652,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -692,7 +692,7 @@ class AsyncCompletions(AsyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -731,7 +731,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -779,8 +779,8 @@ class AsyncCompletions(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -812,7 +812,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -852,7 +852,7 @@ class AsyncCompletions(AsyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -884,7 +884,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -932,8 +932,8 @@ class AsyncCompletions(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
prompt: The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
@@ -965,7 +965,7 @@ class AsyncCompletions(AsyncAPIResource):
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
@@ -1005,7 +1005,7 @@ class AsyncCompletions(AsyncAPIResource):
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
seed: If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
@@ -1037,7 +1037,7 @@ class AsyncCompletions(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
src/openai/resources/embeddings.py
@@ -73,8 +73,8 @@ class Embeddings(SyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
@@ -84,7 +84,7 @@ class Embeddings(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -185,8 +185,8 @@ class AsyncEmbeddings(AsyncAPIResource):
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
dimensions: The number of dimensions the resulting output embeddings should have. Only
supported in `text-embedding-3` and later models.
@@ -196,7 +196,7 @@ class AsyncEmbeddings(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
src/openai/resources/files.py
@@ -5,6 +5,7 @@ from __future__ import annotations
import time
import typing_extensions
from typing import Mapping, cast
+from typing_extensions import Literal
import httpx
@@ -27,11 +28,8 @@ from .._response import (
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
-from ..pagination import SyncPage, AsyncPage
-from .._base_client import (
- AsyncPaginator,
- make_request_options,
-)
+from ..pagination import SyncCursorPage, AsyncCursorPage
+from .._base_client import AsyncPaginator, make_request_options
from ..types.file_object import FileObject
from ..types.file_deleted import FileDeleted
from ..types.file_purpose import FilePurpose
@@ -172,6 +170,9 @@ class Files(SyncAPIResource):
def list(
self,
*,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
purpose: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -179,11 +180,23 @@ class Files(SyncAPIResource):
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncPage[FileObject]:
- """
- Returns a list of files that belong to the user's organization.
+ ) -> SyncCursorPage[FileObject]:
+ """Returns a list of files.
Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 10,000, and the default is 10,000.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
@@ -196,13 +209,21 @@ class Files(SyncAPIResource):
"""
return self._get_api_list(
"/files",
- page=SyncPage[FileObject],
+ page=SyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
- query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams),
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "purpose": purpose,
+ },
+ file_list_params.FileListParams,
+ ),
),
model=FileObject,
)
@@ -465,6 +486,9 @@ class AsyncFiles(AsyncAPIResource):
def list(
self,
*,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
purpose: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -472,11 +496,23 @@ class AsyncFiles(AsyncAPIResource):
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[FileObject, AsyncPage[FileObject]]:
- """
- Returns a list of files that belong to the user's organization.
+ ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]:
+ """Returns a list of files.
Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 10,000, and the default is 10,000.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
@@ -489,13 +525,21 @@ class AsyncFiles(AsyncAPIResource):
"""
return self._get_api_list(
"/files",
- page=AsyncPage[FileObject],
+ page=AsyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
- query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams),
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "purpose": purpose,
+ },
+ file_list_params.FileListParams,
+ ),
),
model=FileObject,
)
src/openai/resources/images.py
@@ -84,7 +84,7 @@ class Images(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -165,7 +165,7 @@ class Images(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -251,7 +251,7 @@ class Images(SyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -341,7 +341,7 @@ class AsyncImages(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -422,7 +422,7 @@ class AsyncImages(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
@@ -508,7 +508,7 @@ class AsyncImages(AsyncAPIResource):
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
src/openai/resources/moderations.py
@@ -68,7 +68,7 @@ class Moderations(SyncAPIResource):
model: The content moderation model you would like to use. Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
- [here](https://platform.openai.com/docs/models/moderation).
+ [here](https://platform.openai.com/docs/models#moderation).
extra_headers: Send extra headers
@@ -138,7 +138,7 @@ class AsyncModerations(AsyncAPIResource):
model: The content moderation model you would like to use. Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
- [here](https://platform.openai.com/docs/models/moderation).
+ [here](https://platform.openai.com/docs/models#moderation).
extra_headers: Send extra headers
src/openai/types/audio/speech_create_params.py
@@ -16,7 +16,7 @@ class SpeechCreateParams(TypedDict, total=False):
model: Required[Union[str, SpeechModel]]
"""
- One of the available [TTS models](https://platform.openai.com/docs/models/tts):
+ One of the available [TTS models](https://platform.openai.com/docs/models#tts):
`tts-1` or `tts-1-hd`
"""
@@ -25,7 +25,7 @@ class SpeechCreateParams(TypedDict, total=False):
Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
Previews of the voices are available in the
- [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
+ [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
"""
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
src/openai/types/audio/transcription_create_params.py
@@ -38,7 +38,7 @@ class TranscriptionCreateParams(TypedDict, total=False):
"""An optional text to guide the model's style or continue a previous audio
segment.
- The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language.
"""
src/openai/types/audio/translation_create_params.py
@@ -30,7 +30,7 @@ class TranslationCreateParams(TypedDict, total=False):
"""An optional text to guide the model's style or continue a previous audio
segment.
- The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
"""
src/openai/types/beta/threads/runs/step_list_params.py
@@ -26,7 +26,7 @@ class StepListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
@@ -39,7 +39,7 @@ class StepListParams(TypedDict, total=False):
search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
src/openai/types/beta/threads/runs/step_retrieve_params.py
@@ -23,6 +23,6 @@ class StepRetrieveParams(TypedDict, total=False):
search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
src/openai/types/beta/threads/message_list_params.py
@@ -21,7 +21,7 @@ class MessageListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/beta/threads/run.py
@@ -154,7 +154,7 @@ class Run(BaseModel):
parallel_tool_calls: bool
"""
Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
"""
@@ -167,8 +167,8 @@ class Run(BaseModel):
response_format: Optional[AssistantResponseFormatOption] = None
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/threads/run_create_params.py
@@ -41,7 +41,7 @@ class RunCreateParamsBase(TypedDict, total=False):
search result content.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
@@ -99,15 +99,15 @@ class RunCreateParamsBase(TypedDict, total=False):
parallel_tool_calls: bool
"""
Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
"""
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/threads/run_list_params.py
@@ -21,7 +21,7 @@ class RunListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/beta/vector_stores/file_batch_list_files_params.py
@@ -23,7 +23,7 @@ class FileBatchListFilesParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/beta/vector_stores/file_list_params.py
@@ -21,7 +21,7 @@ class FileListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/beta/assistant.py
@@ -65,8 +65,8 @@ class Assistant(BaseModel):
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
"""
name: Optional[str] = None
@@ -85,8 +85,8 @@ class Assistant(BaseModel):
response_format: Optional[AssistantResponseFormatOption] = None
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/assistant_create_params.py
@@ -26,8 +26,8 @@ class AssistantCreateParams(TypedDict, total=False):
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
"""
description: Optional[str]
@@ -53,8 +53,8 @@ class AssistantCreateParams(TypedDict, total=False):
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/assistant_list_params.py
@@ -21,7 +21,7 @@ class AssistantListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/beta/assistant_update_params.py
@@ -35,8 +35,8 @@ class AssistantUpdateParams(TypedDict, total=False):
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
"""
name: Optional[str]
@@ -45,8 +45,8 @@ class AssistantUpdateParams(TypedDict, total=False):
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/file_search_tool.py
@@ -31,7 +31,7 @@ class FileSearch(BaseModel):
Note that the file search tool may output fewer than `max_num_results` results.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
@@ -42,7 +42,7 @@ class FileSearch(BaseModel):
score_threshold of 0.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
src/openai/types/beta/file_search_tool_param.py
@@ -30,7 +30,7 @@ class FileSearch(TypedDict, total=False):
Note that the file search tool may output fewer than `max_num_results` results.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
@@ -41,7 +41,7 @@ class FileSearch(TypedDict, total=False):
score_threshold of 0.
See the
- [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
src/openai/types/beta/thread_create_and_run_params.py
@@ -86,15 +86,15 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
parallel_tool_calls: bool
"""
Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
"""
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/types/beta/vector_store_list_params.py
@@ -21,7 +21,7 @@ class VectorStoreListParams(TypedDict, total=False):
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
- you make a list request and receive 100 objects, ending with obj_foo, your
+ you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
src/openai/types/chat/__init__.py
@@ -46,6 +46,9 @@ from .chat_completion_named_tool_choice_param import (
from .chat_completion_content_part_image_param import (
ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
)
+from .chat_completion_prediction_content_param import (
+ ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
+)
from .chat_completion_tool_choice_option_param import (
ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
)
src/openai/types/chat/chat_completion_content_part_image_param.py
@@ -15,7 +15,7 @@ class ImageURL(TypedDict, total=False):
"""Specifies the detail level of the image.
Learn more in the
- [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
src/openai/types/chat/chat_completion_prediction_content_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionPredictionContentParam"]
+
+
+class ChatCompletionPredictionContentParam(TypedDict, total=False):
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+ """
+ The content that should be matched when generating a model response. If
+ generated tokens would match this content, the entire model response can be
+ returned much more quickly.
+ """
+
+ type: Required[Literal["content"]]
+ """The type of the predicted content you want to provide.
+
+ This type is currently always `content`.
+ """
src/openai/types/chat/completion_create_params.py
@@ -13,6 +13,7 @@ from .chat_completion_message_param import ChatCompletionMessageParam
from ..shared_params.function_parameters import FunctionParameters
from ..shared_params.response_format_text import ResponseFormatText
from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
from ..shared_params.response_format_json_object import ResponseFormatJSONObject
from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
@@ -43,7 +44,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""ID of the model to use.
See the
- [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
table for details on which models work with the Chat API.
"""
@@ -60,7 +61,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Positive values penalize new tokens based on their existing frequency in the
text so far, decreasing the model's likelihood to repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
function_call: FunctionCall
@@ -148,25 +149,31 @@ class CompletionCreateParamsBase(TypedDict, total=False):
parallel_tool_calls: bool
"""
Whether to enable
- [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
"""
+ prediction: Optional[ChatCompletionPredictionContentParam]
+ """
+ Static predicted output content, such as the content of a text file that is
+ being regenerated.
+ """
+
presence_penalty: Optional[float]
"""Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the text so
far, increasing the model's likelihood to talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
response_format: ResponseFormat
"""An object specifying the format that the model must output.
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -276,7 +283,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/fine_tuning/job_create_params.py
@@ -13,7 +13,7 @@ class JobCreateParams(TypedDict, total=False):
"""The name of the model to fine-tune.
You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
"""
training_file: Required[str]
src/openai/types/completion_create_params.py
@@ -17,8 +17,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
"""
prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
@@ -53,7 +53,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Positive values penalize new tokens based on their existing frequency in the
text so far, decreasing the model's likelihood to repeat the same line verbatim.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
logit_bias: Optional[Dict[str, int]]
@@ -106,7 +106,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Positive values penalize new tokens based on whether they appear in the text so
far, increasing the model's likelihood to talk about new topics.
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
seed: Optional[int]
@@ -156,7 +156,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/completion_usage.py
@@ -8,12 +8,26 @@ __all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]
class CompletionTokensDetails(BaseModel):
+ accepted_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that
+ appeared in the completion.
+ """
+
audio_tokens: Optional[int] = None
"""Audio input tokens generated by the model."""
reasoning_tokens: Optional[int] = None
"""Tokens generated by the model for reasoning."""
+ rejected_prediction_tokens: Optional[int] = None
+ """
+ When using Predicted Outputs, the number of tokens in the prediction that did
+ not appear in the completion. However, like reasoning tokens, these tokens are
+ still counted in the total completion tokens for purposes of billing, output,
+ and context window limits.
+ """
+
class PromptTokensDetails(BaseModel):
audio_tokens: Optional[int] = None
src/openai/types/embedding_create_params.py
@@ -28,8 +28,8 @@ class EmbeddingCreateParams(TypedDict, total=False):
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
- [Model overview](https://platform.openai.com/docs/models/overview) for
- descriptions of them.
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ them.
"""
dimensions: int
@@ -48,5 +48,5 @@ class EmbeddingCreateParams(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/file_list_params.py
@@ -2,11 +2,32 @@
from __future__ import annotations
-from typing_extensions import TypedDict
+from typing_extensions import Literal, TypedDict
__all__ = ["FileListParams"]
class FileListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 10,000, and the default is 10,000.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
+
purpose: str
"""Only return files with the given purpose."""
src/openai/types/image_create_variation_params.py
@@ -47,5 +47,5 @@ class ImageCreateVariationParams(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/image_edit_params.py
@@ -58,5 +58,5 @@ class ImageEditParams(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/image_generate_params.py
@@ -61,5 +61,5 @@ class ImageGenerateParams(TypedDict, total=False):
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
src/openai/types/moderation_create_params.py
@@ -25,5 +25,5 @@ class ModerationCreateParams(TypedDict, total=False):
Learn more in
[the moderation guide](https://platform.openai.com/docs/guides/moderation), and
learn about available models
- [here](https://platform.openai.com/docs/models/moderation).
+ [here](https://platform.openai.com/docs/models#moderation).
"""
tests/api_resources/chat/test_completions.py
@@ -65,6 +65,10 @@ class TestCompletions:
modalities=["text", "audio"],
n=1,
parallel_tool_calls=True,
+ prediction={
+ "content": "string",
+ "type": "content",
+ },
presence_penalty=-2,
response_format={"type": "text"},
seed=-9007199254740991,
@@ -193,6 +197,10 @@ class TestCompletions:
modalities=["text", "audio"],
n=1,
parallel_tool_calls=True,
+ prediction={
+ "content": "string",
+ "type": "content",
+ },
presence_penalty=-2,
response_format={"type": "text"},
seed=-9007199254740991,
@@ -340,6 +348,10 @@ class TestAsyncCompletions:
modalities=["text", "audio"],
n=1,
parallel_tool_calls=True,
+ prediction={
+ "content": "string",
+ "type": "content",
+ },
presence_penalty=-2,
response_format={"type": "text"},
seed=-9007199254740991,
@@ -468,6 +480,10 @@ class TestAsyncCompletions:
modalities=["text", "audio"],
n=1,
parallel_tool_calls=True,
+ prediction={
+ "content": "string",
+ "type": "content",
+ },
presence_penalty=-2,
response_format={"type": "text"},
seed=-9007199254740991,
tests/api_resources/test_files.py
@@ -13,7 +13,7 @@ import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import FileObject, FileDeleted
-from openai.pagination import SyncPage, AsyncPage
+from openai.pagination import SyncCursorPage, AsyncCursorPage
# pyright: reportDeprecated=false
@@ -98,14 +98,17 @@ class TestFiles:
@parametrize
def test_method_list(self, client: OpenAI) -> None:
file = client.files.list()
- assert_matches_type(SyncPage[FileObject], file, path=["response"])
+ assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
file = client.files.list(
- purpose="string",
+ after="after",
+ limit=0,
+ order="asc",
+ purpose="purpose",
)
- assert_matches_type(SyncPage[FileObject], file, path=["response"])
+ assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
@@ -114,7 +117,7 @@ class TestFiles:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(SyncPage[FileObject], file, path=["response"])
+ assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
@@ -123,7 +126,7 @@ class TestFiles:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(SyncPage[FileObject], file, path=["response"])
+ assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -334,14 +337,17 @@ class TestAsyncFiles:
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
file = await async_client.files.list()
- assert_matches_type(AsyncPage[FileObject], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
file = await async_client.files.list(
- purpose="string",
+ after="after",
+ limit=0,
+ order="asc",
+ purpose="purpose",
)
- assert_matches_type(AsyncPage[FileObject], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -350,7 +356,7 @@ class TestAsyncFiles:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(AsyncPage[FileObject], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -359,7 +365,7 @@ class TestAsyncFiles:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(AsyncPage[FileObject], file, path=["response"])
+ assert_matches_type(AsyncCursorPage[FileObject], file, path=["response"])
assert cast(Any, response.is_closed) is True
.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml
api.md
@@ -54,6 +54,7 @@ from openai.types.chat import (
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
+ ChatCompletionPredictionContent,
ChatCompletionRole,
ChatCompletionStreamOptions,
ChatCompletionSystemMessageParam,
@@ -93,7 +94,7 @@ Methods:
- <code title="post /files">client.files.<a href="./src/openai/resources/files.py">create</a>(\*\*<a href="src/openai/types/file_create_params.py">params</a>) -> <a href="./src/openai/types/file_object.py">FileObject</a></code>
- <code title="get /files/{file_id}">client.files.<a href="./src/openai/resources/files.py">retrieve</a>(file_id) -> <a href="./src/openai/types/file_object.py">FileObject</a></code>
-- <code title="get /files">client.files.<a href="./src/openai/resources/files.py">list</a>(\*\*<a href="src/openai/types/file_list_params.py">params</a>) -> <a href="./src/openai/types/file_object.py">SyncPage[FileObject]</a></code>
+- <code title="get /files">client.files.<a href="./src/openai/resources/files.py">list</a>(\*\*<a href="src/openai/types/file_list_params.py">params</a>) -> <a href="./src/openai/types/file_object.py">SyncCursorPage[FileObject]</a></code>
- <code title="delete /files/{file_id}">client.files.<a href="./src/openai/resources/files.py">delete</a>(file_id) -> <a href="./src/openai/types/file_deleted.py">FileDeleted</a></code>
- <code title="get /files/{file_id}/content">client.files.<a href="./src/openai/resources/files.py">content</a>(file_id) -> HttpxBinaryResponseContent</code>
- <code title="get /files/{file_id}/content">client.files.<a href="./src/openai/resources/files.py">retrieve_content</a>(file_id) -> str</code>