Commit 6321004a
Changed files (44)
src/
  openai/
    resources/
      beta/
        realtime/
        threads/
      fine_tuning/
        checkpoints/
    types/
      beta/
      evals/
      responses/
tests/
  api_resources/
src/openai/resources/beta/realtime/realtime.py
@@ -233,6 +233,7 @@ class AsyncRealtimeConnection:
response: AsyncRealtimeResponseResource
input_audio_buffer: AsyncRealtimeInputAudioBufferResource
conversation: AsyncRealtimeConversationResource
+ output_audio_buffer: AsyncRealtimeOutputAudioBufferResource
transcription_session: AsyncRealtimeTranscriptionSessionResource
_connection: AsyncWebsocketConnection
@@ -244,6 +245,7 @@ class AsyncRealtimeConnection:
self.response = AsyncRealtimeResponseResource(self)
self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self)
self.conversation = AsyncRealtimeConversationResource(self)
+ self.output_audio_buffer = AsyncRealtimeOutputAudioBufferResource(self)
self.transcription_session = AsyncRealtimeTranscriptionSessionResource(self)
async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]:
@@ -413,6 +415,7 @@ class RealtimeConnection:
response: RealtimeResponseResource
input_audio_buffer: RealtimeInputAudioBufferResource
conversation: RealtimeConversationResource
+ output_audio_buffer: RealtimeOutputAudioBufferResource
transcription_session: RealtimeTranscriptionSessionResource
_connection: WebsocketConnection
@@ -424,6 +427,7 @@ class RealtimeConnection:
self.response = RealtimeResponseResource(self)
self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
self.conversation = RealtimeConversationResource(self)
+ self.output_audio_buffer = RealtimeOutputAudioBufferResource(self)
self.transcription_session = RealtimeTranscriptionSessionResource(self)
def __iter__(self) -> Iterator[RealtimeServerEvent]:
@@ -808,6 +812,21 @@ class RealtimeConversationItemResource(BaseRealtimeConnectionResource):
)
+class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource):
+ def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """**WebRTC Only:** Emit to cut off the current audio response.
+
+ This will trigger the server to
+ stop generating audio and emit a `output_audio_buffer.cleared` event. This
+ event should be preceded by a `response.cancel` client event to stop the
+ generation of the current response.
+ [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc).
+ """
+ self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+ )
+
+
class RealtimeTranscriptionSessionResource(BaseRealtimeConnectionResource):
def update(
self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
@@ -1045,6 +1064,21 @@ class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource)
)
+class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+ async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """**WebRTC Only:** Emit to cut off the current audio response.
+
+ This will trigger the server to
+ stop generating audio and emit a `output_audio_buffer.cleared` event. This
+ event should be preceded by a `response.cancel` client event to stop the
+ generation of the current response.
+ [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc).
+ """
+ await self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+ )
+
+
class AsyncRealtimeTranscriptionSessionResource(BaseAsyncRealtimeConnectionResource):
async def update(
self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
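The new `output_audio_buffer` resource is exposed on both the sync and async connection objects. Below is a minimal sketch of the call shape — cancelling the in-flight response before clearing, as the docstring recommends. The connection flow, model name, and event handling are illustrative assumptions, not taken from this diff, and the clear event itself is documented as WebRTC-only.

```python
from openai import OpenAI

client = OpenAI()

# Illustrative flow only: model name and event handling are placeholders.
with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    connection.response.create()

    for event in connection:
        if event.type == "response.audio.delta":
            # Cut the response short: cancel generation first, then clear
            # whatever audio is still buffered for playback.
            connection.response.cancel()
            connection.output_audio_buffer.clear()
            break
```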
src/openai/resources/beta/threads/threads.py
@@ -50,6 +50,7 @@ from ....types.beta.threads.run import Run
from ....types.shared.chat_model import ChatModel
from ....types.beta.thread_deleted import ThreadDeleted
from ....types.shared_params.metadata import Metadata
+from ....types.beta.assistant_tool_param import AssistantToolParam
from ....types.beta.assistant_stream_event import AssistantStreamEvent
from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -282,7 +283,7 @@ class Threads(SyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -415,7 +416,7 @@ class Threads(SyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -548,7 +549,7 @@ class Threads(SyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -681,7 +682,7 @@ class Threads(SyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1131,7 +1132,7 @@ class AsyncThreads(AsyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1264,7 +1265,7 @@ class AsyncThreads(AsyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1397,7 +1398,7 @@ class AsyncThreads(AsyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1530,7 +1531,7 @@ class AsyncThreads(AsyncAPIResource):
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
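The `tools` parameter on `create_and_run` now accepts the shared `AssistantToolParam` type instead of the module-local `thread_create_and_run_params.Tool` alias; call sites that pass plain dicts are unaffected. A hedged sketch, with the assistant ID, thread contents, and tool payloads as placeholders:

```python
from openai import OpenAI

client = OpenAI()

# The tool dicts below satisfy AssistantToolParam just as they satisfied the
# old alias; this is an illustrative call, not taken from the diff.
run = client.beta.threads.create_and_run(
    assistant_id="asst_123",  # placeholder ID
    thread={"messages": [{"role": "user", "content": "Summarize the attached file."}]},
    tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
)
```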
src/openai/resources/evals/runs/runs.py
@@ -176,8 +176,8 @@ class Runs(SyncAPIResource):
order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
descending order. Defaults to `asc`.
- status: Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" |
- "canceled".
+ status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
+ | `canceled`.
extra_headers: Send extra headers
@@ -425,8 +425,8 @@ class AsyncRuns(AsyncAPIResource):
order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
descending order. Defaults to `asc`.
- status: Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" |
- "canceled".
+ status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
+ | `canceled`.
extra_headers: Send extra headers
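The `status` docstring now lists the accepted values as code literals rather than quoted strings. For reference, a sketch of filtering runs by status — the eval ID is a placeholder and the list signature is assumed from the surrounding resource:

```python
from openai import OpenAI

client = OpenAI()

# List only completed runs for an eval; `status` accepts one of
# "queued", "in_progress", "failed", "completed", or "canceled".
runs = client.evals.runs.list("eval_123", status="completed", order="desc")
for run in runs:
    print(run.id, run.status)
```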
src/openai/resources/evals/evals.py
@@ -65,7 +65,6 @@ class Evals(SyncAPIResource):
testing_criteria: Iterable[eval_create_params.TestingCriterion],
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
- share_with_openai: bool | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -94,8 +93,6 @@ class Evals(SyncAPIResource):
name: The name of the evaluation.
- share_with_openai: Indicates whether the evaluation is shared with OpenAI.
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -112,7 +109,6 @@ class Evals(SyncAPIResource):
"testing_criteria": testing_criteria,
"metadata": metadata,
"name": name,
- "share_with_openai": share_with_openai,
},
eval_create_params.EvalCreateParams,
),
@@ -328,7 +324,6 @@ class AsyncEvals(AsyncAPIResource):
testing_criteria: Iterable[eval_create_params.TestingCriterion],
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
- share_with_openai: bool | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -357,8 +352,6 @@ class AsyncEvals(AsyncAPIResource):
name: The name of the evaluation.
- share_with_openai: Indicates whether the evaluation is shared with OpenAI.
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -375,7 +368,6 @@ class AsyncEvals(AsyncAPIResource):
"testing_criteria": testing_criteria,
"metadata": metadata,
"name": name,
- "share_with_openai": share_with_openai,
},
eval_create_params.EvalCreateParams,
),
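`share_with_openai` is removed from `Evals.create`, so callers still passing it will now get a `TypeError` and should simply drop the argument. A minimal sketch of a create call after the removal; the data source config and grader shapes are illustrative assumptions about the Evals API, not taken from this diff:

```python
from openai import OpenAI

client = OpenAI()

# share_with_openai is no longer accepted; everything else is unchanged.
evaluation = client.evals.create(
    name="push-notification-summaries",  # illustrative name
    data_source_config={
        "type": "custom",
        "item_schema": {"type": "object", "properties": {"answer": {"type": "string"}}},
        "include_sample_schema": True,
    },
    testing_criteria=[
        {
            "type": "string_check",
            "name": "exact match",
            "input": "{{sample.output_text}}",
            "reference": "{{item.answer}}",
            "operation": "eq",
        }
    ],
)
```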
src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -151,8 +151,9 @@ class Permissions(SyncAPIResource):
def delete(
self,
- fine_tuned_model_checkpoint: str,
+ permission_id: str,
*,
+ fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -179,8 +180,10 @@ class Permissions(SyncAPIResource):
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
+ if not permission_id:
+ raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return self._delete(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -316,8 +319,9 @@ class AsyncPermissions(AsyncAPIResource):
async def delete(
self,
- fine_tuned_model_checkpoint: str,
+ permission_id: str,
*,
+ fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -344,8 +348,10 @@ class AsyncPermissions(AsyncAPIResource):
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
+ if not permission_id:
+ raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return await self._delete(
- f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
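`permissions.delete` now takes the permission ID as the positional argument, with the checkpoint moved to a required keyword, and the request path includes both. A sketch of the updated call shape; both IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Old: delete(fine_tuned_model_checkpoint)
# New: delete(permission_id, fine_tuned_model_checkpoint=...), which requests
# /fine_tuning/checkpoints/{checkpoint}/permissions/{permission_id}.
deleted = client.fine_tuning.checkpoints.permissions.delete(
    "cp_perm_abc123",  # placeholder permission ID
    fine_tuned_model_checkpoint="ft:gpt-4o-mini:org::abc123:ckpt-step-100",  # placeholder
)
print(deleted)
```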
src/openai/resources/images.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Mapping, Optional, cast
+from typing import List, Union, Mapping, Optional, cast
from typing_extensions import Literal
import httpx
@@ -57,8 +57,9 @@ class Images(SyncAPIResource):
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
- """
- Creates a variation of a given image.
+ """Creates a variation of a given image.
+
+ This endpoint only supports `dall-e-2`.
Args:
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
@@ -67,8 +68,7 @@ class Images(SyncAPIResource):
model: The model to use for image generation. Only `dall-e-2` is supported at this
time.
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
+ n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
@@ -117,11 +117,12 @@ class Images(SyncAPIResource):
def edit(
self,
*,
- image: FileTypes,
+ image: Union[FileTypes, List[FileTypes]],
prompt: str,
mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -132,31 +133,43 @@ class Images(SyncAPIResource):
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
+ image: The image(s) to edit. Must be a supported image file or an array of images. For
+ `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 25MB. For `dall-e-2`, you can only provide one image, and it should be a square
+ `png` file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
+ indicate where `image` should be edited. If there are multiple images provided,
+ the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
n: The number of images to generate. Must be between 1 and 10.
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
@@ -177,12 +190,13 @@ class Images(SyncAPIResource):
"mask": mask,
"model": model,
"n": n,
+ "quality": quality,
"response_format": response_format,
"size": size,
"user": user,
}
)
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+ files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
@@ -201,11 +215,18 @@ class Images(SyncAPIResource):
self,
*,
prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -217,32 +238,60 @@ class Images(SyncAPIResource):
) -> ImagesResponse:
"""
Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
+
+ background: Allows to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
- model: The model to use for image generation.
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
+
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+ `gpt-image-1` which will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
@@ -261,8 +310,12 @@ class Images(SyncAPIResource):
body=maybe_transform(
{
"prompt": prompt,
+ "background": background,
"model": model,
+ "moderation": moderation,
"n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
"quality": quality,
"response_format": response_format,
"size": size,
@@ -314,8 +367,9 @@ class AsyncImages(AsyncAPIResource):
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
- """
- Creates a variation of a given image.
+ """Creates a variation of a given image.
+
+ This endpoint only supports `dall-e-2`.
Args:
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
@@ -324,8 +378,7 @@ class AsyncImages(AsyncAPIResource):
model: The model to use for image generation. Only `dall-e-2` is supported at this
time.
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
+ n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
@@ -374,11 +427,12 @@ class AsyncImages(AsyncAPIResource):
async def edit(
self,
*,
- image: FileTypes,
+ image: Union[FileTypes, List[FileTypes]],
prompt: str,
mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -389,31 +443,43 @@ class AsyncImages(AsyncAPIResource):
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ImagesResponse:
- """
- Creates an edited or extended image given an original image and a prompt.
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
- image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
- is not provided, image must have transparency, which will be used as the mask.
+ image: The image(s) to edit. Must be a supported image file or an array of images. For
+ `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 25MB. For `dall-e-2`, you can only provide one image, and it should be a square
+ `png` file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
- characters.
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. Must be a valid PNG file, less than
+ indicate where `image` should be edited. If there are multiple images provided,
+ the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
n: The number of images to generate. Must be between 1 and 10.
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
@@ -434,12 +500,13 @@ class AsyncImages(AsyncAPIResource):
"mask": mask,
"model": model,
"n": n,
+ "quality": quality,
"response_format": response_format,
"size": size,
"user": user,
}
)
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+ files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
@@ -458,11 +525,18 @@ class AsyncImages(AsyncAPIResource):
self,
*,
prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
- quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -474,32 +548,60 @@ class AsyncImages(AsyncAPIResource):
) -> ImagesResponse:
"""
Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
Args:
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
+
+ background: Allows to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
- model: The model to use for image generation.
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
+
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
- quality: The quality of the image that will be generated. `hd` creates images with finer
- details and greater consistency across the image. This param is only supported
- for `dall-e-3`.
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
- `1024x1792` for `dall-e-3` models.
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+ `gpt-image-1` which will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
- style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
- causes the model to lean towards generating hyper-real and dramatic images.
- Natural causes the model to produce more natural, less hyper-real looking
- images. This param is only supported for `dall-e-3`.
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
@@ -518,8 +620,12 @@ class AsyncImages(AsyncAPIResource):
body=await async_maybe_transform(
{
"prompt": prompt,
+ "background": background,
"model": model,
+ "moderation": moderation,
"n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
"quality": quality,
"response_format": response_format,
"size": size,
src/openai/types/beta/realtime/realtime_client_event.py
@@ -1,9 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Union
-from typing_extensions import Annotated, TypeAlias
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
+from ...._models import BaseModel
from .session_update_event import SessionUpdateEvent
from .response_cancel_event import ResponseCancelEvent
from .response_create_event import ResponseCreateEvent
@@ -16,7 +17,16 @@ from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent
from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
from .conversation_item_truncate_event import ConversationItemTruncateEvent
-__all__ = ["RealtimeClientEvent"]
+__all__ = ["RealtimeClientEvent", "OutputAudioBufferClear"]
+
+
+class OutputAudioBufferClear(BaseModel):
+ type: Literal["output_audio_buffer.clear"]
+ """The event type, must be `output_audio_buffer.clear`."""
+
+ event_id: Optional[str] = None
+ """The unique ID of the client event used for error handling."""
+
RealtimeClientEvent: TypeAlias = Annotated[
Union[
@@ -26,6 +36,7 @@ RealtimeClientEvent: TypeAlias = Annotated[
ConversationItemTruncateEvent,
InputAudioBufferAppendEvent,
InputAudioBufferClearEvent,
+ OutputAudioBufferClear,
InputAudioBufferCommitEvent,
ResponseCancelEvent,
ResponseCreateEvent,
src/openai/types/beta/realtime/realtime_client_event_param.py
@@ -3,7 +3,7 @@
from __future__ import annotations
from typing import Union
-from typing_extensions import TypeAlias
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .session_update_event_param import SessionUpdateEventParam
from .response_cancel_event_param import ResponseCancelEventParam
@@ -17,7 +17,16 @@ from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventPa
from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam
from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam
-__all__ = ["RealtimeClientEventParam"]
+__all__ = ["RealtimeClientEventParam", "OutputAudioBufferClear"]
+
+
+class OutputAudioBufferClear(TypedDict, total=False):
+ type: Required[Literal["output_audio_buffer.clear"]]
+ """The event type, must be `output_audio_buffer.clear`."""
+
+ event_id: str
+ """The unique ID of the client event used for error handling."""
+
RealtimeClientEventParam: TypeAlias = Union[
ConversationItemCreateEventParam,
@@ -26,6 +35,7 @@ RealtimeClientEventParam: TypeAlias = Union[
ConversationItemTruncateEventParam,
InputAudioBufferAppendEventParam,
InputAudioBufferClearEventParam,
+ OutputAudioBufferClear,
InputAudioBufferCommitEventParam,
ResponseCancelEventParam,
ResponseCreateEventParam,
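`OutputAudioBufferClear` also joins the client-event param union, so a raw event dict can be sent directly instead of going through the new resource helper. A sketch, assuming the connection's low-level `send` accepts raw client-event dicts and using a placeholder model and event ID:

```python
from openai import OpenAI

client = OpenAI()

with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    # The dict below satisfies the new OutputAudioBufferClear TypedDict and is
    # equivalent to calling connection.output_audio_buffer.clear().
    connection.send({"type": "output_audio_buffer.clear", "event_id": "evt_client_001"})
```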
src/openai/types/beta/realtime/realtime_server_event.py
@@ -39,7 +39,13 @@ from .conversation_item_input_audio_transcription_completed_event import (
ConversationItemInputAudioTranscriptionCompletedEvent,
)
-__all__ = ["RealtimeServerEvent", "ConversationItemRetrieved"]
+__all__ = [
+ "RealtimeServerEvent",
+ "ConversationItemRetrieved",
+ "OutputAudioBufferStarted",
+ "OutputAudioBufferStopped",
+ "OutputAudioBufferCleared",
+]
class ConversationItemRetrieved(BaseModel):
@@ -53,6 +59,39 @@ class ConversationItemRetrieved(BaseModel):
"""The event type, must be `conversation.item.retrieved`."""
+class OutputAudioBufferStarted(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ response_id: str
+ """The unique ID of the response that produced the audio."""
+
+ type: Literal["output_audio_buffer.started"]
+ """The event type, must be `output_audio_buffer.started`."""
+
+
+class OutputAudioBufferStopped(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ response_id: str
+ """The unique ID of the response that produced the audio."""
+
+ type: Literal["output_audio_buffer.stopped"]
+ """The event type, must be `output_audio_buffer.stopped`."""
+
+
+class OutputAudioBufferCleared(BaseModel):
+ event_id: str
+ """The unique ID of the server event."""
+
+ response_id: str
+ """The unique ID of the response that produced the audio."""
+
+ type: Literal["output_audio_buffer.cleared"]
+ """The event type, must be `output_audio_buffer.cleared`."""
+
+
RealtimeServerEvent: TypeAlias = Annotated[
Union[
ConversationCreatedEvent,
@@ -86,6 +125,9 @@ RealtimeServerEvent: TypeAlias = Annotated[
SessionCreatedEvent,
SessionUpdatedEvent,
TranscriptionSessionUpdatedEvent,
+ OutputAudioBufferStarted,
+ OutputAudioBufferStopped,
+ OutputAudioBufferCleared,
],
PropertyInfo(discriminator="type"),
]
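Three new server events join the discriminated union, so an event loop can observe when WebRTC audio playback starts, stops, or is cleared. A sketch of dispatching on them; the model name and handling are illustrative:

```python
from openai import OpenAI

client = OpenAI()

with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    for event in connection:
        # The union is discriminated on `type`, so string comparison suffices.
        if event.type == "output_audio_buffer.started":
            print("audio started for response", event.response_id)
        elif event.type == "output_audio_buffer.stopped":
            print("audio finished for response", event.response_id)
        elif event.type == "output_audio_buffer.cleared":
            print("audio cleared for response", event.response_id)
        elif event.type == "response.done":
            break
```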
src/openai/types/beta/thread_create_and_run_params.py
@@ -6,8 +6,7 @@ from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared.chat_model import ChatModel
-from .function_tool_param import FunctionToolParam
-from .file_search_tool_param import FileSearchToolParam
+from .assistant_tool_param import AssistantToolParam
from ..shared_params.metadata import Metadata
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -32,7 +31,6 @@ __all__ = [
"ToolResources",
"ToolResourcesCodeInterpreter",
"ToolResourcesFileSearch",
- "Tool",
"TruncationStrategy",
"ThreadCreateAndRunParamsNonStreaming",
"ThreadCreateAndRunParamsStreaming",
@@ -153,7 +151,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
tool requires a list of vector store IDs.
"""
- tools: Optional[Iterable[Tool]]
+ tools: Optional[Iterable[AssistantToolParam]]
"""Override the tools the assistant can use for this run.
This is useful for modifying the behavior on a per-run basis.
@@ -360,9 +358,6 @@ class ToolResources(TypedDict, total=False):
file_search: ToolResourcesFileSearch
-Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
-
-
class TruncationStrategy(TypedDict, total=False):
type: Required[Literal["auto", "last_messages"]]
"""The truncation strategy to use for the thread.
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -6,102 +6,27 @@ from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..shared.metadata import Metadata
+from ..responses.easy_input_message import EasyInputMessage
+from ..responses.response_input_text import ResponseInputText
__all__ = [
"CreateEvalCompletionsRunDataSource",
- "InputMessages",
- "InputMessagesTemplate",
- "InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateChatMessage",
- "InputMessagesTemplateTemplateInputMessage",
- "InputMessagesTemplateTemplateInputMessageContent",
- "InputMessagesTemplateTemplateOutputMessage",
- "InputMessagesTemplateTemplateOutputMessageContent",
- "InputMessagesItemReference",
"Source",
"SourceFileContent",
"SourceFileContentContent",
"SourceFileID",
"SourceStoredCompletions",
+ "InputMessages",
+ "InputMessagesTemplate",
+ "InputMessagesTemplateTemplate",
+ "InputMessagesTemplateTemplateMessage",
+ "InputMessagesTemplateTemplateMessageContent",
+ "InputMessagesTemplateTemplateMessageContentOutputText",
+ "InputMessagesItemReference",
"SamplingParams",
]
-class InputMessagesTemplateTemplateChatMessage(BaseModel):
- content: str
- """The content of the message."""
-
- role: str
- """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-class InputMessagesTemplateTemplateInputMessageContent(BaseModel):
- text: str
- """The text content."""
-
- type: Literal["input_text"]
- """The type of content, which is always `input_text`."""
-
-
-class InputMessagesTemplateTemplateInputMessage(BaseModel):
- content: InputMessagesTemplateTemplateInputMessageContent
-
- role: Literal["user", "system", "developer"]
- """The role of the message. One of `user`, `system`, or `developer`."""
-
- type: Literal["message"]
- """The type of item, which is always `message`."""
-
-
-class InputMessagesTemplateTemplateOutputMessageContent(BaseModel):
- text: str
- """The text content."""
-
- type: Literal["output_text"]
- """The type of content, which is always `output_text`."""
-
-
-class InputMessagesTemplateTemplateOutputMessage(BaseModel):
- content: InputMessagesTemplateTemplateOutputMessageContent
-
- role: Literal["assistant"]
- """The role of the message. Must be `assistant` for output."""
-
- type: Literal["message"]
- """The type of item, which is always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
- InputMessagesTemplateTemplateChatMessage,
- InputMessagesTemplateTemplateInputMessage,
- InputMessagesTemplateTemplateOutputMessage,
-]
-
-
-class InputMessagesTemplate(BaseModel):
- template: List[InputMessagesTemplateTemplate]
- """A list of chat messages forming the prompt or context.
-
- May include variable references to the "item" namespace, ie {{item.name}}.
- """
-
- type: Literal["template"]
- """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(BaseModel):
- item_reference: str
- """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
- type: Literal["item_reference"]
- """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Annotated[
- Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
-]
-
-
class SourceFileContentContent(BaseModel):
item: Dict[str, object]
@@ -125,6 +50,9 @@ class SourceFileID(BaseModel):
class SourceStoredCompletions(BaseModel):
+ type: Literal["stored_completions"]
+ """The type of source. Always `stored_completions`."""
+
created_after: Optional[int] = None
"""An optional Unix timestamp to filter items created after this time."""
@@ -147,15 +75,68 @@ class SourceStoredCompletions(BaseModel):
model: Optional[str] = None
"""An optional model to filter by (e.g., 'gpt-4o')."""
- type: Literal["stored_completions"]
- """The type of source. Always `stored_completions`."""
-
Source: TypeAlias = Annotated[
Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type")
]
+class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+ str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText
+]
+
+
+class InputMessagesTemplateTemplateMessage(BaseModel):
+ content: InputMessagesTemplateTemplateMessageContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+InputMessagesTemplateTemplate: TypeAlias = Annotated[
+ Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
+]
+
+
+class InputMessagesTemplate(BaseModel):
+ template: List[InputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class InputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+InputMessages: TypeAlias = Annotated[
+ Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
+]
+
+
class SamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
@@ -171,15 +152,15 @@ class SamplingParams(BaseModel):
class CreateEvalCompletionsRunDataSource(BaseModel):
- input_messages: InputMessages
-
- model: str
- """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
source: Source
"""A StoredCompletionsRunDataSource configuration describing a set of filters"""
type: Literal["completions"]
"""The type of run data source. Always `completions`."""
+ input_messages: Optional[InputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
sampling_params: Optional[SamplingParams] = None
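In the restructured model, `source` and `type` are the only required fields; `input_messages` and `model` become optional, and template entries use the new message shape (or an `EasyInputMessage`). A sketch of creating a run with the param equivalent of this data source; the eval ID and field values are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Illustrative only: input_messages/model are now optional, and the template
# entry follows the new InputMessagesTemplateTemplateMessage shape.
run = client.evals.runs.create(
    "eval_123",  # placeholder eval ID
    name="completions-run",
    data_source={
        "type": "completions",
        "source": {"type": "stored_completions", "model": "gpt-4o", "limit": 100},
        "input_messages": {
            "type": "template",
            "template": [
                {
                    "type": "message",
                    "role": "user",
                    "content": {"type": "input_text", "text": "{{item.question}}"},
                }
            ],
        },
        "model": "gpt-4o-mini",
    },
)
```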
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -6,100 +6,27 @@ from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
+from ..responses.easy_input_message_param import EasyInputMessageParam
+from ..responses.response_input_text_param import ResponseInputTextParam
__all__ = [
"CreateEvalCompletionsRunDataSourceParam",
- "InputMessages",
- "InputMessagesTemplate",
- "InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateChatMessage",
- "InputMessagesTemplateTemplateInputMessage",
- "InputMessagesTemplateTemplateInputMessageContent",
- "InputMessagesTemplateTemplateOutputMessage",
- "InputMessagesTemplateTemplateOutputMessageContent",
- "InputMessagesItemReference",
"Source",
"SourceFileContent",
"SourceFileContentContent",
"SourceFileID",
"SourceStoredCompletions",
+ "InputMessages",
+ "InputMessagesTemplate",
+ "InputMessagesTemplateTemplate",
+ "InputMessagesTemplateTemplateMessage",
+ "InputMessagesTemplateTemplateMessageContent",
+ "InputMessagesTemplateTemplateMessageContentOutputText",
+ "InputMessagesItemReference",
"SamplingParams",
]
-class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
- content: Required[str]
- """The content of the message."""
-
- role: Required[str]
- """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-class InputMessagesTemplateTemplateInputMessageContent(TypedDict, total=False):
- text: Required[str]
- """The text content."""
-
- type: Required[Literal["input_text"]]
- """The type of content, which is always `input_text`."""
-
-
-class InputMessagesTemplateTemplateInputMessage(TypedDict, total=False):
- content: Required[InputMessagesTemplateTemplateInputMessageContent]
-
- role: Required[Literal["user", "system", "developer"]]
- """The role of the message. One of `user`, `system`, or `developer`."""
-
- type: Required[Literal["message"]]
- """The type of item, which is always `message`."""
-
-
-class InputMessagesTemplateTemplateOutputMessageContent(TypedDict, total=False):
- text: Required[str]
- """The text content."""
-
- type: Required[Literal["output_text"]]
- """The type of content, which is always `output_text`."""
-
-
-class InputMessagesTemplateTemplateOutputMessage(TypedDict, total=False):
- content: Required[InputMessagesTemplateTemplateOutputMessageContent]
-
- role: Required[Literal["assistant"]]
- """The role of the message. Must be `assistant` for output."""
-
- type: Required[Literal["message"]]
- """The type of item, which is always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
- InputMessagesTemplateTemplateChatMessage,
- InputMessagesTemplateTemplateInputMessage,
- InputMessagesTemplateTemplateOutputMessage,
-]
-
-
-class InputMessagesTemplate(TypedDict, total=False):
- template: Required[Iterable[InputMessagesTemplateTemplate]]
- """A list of chat messages forming the prompt or context.
-
- May include variable references to the "item" namespace, ie {{item.name}}.
- """
-
- type: Required[Literal["template"]]
- """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(TypedDict, total=False):
- item_reference: Required[str]
- """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
- type: Required[Literal["item_reference"]]
- """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
-
-
class SourceFileContentContent(TypedDict, total=False):
item: Required[Dict[str, object]]
@@ -123,16 +50,19 @@ class SourceFileID(TypedDict, total=False):
class SourceStoredCompletions(TypedDict, total=False):
- created_after: Required[Optional[int]]
+ type: Required[Literal["stored_completions"]]
+ """The type of source. Always `stored_completions`."""
+
+ created_after: Optional[int]
"""An optional Unix timestamp to filter items created after this time."""
- created_before: Required[Optional[int]]
+ created_before: Optional[int]
"""An optional Unix timestamp to filter items created before this time."""
- limit: Required[Optional[int]]
+ limit: Optional[int]
"""An optional maximum number of items to return."""
- metadata: Required[Optional[Metadata]]
+ metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
@@ -142,16 +72,65 @@ class SourceStoredCompletions(TypedDict, total=False):
a maximum length of 512 characters.
"""
- model: Required[Optional[str]]
+ model: Optional[str]
"""An optional model to filter by (e.g., 'gpt-4o')."""
- type: Required[Literal["stored_completions"]]
- """The type of source. Always `stored_completions`."""
-
Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
+class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+ str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText
+]
+
+
+class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
+ content: Required[InputMessagesTemplateTemplateMessageContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
+
+
+class InputMessagesTemplate(TypedDict, total=False):
+ template: Required[Iterable[InputMessagesTemplateTemplate]]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Required[Literal["template"]]
+ """The type of input messages. Always `template`."""
+
+
+class InputMessagesItemReference(TypedDict, total=False):
+ item_reference: Required[str]
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Required[Literal["item_reference"]]
+ """The type of input messages. Always `item_reference`."""
+
+
+InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
+
+
class SamplingParams(TypedDict, total=False):
max_completion_tokens: int
"""The maximum number of tokens in the generated output."""
@@ -167,15 +146,15 @@ class SamplingParams(TypedDict, total=False):
class CreateEvalCompletionsRunDataSourceParam(TypedDict, total=False):
- input_messages: Required[InputMessages]
-
- model: Required[str]
- """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
source: Required[Source]
"""A StoredCompletionsRunDataSource configuration describing a set of filters"""
type: Required[Literal["completions"]]
"""The type of run data source. Always `completions`."""
+ input_messages: InputMessages
+
+ model: str
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
sampling_params: SamplingParams
src/openai/types/evals/run_cancel_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunCancelResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunCancelResponse",
+ "DataSource",
+ "DataSourceCompletions",
+ "DataSourceCompletionsSource",
+ "DataSourceCompletionsSourceFileContent",
+ "DataSourceCompletionsSourceFileContentContent",
+ "DataSourceCompletionsSourceFileID",
+ "DataSourceCompletionsSourceResponses",
+ "DataSourceCompletionsInputMessages",
+ "DataSourceCompletionsInputMessagesTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCompletionsInputMessagesItemReference",
+ "DataSourceCompletionsSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+ content: List[DataSourceCompletionsSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ allow_parallel_tool_calls: Optional[bool] = None
+ """Whether to allow parallel tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional search string for instructions.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+ Union[
+ DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+ DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+ template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+ Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+ source: DataSourceCompletionsSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["completions"]
+ """The type of run data source. Always `completions`."""
+
+ input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/evals/run_create_params.py
@@ -2,14 +2,34 @@
from __future__ import annotations
-from typing import Union, Optional
-from typing_extensions import Required, TypeAlias, TypedDict
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text_param import ResponseInputTextParam
from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam
from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam
-__all__ = ["RunCreateParams", "DataSource"]
+__all__ = [
+ "RunCreateParams",
+ "DataSource",
+ "DataSourceCreateEvalResponsesRunDataSource",
+ "DataSourceCreateEvalResponsesRunDataSourceSource",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileContent",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileID",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceResponses",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessages",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference",
+ "DataSourceCreateEvalResponsesRunDataSourceSamplingParams",
+]
class RunCreateParams(TypedDict, total=False):
@@ -30,4 +50,198 @@ class RunCreateParams(TypedDict, total=False):
"""The name of the run."""
-DataSource: TypeAlias = Union[CreateEvalJSONLRunDataSourceParam, CreateEvalCompletionsRunDataSourceParam]
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent(TypedDict, total=False):
+ item: Required[Dict[str, object]]
+
+ sample: Dict[str, object]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContent(TypedDict, total=False):
+ content: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent]]
+ """The content of the jsonl file."""
+
+ type: Required[Literal["file_content"]]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=False):
+ id: Required[str]
+ """The identifier of the file."""
+
+ type: Required[Literal["file_id"]]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False):
+ type: Required[Literal["responses"]]
+ """The type of run data source. Always `responses`."""
+
+ allow_parallel_tool_calls: Optional[bool]
+ """Whether to allow parallel tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ created_after: Optional[int]
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int]
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool]
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str]
+ """Optional search string for instructions.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object]
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str]
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float]
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ top_p: Optional[float]
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]]
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceSource: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceSourceFileContent,
+ DataSourceCreateEvalResponsesRunDataSourceSourceFileID,
+ DataSourceCreateEvalResponsesRunDataSourceSourceResponses,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
+ content: Required[str]
+ """The content of the message."""
+
+ role: Required[str]
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText(
+ TypedDict, total=False
+):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str,
+ ResponseInputTextParam,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
+ content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, total=False):
+ template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Required[Literal["template"]]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False):
+ item_reference: Required[str]
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Required[Literal["item_reference"]]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessages: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False):
+ max_completion_tokens: int
+ """The maximum number of tokens in the generated output."""
+
+ seed: int
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: float
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: float
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False):
+ source: Required[DataSourceCreateEvalResponsesRunDataSourceSource]
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Required[Literal["completions"]]
+ """The type of run data source. Always `completions`."""
+
+ input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages
+
+ model: str
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: DataSourceCreateEvalResponsesRunDataSourceSamplingParams
+
+
+DataSource: TypeAlias = Union[
+ CreateEvalJSONLRunDataSourceParam,
+ CreateEvalCompletionsRunDataSourceParam,
+ DataSourceCreateEvalResponsesRunDataSource,
+]
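
The new `DataSourceCreateEvalResponsesRunDataSource` shape lets a run pull its items from previously stored Responses API data. A hedged sketch of creating such a run follows; the eval ID, model, and filter values are placeholders, and it assumes the existing `client.evals.runs.create(...)` helper.

```python
# Sketch only: start a run whose items are selected from stored responses.
from openai import OpenAI

client = OpenAI()

run = client.evals.runs.create(
    eval_id="eval_abc123",  # hypothetical eval ID
    name="responses-backed-run",
    data_source={
        "type": "completions",      # the run data source type stays `completions`
        "source": {
            "type": "responses",    # ...but items come from stored responses
            "model": "gpt-4o-mini",
            "created_after": 1714521600,
            "has_tool_calls": False,
        },
    },
)
print(run.id)
```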
src/openai/types/evals/run_create_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunCreateResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunCreateResponse",
+ "DataSource",
+ "DataSourceCompletions",
+ "DataSourceCompletionsSource",
+ "DataSourceCompletionsSourceFileContent",
+ "DataSourceCompletionsSourceFileContentContent",
+ "DataSourceCompletionsSourceFileID",
+ "DataSourceCompletionsSourceResponses",
+ "DataSourceCompletionsInputMessages",
+ "DataSourceCompletionsInputMessagesTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCompletionsInputMessagesItemReference",
+ "DataSourceCompletionsSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+ content: List[DataSourceCompletionsSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ allow_parallel_tool_calls: Optional[bool] = None
+ """Whether to allow parallel tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional search string for instructions.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+ Union[
+ DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+ DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+ template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+ Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+ source: DataSourceCompletionsSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["completions"]
+ """The type of run data source. Always `completions`."""
+
+ input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/evals/run_list_params.py
@@ -23,5 +23,5 @@ class RunListParams(TypedDict, total=False):
status: Literal["queued", "in_progress", "completed", "canceled", "failed"]
"""Filter runs by status.
- Use "queued" | "in_progress" | "failed" | "completed" | "canceled".
+ One of `queued` | `in_progress` | `failed` | `completed` | `canceled`.
"""
src/openai/types/evals/run_list_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunListResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunListResponse",
+ "DataSource",
+ "DataSourceCompletions",
+ "DataSourceCompletionsSource",
+ "DataSourceCompletionsSourceFileContent",
+ "DataSourceCompletionsSourceFileContentContent",
+ "DataSourceCompletionsSourceFileID",
+ "DataSourceCompletionsSourceResponses",
+ "DataSourceCompletionsInputMessages",
+ "DataSourceCompletionsInputMessagesTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCompletionsInputMessagesItemReference",
+ "DataSourceCompletionsSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+ content: List[DataSourceCompletionsSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ allow_parallel_tool_calls: Optional[bool] = None
+ """Whether to allow parallel tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional search string for instructions.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+ Union[
+ DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+ DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+ template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+ Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+ source: DataSourceCompletionsSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["completions"]
+ """The type of run data source. Always `completions`."""
+
+ input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/evals/run_retrieve_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunRetrieveResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunRetrieveResponse",
+ "DataSource",
+ "DataSourceCompletions",
+ "DataSourceCompletionsSource",
+ "DataSourceCompletionsSourceFileContent",
+ "DataSourceCompletionsSourceFileContentContent",
+ "DataSourceCompletionsSourceFileID",
+ "DataSourceCompletionsSourceResponses",
+ "DataSourceCompletionsInputMessages",
+ "DataSourceCompletionsInputMessagesTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplate",
+ "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCompletionsInputMessagesItemReference",
+ "DataSourceCompletionsSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+ content: List[DataSourceCompletionsSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ allow_parallel_tool_calls: Optional[bool] = None
+ """Whether to allow parallel tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional search string for instructions.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+ Union[
+ DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+ DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+ template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+ Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+ source: DataSourceCompletionsSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["completions"]
+ """The type of run data source. Always `completions`."""
+
+ input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+ PropertyInfo(discriminator="type"),
]
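
On the read side, the same widened union appears on retrieved runs, so callers may now receive the new `DataSourceCompletions` model. A small, hedged sketch (placeholder IDs, and assuming the run object keeps exposing a `data_source` field):

```python
# Sketch: distinguish the new responses-backed data source when reading a run back.
from openai import OpenAI
from openai.types.evals.run_retrieve_response import DataSourceCompletions

client = OpenAI()
run = client.evals.runs.retrieve("run_abc123", eval_id="eval_abc123")  # placeholder IDs

if isinstance(run.data_source, DataSourceCompletions):
    source = run.data_source.source
    if source.type == "responses":
        print("responses filtered by model:", source.model)
```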
src/openai/types/responses/__init__.py
@@ -22,6 +22,7 @@ from .response_status import ResponseStatus as ResponseStatus
from .web_search_tool import WebSearchTool as WebSearchTool
from .file_search_tool import FileSearchTool as FileSearchTool
from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
+from .easy_input_message import EasyInputMessage as EasyInputMessage
from .response_item_list import ResponseItemList as ResponseItemList
from .computer_tool_param import ComputerToolParam as ComputerToolParam
from .function_tool_param import FunctionToolParam as FunctionToolParam
@@ -117,6 +118,12 @@ from .response_file_search_call_searching_event import (
from .response_input_message_content_list_param import (
ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
)
+from .response_reasoning_summary_part_done_event import (
+ ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent,
+)
+from .response_reasoning_summary_text_done_event import (
+ ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent,
+)
from .response_web_search_call_in_progress_event import (
ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
)
@@ -126,6 +133,12 @@ from .response_file_search_call_in_progress_event import (
from .response_function_call_arguments_done_event import (
ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
)
+from .response_reasoning_summary_part_added_event import (
+ ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent,
+)
+from .response_reasoning_summary_text_delta_event import (
+ ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent,
+)
from .response_function_call_arguments_delta_event import (
ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
)
src/openai/types/responses/easy_input_message.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["EasyInputMessage"]
+
+
+class EasyInputMessage(BaseModel):
+ content: Union[str, ResponseInputMessageContentList]
+ """
+ Text, image, or audio input to the model, used to generate a response. Can also
+ contain previous assistant responses.
+ """
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
src/openai/types/responses/response_reasoning_summary_part_added_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryPartAddedEvent", "Part"]
+
+
+class Part(BaseModel):
+ text: str
+ """The text of the summary part."""
+
+ type: Literal["summary_text"]
+ """The type of the summary part. Always `summary_text`."""
+
+
+class ResponseReasoningSummaryPartAddedEvent(BaseModel):
+ item_id: str
+ """The ID of the item this summary part is associated with."""
+
+ output_index: int
+ """The index of the output item this summary part is associated with."""
+
+ part: Part
+ """The summary part that was added."""
+
+ summary_index: int
+ """The index of the summary part within the reasoning summary."""
+
+ type: Literal["response.reasoning_summary_part.added"]
+ """The type of the event. Always `response.reasoning_summary_part.added`."""
src/openai/types/responses/response_reasoning_summary_part_done_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryPartDoneEvent", "Part"]
+
+
+class Part(BaseModel):
+ text: str
+ """The text of the summary part."""
+
+ type: Literal["summary_text"]
+ """The type of the summary part. Always `summary_text`."""
+
+
+class ResponseReasoningSummaryPartDoneEvent(BaseModel):
+ item_id: str
+ """The ID of the item this summary part is associated with."""
+
+ output_index: int
+ """The index of the output item this summary part is associated with."""
+
+ part: Part
+ """The completed summary part."""
+
+ summary_index: int
+ """The index of the summary part within the reasoning summary."""
+
+ type: Literal["response.reasoning_summary_part.done"]
+ """The type of the event. Always `response.reasoning_summary_part.done`."""
src/openai/types/responses/response_reasoning_summary_text_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryTextDeltaEvent"]
+
+
+class ResponseReasoningSummaryTextDeltaEvent(BaseModel):
+ delta: str
+ """The text delta that was added to the summary."""
+
+ item_id: str
+ """The ID of the item this summary text delta is associated with."""
+
+ output_index: int
+ """The index of the output item this summary text delta is associated with."""
+
+ summary_index: int
+ """The index of the summary part within the reasoning summary."""
+
+ type: Literal["response.reasoning_summary_text.delta"]
+ """The type of the event. Always `response.reasoning_summary_text.delta`."""
src/openai/types/responses/response_reasoning_summary_text_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryTextDoneEvent"]
+
+
+class ResponseReasoningSummaryTextDoneEvent(BaseModel):
+ item_id: str
+ """The ID of the item this summary text is associated with."""
+
+ output_index: int
+ """The index of the output item this summary text is associated with."""
+
+ summary_index: int
+ """The index of the summary part within the reasoning summary."""
+
+ text: str
+ """The full text of the completed reasoning summary."""
+
+ type: Literal["response.reasoning_summary_text.done"]
+ """The type of the event. Always `response.reasoning_summary_text.done`."""
src/openai/types/responses/response_stream_event.py
@@ -27,9 +27,13 @@ from .response_web_search_call_completed_event import ResponseWebSearchCallCompl
from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
+from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent
+from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent
from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
@@ -65,6 +69,10 @@ ResponseStreamEvent: TypeAlias = Annotated[
ResponseIncompleteEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
+ ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartDoneEvent,
+ ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent,
ResponseRefusalDeltaEvent,
ResponseRefusalDoneEvent,
ResponseTextAnnotationDeltaEvent,
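
With the four new event models registered in the stream union above, a streaming consumer can surface reasoning summaries as they arrive. A hedged sketch follows; the model name and the `reasoning={"summary": "auto"}` request parameter are assumptions, not part of this commit.

```python
# Sketch: print reasoning-summary text as it streams, using the new event types.
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="o4-mini",                    # assumed reasoning-capable model
    input="Explain why the sky is blue.",
    reasoning={"summary": "auto"},      # assumed parameter for requesting summaries
    stream=True,
)
for event in stream:
    if event.type == "response.reasoning_summary_text.delta":
        print(event.delta, end="", flush=True)
    elif event.type == "response.reasoning_summary_text.done":
        print()  # reasoning summary complete
```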
src/openai/types/eval_create_params.py
@@ -8,20 +8,25 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .shared_params.metadata import Metadata
from .eval_string_check_grader_param import EvalStringCheckGraderParam
from .eval_text_similarity_grader_param import EvalTextSimilarityGraderParam
+from .responses.response_input_text_param import ResponseInputTextParam
__all__ = [
"EvalCreateParams",
"DataSourceConfig",
"DataSourceConfigCustom",
- "DataSourceConfigStoredCompletions",
+ "DataSourceConfigLogs",
"TestingCriterion",
"TestingCriterionLabelModel",
"TestingCriterionLabelModelInput",
"TestingCriterionLabelModelInputSimpleInputMessage",
- "TestingCriterionLabelModelInputInputMessage",
- "TestingCriterionLabelModelInputInputMessageContent",
- "TestingCriterionLabelModelInputOutputMessage",
- "TestingCriterionLabelModelInputOutputMessageContent",
+ "TestingCriterionLabelModelInputEvalItem",
+ "TestingCriterionLabelModelInputEvalItemContent",
+ "TestingCriterionLabelModelInputEvalItemContentOutputText",
+ "TestingCriterionPython",
+ "TestingCriterionScoreModel",
+ "TestingCriterionScoreModelInput",
+ "TestingCriterionScoreModelInputContent",
+ "TestingCriterionScoreModelInputContentOutputText",
]
@@ -45,37 +50,30 @@ class EvalCreateParams(TypedDict, total=False):
name: str
"""The name of the evaluation."""
- share_with_openai: bool
- """Indicates whether the evaluation is shared with OpenAI."""
-
class DataSourceConfigCustom(TypedDict, total=False):
item_schema: Required[Dict[str, object]]
- """The json schema for the run data source items."""
+ """The json schema for each row in the data source."""
type: Required[Literal["custom"]]
"""The type of data source. Always `custom`."""
include_sample_schema: bool
- """Whether to include the sample schema in the data source."""
-
-
-class DataSourceConfigStoredCompletions(TypedDict, total=False):
- type: Required[Literal["stored_completions"]]
- """The type of data source. Always `stored_completions`."""
+ """
+ Whether the eval should expect you to populate the sample namespace (ie, by
+ generating responses off of your data source)
+ """
- metadata: Optional[Metadata]
- """Set of 16 key-value pairs that can be attached to an object.
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
+class DataSourceConfigLogs(TypedDict, total=False):
+ type: Required[Literal["logs"]]
+ """The type of data source. Always `logs`."""
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
+ metadata: Dict[str, object]
+ """Metadata filters for the logs data source."""
-DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigStoredCompletions]
+DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigLogs]
class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
@@ -86,51 +84,44 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
"""The role of the message (e.g. "system", "assistant", "user")."""
-class TestingCriterionLabelModelInputInputMessageContent(TypedDict, total=False):
+class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False):
text: Required[str]
- """The text content."""
-
- type: Required[Literal["input_text"]]
- """The type of content, which is always `input_text`."""
-
+ """The text output from the model."""
-class TestingCriterionLabelModelInputInputMessage(TypedDict, total=False):
- content: Required[TestingCriterionLabelModelInputInputMessageContent]
-
- role: Required[Literal["user", "system", "developer"]]
- """The role of the message. One of `user`, `system`, or `developer`."""
-
- type: Required[Literal["message"]]
- """The type of item, which is always `message`."""
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
-class TestingCriterionLabelModelInputOutputMessageContent(TypedDict, total=False):
- text: Required[str]
- """The text content."""
+TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[
+ str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText
+]
- type: Required[Literal["output_text"]]
- """The type of content, which is always `output_text`."""
+class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False):
+ content: Required[TestingCriterionLabelModelInputEvalItemContent]
+ """Text inputs to the model - can contain template strings."""
-class TestingCriterionLabelModelInputOutputMessage(TypedDict, total=False):
- content: Required[TestingCriterionLabelModelInputOutputMessageContent]
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
- role: Required[Literal["assistant"]]
- """The role of the message. Must be `assistant` for output."""
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
- type: Required[Literal["message"]]
- """The type of item, which is always `message`."""
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
TestingCriterionLabelModelInput: TypeAlias = Union[
- TestingCriterionLabelModelInputSimpleInputMessage,
- TestingCriterionLabelModelInputInputMessage,
- TestingCriterionLabelModelInputOutputMessage,
+ TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem
]
class TestingCriterionLabelModel(TypedDict, total=False):
input: Required[Iterable[TestingCriterionLabelModelInput]]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
labels: Required[List[str]]
"""The labels to classify to each item in the evaluation."""
@@ -148,6 +139,77 @@ class TestingCriterionLabelModel(TypedDict, total=False):
"""The object type, which is always `label_model`."""
+class TestingCriterionPython(TypedDict, total=False):
+ name: Required[str]
+ """The name of the grader."""
+
+ source: Required[str]
+ """The source code of the python script."""
+
+ type: Required[Literal["python"]]
+ """The object type, which is always `python`."""
+
+ image_tag: str
+ """The image tag to use for the python script."""
+
+ pass_threshold: float
+ """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+ str, ResponseInputTextParam, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(TypedDict, total=False):
+ content: Required[TestingCriterionScoreModelInputContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(TypedDict, total=False):
+ input: Required[Iterable[TestingCriterionScoreModelInput]]
+ """The input text. This may include template strings."""
+
+ model: Required[str]
+ """The model to use for the evaluation."""
+
+ name: Required[str]
+ """The name of the grader."""
+
+ type: Required[Literal["score_model"]]
+ """The object type, which is always `score_model`."""
+
+ pass_threshold: float
+ """The threshold for the score."""
+
+ range: Iterable[float]
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: object
+ """The sampling parameters for the model."""
+
+
TestingCriterion: TypeAlias = Union[
- TestingCriterionLabelModel, EvalStringCheckGraderParam, EvalTextSimilarityGraderParam
+ TestingCriterionLabelModel,
+ EvalStringCheckGraderParam,
+ EvalTextSimilarityGraderParam,
+ TestingCriterionPython,
+ TestingCriterionScoreModel,
]
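
The widened `TestingCriterion` union means evals can now be created with `python` and `score_model` graders alongside the existing ones. A hedged sketch follows; the item schema, grader script body, model, and thresholds are illustrative placeholders, and the grading-script contract is not defined by this commit.

```python
# Sketch: create an eval using the new `python` and `score_model` grader shapes.
from openai import OpenAI

client = OpenAI()

evaluation = client.evals.create(
    name="summary-quality",
    data_source_config={
        "type": "custom",
        "item_schema": {
            "type": "object",
            "properties": {"document": {"type": "string"}},
            "required": ["document"],
        },
        "include_sample_schema": True,
    },
    testing_criteria=[
        {
            "type": "python",
            "name": "length-check",
            # Placeholder script body; the real grading contract is defined server-side.
            "source": "def grade(sample, item):\n    return 1.0",
            "pass_threshold": 1.0,
        },
        {
            "type": "score_model",
            "name": "relevance",
            "model": "gpt-4o-mini",
            "input": [
                {"role": "system", "content": "Rate relevance from 0 to 1."},
                {"role": "user", "content": "Doc: {{item.document}}\nAnswer: {{sample.output_text}}"},
            ],
            "pass_threshold": 0.7,
        },
    ],
)
print(evaluation.id)
```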
src/openai/types/eval_create_response.py
@@ -9,17 +9,106 @@ from .shared.metadata import Metadata
from .eval_label_model_grader import EvalLabelModelGrader
from .eval_string_check_grader import EvalStringCheckGrader
from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
-__all__ = ["EvalCreateResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+ "EvalCreateResponse",
+ "DataSourceConfig",
+ "TestingCriterion",
+ "TestingCriterionPython",
+ "TestingCriterionScoreModel",
+ "TestingCriterionScoreModelInput",
+ "TestingCriterionScoreModelInputContent",
+ "TestingCriterionScoreModelInputContentOutputText",
+]
DataSourceConfig: TypeAlias = Annotated[
Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]
+
+class TestingCriterionPython(BaseModel):
+ __test__ = False
+ name: str
+ """The name of the grader."""
+
+ source: str
+ """The source code of the python script."""
+
+ type: Literal["python"]
+ """The object type, which is always `python`."""
+
+ image_tag: Optional[str] = None
+ """The image tag to use for the python script."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+ __test__ = False
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+ str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+ __test__ = False
+ content: TestingCriterionScoreModelInputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+ __test__ = False
+ input: List[TestingCriterionScoreModelInput]
+ """The input text. This may include template strings."""
+
+ model: str
+ """The model to use for the evaluation."""
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["score_model"]
+ """The object type, which is always `score_model`."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+ range: Optional[List[float]] = None
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: Optional[object] = None
+ """The sampling parameters for the model."""
+
+
TestingCriterion: TypeAlias = Annotated[
- Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+ Union[
+ EvalLabelModelGrader,
+ EvalStringCheckGrader,
+ EvalTextSimilarityGrader,
+ TestingCriterionPython,
+ TestingCriterionScoreModel,
+ ],
+ PropertyInfo(discriminator="type"),
]
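On the response side the widened union is discriminated on `type`, so parsed criteria come back as the new model classes and can be narrowed with plain `isinstance` checks. A small sketch; `ev` is assumed to be an `EvalCreateResponse` obtained from an earlier call.

```python
from openai.types.eval_create_response import (
    EvalCreateResponse,
    TestingCriterionPython,
    TestingCriterionScoreModel,
)

def summarize_criteria(ev: EvalCreateResponse) -> None:
    # The union is discriminated on `type`, so isinstance checks are enough.
    for criterion in ev.testing_criteria:
        if isinstance(criterion, TestingCriterionPython):
            print(f"python grader {criterion.name!r}, threshold={criterion.pass_threshold}")
        elif isinstance(criterion, TestingCriterionScoreModel):
            print(f"score_model grader {criterion.name!r} using {criterion.model}")
        else:
            print(f"{criterion.type} grader")
```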
@@ -49,8 +138,5 @@ class EvalCreateResponse(BaseModel):
object: Literal["eval"]
"""The object type."""
- share_with_openai: bool
- """Indicates whether the evaluation is shared with OpenAI."""
-
testing_criteria: List[TestingCriterion]
"""A list of testing criteria."""
src/openai/types/eval_label_model_grader.py
@@ -1,58 +1,37 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union
-from typing_extensions import Literal, Annotated, TypeAlias
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
-from .._utils import PropertyInfo
from .._models import BaseModel
+from .responses.response_input_text import ResponseInputText
-__all__ = [
- "EvalLabelModelGrader",
- "Input",
- "InputInputMessage",
- "InputInputMessageContent",
- "InputAssistant",
- "InputAssistantContent",
-]
+__all__ = ["EvalLabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
-class InputInputMessageContent(BaseModel):
+class InputContentOutputText(BaseModel):
text: str
- """The text content."""
-
- type: Literal["input_text"]
- """The type of content, which is always `input_text`."""
-
-
-class InputInputMessage(BaseModel):
- content: InputInputMessageContent
-
- role: Literal["user", "system", "developer"]
- """The role of the message. One of `user`, `system`, or `developer`."""
-
- type: Literal["message"]
- """The type of item, which is always `message`."""
-
-
-class InputAssistantContent(BaseModel):
- text: str
- """The text content."""
+ """The text output from the model."""
type: Literal["output_text"]
- """The type of content, which is always `output_text`."""
+ """The type of the output text. Always `output_text`."""
+
+InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
-class InputAssistant(BaseModel):
- content: InputAssistantContent
- role: Literal["assistant"]
- """The role of the message. Must be `assistant` for output."""
+class Input(BaseModel):
+ content: InputContent
+ """Text inputs to the model - can contain template strings."""
- type: Literal["message"]
- """The type of item, which is always `message`."""
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
-Input: TypeAlias = Annotated[Union[InputInputMessage, InputAssistant], PropertyInfo(discriminator="role")]
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
class EvalLabelModelGrader(BaseModel):
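The role-discriminated `InputInputMessage`/`InputAssistant` pair is replaced by a single flattened `Input` model, so user and assistant messages now share one shape. A short illustration with made-up content values:

```python
from openai.types.eval_label_model_grader import Input, InputContentOutputText

# Both messages parse into the same flattened `Input` model now that the
# role-based union is gone.
user_msg = Input(role="user", content="Classify the sentiment of: {{item.text}}")
assistant_msg = Input(
    role="assistant",
    content=InputContentOutputText(type="output_text", text="positive"),
)
print(user_msg.role, assistant_msg.content.text)
```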
src/openai/types/eval_list_response.py
@@ -9,17 +9,106 @@ from .shared.metadata import Metadata
from .eval_label_model_grader import EvalLabelModelGrader
from .eval_string_check_grader import EvalStringCheckGrader
from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
-__all__ = ["EvalListResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+ "EvalListResponse",
+ "DataSourceConfig",
+ "TestingCriterion",
+ "TestingCriterionPython",
+ "TestingCriterionScoreModel",
+ "TestingCriterionScoreModelInput",
+ "TestingCriterionScoreModelInputContent",
+ "TestingCriterionScoreModelInputContentOutputText",
+]
DataSourceConfig: TypeAlias = Annotated[
Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]
+
+class TestingCriterionPython(BaseModel):
+ __test__ = False
+ name: str
+ """The name of the grader."""
+
+ source: str
+ """The source code of the python script."""
+
+ type: Literal["python"]
+ """The object type, which is always `python`."""
+
+ image_tag: Optional[str] = None
+ """The image tag to use for the python script."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+ __test__ = False
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+ str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+ __test__ = False
+ content: TestingCriterionScoreModelInputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+ __test__ = False
+ input: List[TestingCriterionScoreModelInput]
+ """The input text. This may include template strings."""
+
+ model: str
+ """The model to use for the evaluation."""
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["score_model"]
+ """The object type, which is always `score_model`."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+ range: Optional[List[float]] = None
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: Optional[object] = None
+ """The sampling parameters for the model."""
+
+
TestingCriterion: TypeAlias = Annotated[
- Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+ Union[
+ EvalLabelModelGrader,
+ EvalStringCheckGrader,
+ EvalTextSimilarityGrader,
+ TestingCriterionPython,
+ TestingCriterionScoreModel,
+ ],
+ PropertyInfo(discriminator="type"),
]
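The same widened union applies to list results, so existing evals can be scanned for the new grader types through the auto-paginating `client.evals.list`. A hedged sketch; `limit` only controls the page size here.

```python
from openai import OpenAI
from openai.types.eval_list_response import TestingCriterionPython

client = OpenAI()

# Auto-pagination walks every page; `limit` is just the per-page size.
python_graded = [
    ev.id
    for ev in client.evals.list(limit=20)
    if any(isinstance(c, TestingCriterionPython) for c in ev.testing_criteria)
]
print(python_graded)
```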
@@ -49,8 +138,5 @@ class EvalListResponse(BaseModel):
object: Literal["eval"]
"""The object type."""
- share_with_openai: bool
- """Indicates whether the evaluation is shared with OpenAI."""
-
testing_criteria: List[TestingCriterion]
"""A list of testing criteria."""
src/openai/types/eval_retrieve_response.py
@@ -9,17 +9,106 @@ from .shared.metadata import Metadata
from .eval_label_model_grader import EvalLabelModelGrader
from .eval_string_check_grader import EvalStringCheckGrader
from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
-__all__ = ["EvalRetrieveResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+ "EvalRetrieveResponse",
+ "DataSourceConfig",
+ "TestingCriterion",
+ "TestingCriterionPython",
+ "TestingCriterionScoreModel",
+ "TestingCriterionScoreModelInput",
+ "TestingCriterionScoreModelInputContent",
+ "TestingCriterionScoreModelInputContentOutputText",
+]
DataSourceConfig: TypeAlias = Annotated[
Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]
+
+class TestingCriterionPython(BaseModel):
+ __test__ = False
+ name: str
+ """The name of the grader."""
+
+ source: str
+ """The source code of the python script."""
+
+ type: Literal["python"]
+ """The object type, which is always `python`."""
+
+ image_tag: Optional[str] = None
+ """The image tag to use for the python script."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+ __test__ = False
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+ str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+ __test__ = False
+ content: TestingCriterionScoreModelInputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+ __test__ = False
+ input: List[TestingCriterionScoreModelInput]
+ """The input text. This may include template strings."""
+
+ model: str
+ """The model to use for the evaluation."""
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["score_model"]
+ """The object type, which is always `score_model`."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+ range: Optional[List[float]] = None
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: Optional[object] = None
+ """The sampling parameters for the model."""
+
+
TestingCriterion: TypeAlias = Annotated[
- Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+ Union[
+ EvalLabelModelGrader,
+ EvalStringCheckGrader,
+ EvalTextSimilarityGrader,
+ TestingCriterionPython,
+ TestingCriterionScoreModel,
+ ],
+ PropertyInfo(discriminator="type"),
]
@@ -49,8 +138,5 @@ class EvalRetrieveResponse(BaseModel):
object: Literal["eval"]
"""The object type."""
- share_with_openai: bool
- """Indicates whether the evaluation is shared with OpenAI."""
-
testing_criteria: List[TestingCriterion]
"""A list of testing criteria."""
src/openai/types/eval_text_similarity_grader.py
@@ -10,22 +10,12 @@ __all__ = ["EvalTextSimilarityGrader"]
class EvalTextSimilarityGrader(BaseModel):
evaluation_metric: Literal[
- "fuzzy_match",
- "bleu",
- "gleu",
- "meteor",
- "rouge_1",
- "rouge_2",
- "rouge_3",
- "rouge_4",
- "rouge_5",
- "rouge_l",
- "cosine",
+ "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
]
"""The evaluation metric to use.
- One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
- `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
+ One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
+ `rouge_4`, `rouge_5`, or `rouge_l`.
"""
input: str
src/openai/types/eval_text_similarity_grader_param.py
@@ -10,23 +10,13 @@ __all__ = ["EvalTextSimilarityGraderParam"]
class EvalTextSimilarityGraderParam(TypedDict, total=False):
evaluation_metric: Required[
Literal[
- "fuzzy_match",
- "bleu",
- "gleu",
- "meteor",
- "rouge_1",
- "rouge_2",
- "rouge_3",
- "rouge_4",
- "rouge_5",
- "rouge_l",
- "cosine",
+ "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
]
]
"""The evaluation metric to use.
- One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
- `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
+ One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
+ `rouge_4`, `rouge_5`, or `rouge_l`.
"""
input: Required[str]
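Since `cosine` is no longer an accepted metric, any grader configured with it has to move to one of the remaining options. A sketch of the updated param dict; the `reference`, `pass_threshold`, and `type` keys follow the existing `text_similarity` shape and are not part of this hunk.

```python
from openai.types.eval_text_similarity_grader_param import EvalTextSimilarityGraderParam

# `cosine` was removed from the Literal; `fuzzy_match` is used as the stand-in.
grader: EvalTextSimilarityGraderParam = {
    "type": "text_similarity",
    "evaluation_metric": "fuzzy_match",
    "input": "{{sample.output_text}}",
    "reference": "{{item.expected}}",
    "pass_threshold": 0.8,
}
```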
src/openai/types/eval_update_response.py
@@ -9,17 +9,106 @@ from .shared.metadata import Metadata
from .eval_label_model_grader import EvalLabelModelGrader
from .eval_string_check_grader import EvalStringCheckGrader
from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
-__all__ = ["EvalUpdateResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+ "EvalUpdateResponse",
+ "DataSourceConfig",
+ "TestingCriterion",
+ "TestingCriterionPython",
+ "TestingCriterionScoreModel",
+ "TestingCriterionScoreModelInput",
+ "TestingCriterionScoreModelInputContent",
+ "TestingCriterionScoreModelInputContentOutputText",
+]
DataSourceConfig: TypeAlias = Annotated[
Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
]
+
+class TestingCriterionPython(BaseModel):
+ __test__ = False
+ name: str
+ """The name of the grader."""
+
+ source: str
+ """The source code of the python script."""
+
+ type: Literal["python"]
+ """The object type, which is always `python`."""
+
+ image_tag: Optional[str] = None
+ """The image tag to use for the python script."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+ __test__ = False
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+ str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+ __test__ = False
+ content: TestingCriterionScoreModelInputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+ __test__ = False
+ input: List[TestingCriterionScoreModelInput]
+ """The input text. This may include template strings."""
+
+ model: str
+ """The model to use for the evaluation."""
+
+ name: str
+ """The name of the grader."""
+
+ type: Literal["score_model"]
+ """The object type, which is always `score_model`."""
+
+ pass_threshold: Optional[float] = None
+ """The threshold for the score."""
+
+ range: Optional[List[float]] = None
+ """The range of the score. Defaults to `[0, 1]`."""
+
+ sampling_params: Optional[object] = None
+ """The sampling parameters for the model."""
+
+
TestingCriterion: TypeAlias = Annotated[
- Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+ Union[
+ EvalLabelModelGrader,
+ EvalStringCheckGrader,
+ EvalTextSimilarityGrader,
+ TestingCriterionPython,
+ TestingCriterionScoreModel,
+ ],
+ PropertyInfo(discriminator="type"),
]
@@ -49,8 +138,5 @@ class EvalUpdateResponse(BaseModel):
object: Literal["eval"]
"""The object type."""
- share_with_openai: bool
- """Indicates whether the evaluation is shared with OpenAI."""
-
testing_criteria: List[TestingCriterion]
"""A list of testing criteria."""
src/openai/types/image.py
@@ -9,16 +9,18 @@ __all__ = ["Image"]
class Image(BaseModel):
b64_json: Optional[str] = None
- """
- The base64-encoded JSON of the generated image, if `response_format` is
- `b64_json`.
+ """The base64-encoded JSON of the generated image.
+
+ Default value for `gpt-image-1`, and only present if `response_format` is set to
+ `b64_json` for `dall-e-2` and `dall-e-3`.
"""
revised_prompt: Optional[str] = None
- """
- The prompt that was used to generate the image, if there was any revision to the
- prompt.
- """
+ """For `dall-e-3` only, the revised prompt that was used to generate the image."""
url: Optional[str] = None
- """The URL of the generated image, if `response_format` is `url` (default)."""
+ """
+ When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
+ `response_format` is set to `url` (default value). Unsupported for
+ `gpt-image-1`.
+ """
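Because `gpt-image-1` returns base64 data by default while `url` stays the default for the DALL·E models, callers generally need to branch on whichever field is populated. A minimal sketch; the output filename is a placeholder.

```python
import base64

from openai.types.image import Image

def save_or_link(img: Image) -> None:
    # gpt-image-1 populates `b64_json` by default; dall-e-2/3 populate `url`
    # unless `response_format="b64_json"` was requested.
    if img.b64_json is not None:
        with open("output.png", "wb") as f:  # placeholder filename
            f.write(base64.b64decode(img.b64_json))
    elif img.url is not None:
        print("download from:", img.url)
```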
src/openai/types/image_create_variation_params.py
@@ -25,10 +25,7 @@ class ImageCreateVariationParams(TypedDict, total=False):
"""
n: Optional[int]
- """The number of images to generate.
-
- Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
- """
+ """The number of images to generate. Must be between 1 and 10."""
response_format: Optional[Literal["url", "b64_json"]]
"""The format in which the generated images are returned.
src/openai/types/image_edit_params.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Optional
+from typing import List, Union, Optional
from typing_extensions import Literal, Required, TypedDict
from .._types import FileTypes
@@ -12,46 +12,61 @@ __all__ = ["ImageEditParams"]
class ImageEditParams(TypedDict, total=False):
- image: Required[FileTypes]
- """The image to edit.
+ image: Required[Union[FileTypes, List[FileTypes]]]
+ """The image(s) to edit.
- Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
- image must have transparency, which will be used as the mask.
+ Must be a supported image file or an array of images. For `gpt-image-1`, each
+ image should be a `png`, `webp`, or `jpg` file less than 25MB. For `dall-e-2`,
+ you can only provide one image, and it should be a square `png` file less than
+ 4MB.
"""
prompt: Required[str]
"""A text description of the desired image(s).
- The maximum length is 1000 characters.
+ The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for
+ `gpt-image-1`.
"""
mask: FileTypes
"""An additional image whose fully transparent areas (e.g.
- where alpha is zero) indicate where `image` should be edited. Must be a valid
- PNG file, less than 4MB, and have the same dimensions as `image`.
+ where alpha is zero) indicate where `image` should be edited. If there are
+ multiple images provided, the mask will be applied to the first image. Must be a
+ valid PNG file, less than 4MB, and have the same dimensions as `image`.
"""
model: Union[str, ImageModel, None]
"""The model to use for image generation.
- Only `dall-e-2` is supported at this time.
+ Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a
+ parameter specific to `gpt-image-1` is used.
"""
n: Optional[int]
"""The number of images to generate. Must be between 1 and 10."""
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
+ """The quality of the image that will be generated.
+
+ `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only
+ supports `standard` quality. Defaults to `auto`.
+ """
+
response_format: Optional[Literal["url", "b64_json"]]
"""The format in which the generated images are returned.
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
+ image has been generated. This parameter is only supported for `dall-e-2`, as
+ `gpt-image-1` will always return base64-encoded images.
"""
size: Optional[Literal["256x256", "512x512", "1024x1024"]]
"""The size of the generated images.
- Must be one of `256x256`, `512x512`, or `1024x1024`.
+ Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
+ `auto` (default value) for `gpt-image-1`, and one of `256x256`, `512x512`, or
+ `1024x1024` for `dall-e-2`.
"""
user: str
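`images.edit` now accepts a list of input images and a `quality` setting when targeting `gpt-image-1`. A hedged sketch; the file paths are placeholders, and the base64 handling follows from the docstrings above (gpt-image-1 always returns `b64_json`).

```python
import base64

from openai import OpenAI

client = OpenAI()

# Placeholder paths; gpt-image-1 accepts multiple png/webp/jpg inputs under 25MB each.
with open("room.png", "rb") as room, open("sofa.png", "rb") as sofa:
    result = client.images.edit(
        image=[room, sofa],  # list form is new in this commit
        prompt="Place the sofa against the far wall of the room",
        model="gpt-image-1",
        quality="high",      # new param; gpt-image-1 only
        size="1024x1024",
    )

# gpt-image-1 always returns base64-encoded images.
if result.data and result.data[0].b64_json:
    with open("edited.png", "wb") as f:  # placeholder filename
        f.write(base64.b64decode(result.data[0].b64_json))
```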
src/openai/types/image_generate_params.py
@@ -14,12 +14,33 @@ class ImageGenerateParams(TypedDict, total=False):
prompt: Required[str]
"""A text description of the desired image(s).
- The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
- `dall-e-3`.
+ The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for
+ `dall-e-2` and 4000 characters for `dall-e-3`.
+ """
+
+ background: Optional[Literal["transparent", "opaque", "auto"]]
+    """Allows you to set transparency for the background of the generated image(s).
+
+ This parameter is only supported for `gpt-image-1`. Must be one of
+ `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+ model will automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
"""
model: Union[str, ImageModel, None]
- """The model to use for image generation."""
+ """The model to use for image generation.
+
+ One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a
+ parameter specific to `gpt-image-1` is used.
+ """
+
+ moderation: Optional[Literal["low", "auto"]]
+ """Control the content-moderation level for images generated by `gpt-image-1`.
+
+ Must be either `low` for less restrictive filtering or `auto` (default value).
+ """
n: Optional[int]
"""The number of images to generate.
@@ -27,34 +48,57 @@ class ImageGenerateParams(TypedDict, total=False):
Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
"""
- quality: Literal["standard", "hd"]
+ output_compression: Optional[int]
+ """The compression level (0-100%) for the generated images.
+
+ This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg`
+ output formats, and defaults to 100.
+ """
+
+ output_format: Optional[Literal["png", "jpeg", "webp"]]
+ """The format in which the generated images are returned.
+
+ This parameter is only supported for `gpt-image-1`. Must be one of `png`,
+ `jpeg`, or `webp`.
+ """
+
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]]
"""The quality of the image that will be generated.
- `hd` creates images with finer details and greater consistency across the image.
- This param is only supported for `dall-e-3`.
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
"""
response_format: Optional[Literal["url", "b64_json"]]
- """The format in which the generated images are returned.
+ """The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned.
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
- image has been generated.
+ image has been generated. This parameter isn't supported for `gpt-image-1` which
+ will always return base64-encoded images.
"""
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
"""The size of the generated images.
- Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
- of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
+ Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
+ `auto` (default value) for `gpt-image-1`, one of `256x256`, `512x512`, or
+ `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792`
+ for `dall-e-3`.
"""
style: Optional[Literal["vivid", "natural"]]
"""The style of the generated images.
- Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
- generating hyper-real and dramatic images. Natural causes the model to produce
- more natural, less hyper-real looking images. This param is only supported for
- `dall-e-3`.
+ This parameter is only supported for `dall-e-3`. Must be one of `vivid` or
+ `natural`. Vivid causes the model to lean towards generating hyper-real and
+ dramatic images. Natural causes the model to produce more natural, less
+ hyper-real looking images.
"""
user: str
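The new generation parameters are `gpt-image-1`-only; the sketch below loosely mirrors the test case added later in this commit, with the same placeholder prompt and a self-consistent output format.

```python
from openai import OpenAI

client = OpenAI()

result = client.images.generate(
    prompt="A cute baby sea otter",
    model="gpt-image-1",
    background="transparent",   # requires a png or webp output format
    moderation="low",
    output_format="webp",
    output_compression=100,     # only applies to webp/jpeg output
    quality="medium",
    size="1024x1536",           # portrait; `auto` is the default
)
```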
src/openai/types/image_model.py
@@ -4,4 +4,4 @@ from typing_extensions import Literal, TypeAlias
__all__ = ["ImageModel"]
-ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"]
+ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1"]
src/openai/types/images_response.py
@@ -1,14 +1,41 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
+from typing import List, Optional
from .image import Image
from .._models import BaseModel
-__all__ = ["ImagesResponse"]
+__all__ = ["ImagesResponse", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+ image_tokens: int
+ """The number of image tokens in the input prompt."""
+
+ text_tokens: int
+ """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens (images and text) in the input prompt."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """The input tokens detailed information for the image generation."""
+
+ output_tokens: int
+ """The number of image tokens in the output image."""
+
+ total_tokens: int
+ """The total number of tokens (images and text) used for the image generation."""
class ImagesResponse(BaseModel):
created: int
+ """The Unix timestamp (in seconds) of when the image was created."""
+
+ data: Optional[List[Image]] = None
+ """The list of generated images."""
- data: List[Image]
+ usage: Optional[Usage] = None
+ """For `gpt-image-1` only, the token usage information for the image generation."""
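`usage` is only populated for `gpt-image-1`, so token accounting should tolerate `None`. A small sketch assuming `result` is an `ImagesResponse` from an earlier call.

```python
from openai.types.images_response import ImagesResponse

def log_image_usage(result: ImagesResponse) -> None:
    # `usage` is None for dall-e-2 / dall-e-3 responses.
    if result.usage is None:
        return
    details = result.usage.input_tokens_details
    print(
        f"input={result.usage.input_tokens} "
        f"(image={details.image_tokens}, text={details.text_tokens}), "
        f"output={result.usage.output_tokens}, total={result.usage.total_tokens}"
    )
```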
tests/api_resources/fine_tuning/checkpoints/test_permissions.py
@@ -117,19 +117,19 @@ class TestPermissions:
fine_tuned_model_checkpoint="",
)
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert response.is_closed is True
@@ -137,11 +137,11 @@ class TestPermissions:
permission = response.parse()
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -151,14 +151,20 @@ class TestPermissions:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
+ client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
+ permission_id="",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
@@ -260,19 +266,19 @@ class TestAsyncPermissions:
fine_tuned_model_checkpoint="",
)
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert response.is_closed is True
@@ -280,11 +286,11 @@ class TestAsyncPermissions:
permission = response.parse()
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
- "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -294,12 +300,18 @@ class TestAsyncPermissions:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
- "",
+ permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+ fine_tuned_model_checkpoint="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
+ await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
+ permission_id="",
+ fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
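With `permission_id` promoted into the path, the delete call now takes both identifiers as keyword arguments. A sketch using the placeholder IDs from the tests above.

```python
from openai import OpenAI

client = OpenAI()

# IDs below are the placeholder values used in the tests above.
deleted = client.fine_tuning.checkpoints.permissions.delete(
    permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
    fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
print(deleted)
```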
tests/api_resources/test_evals.py
@@ -74,7 +74,6 @@ class TestEvals:
],
metadata={"foo": "string"},
name="name",
- share_with_openai=True,
)
assert_matches_type(EvalCreateResponse, eval, path=["response"])
@@ -350,7 +349,6 @@ class TestAsyncEvals:
],
metadata={"foo": "string"},
name="name",
- share_with_openai=True,
)
assert_matches_type(EvalCreateResponse, eval, path=["response"])
tests/api_resources/test_images.py
@@ -76,6 +76,7 @@ class TestImages:
mask=b"raw file contents",
model="string",
n=1,
+ quality="high",
response_format="url",
size="1024x1024",
user="user-1234",
@@ -119,9 +120,13 @@ class TestImages:
def test_method_generate_with_all_params(self, client: OpenAI) -> None:
image = client.images.generate(
prompt="A cute baby sea otter",
+ background="transparent",
model="string",
+ moderation="low",
n=1,
- quality="standard",
+ output_compression=100,
+ output_format="png",
+ quality="medium",
response_format="url",
size="1024x1024",
style="vivid",
@@ -216,6 +221,7 @@ class TestAsyncImages:
mask=b"raw file contents",
model="string",
n=1,
+ quality="high",
response_format="url",
size="1024x1024",
user="user-1234",
@@ -259,9 +265,13 @@ class TestAsyncImages:
async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) -> None:
image = await async_client.images.generate(
prompt="A cute baby sea otter",
+ background="transparent",
model="string",
+ moderation="low",
n=1,
- quality="standard",
+ output_compression=100,
+ output_format="png",
+ quality="medium",
response_format="url",
size="1024x1024",
style="vivid",
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml
-openapi_spec_hash: c855121b2b2324b99499c9244c21d24d
-config_hash: d20837393b73efdb19cd08e04c1cc9a1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml
+openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03
+config_hash: b597cd9a31e9e5ec709e2eefb4c54122
api.md
@@ -277,7 +277,7 @@ Methods:
- <code title="post /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">create</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_create_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_create_response.py">SyncPage[PermissionCreateResponse]</a></code>
- <code title="get /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">retrieve</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_retrieve_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py">PermissionRetrieveResponse</a></code>
-- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
+- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(permission_id, \*, fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
# VectorStores
@@ -689,6 +689,10 @@ from openai.types.responses import (
ResponseOutputRefusal,
ResponseOutputText,
ResponseReasoningItem,
+ ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartDoneEvent,
+ ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent,
ResponseRefusalDeltaEvent,
ResponseRefusalDoneEvent,
ResponseStatus,