Commit a85ad051
Changed files (19)
examples/image_stream.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+import base64
+from pathlib import Path
+
+from openai import OpenAI
+
+client = OpenAI()
+
+
+def main() -> None:
+ """Example of OpenAI image streaming with partial images."""
+ stream = client.images.generate(
+ model="gpt-image-1",
+ prompt="A cute baby sea otter",
+ n=1,
+ size="1024x1024",
+ stream=True,
+ partial_images=3,
+ )
+
+ for event in stream:
+ if event.type == "image_generation.partial_image":
+ print(f" Partial image {event.partial_image_index + 1}/3 received")
+ print(f" Size: {len(event.b64_json)} characters (base64)")
+
+ # Save partial image to file
+ filename = f"partial_{event.partial_image_index + 1}.png"
+ image_data = base64.b64decode(event.b64_json)
+ with open(filename, "wb") as f:
+ f.write(image_data)
+            print(f"  💾 Saved to: {Path(filename).resolve()}")
+
+ elif event.type == "image_generation.completed":
+            print(f"\n✅ Final image completed!")
+ print(f" Size: {len(event.b64_json)} characters (base64)")
+
+ # Save final image to file
+ filename = "final_image.png"
+ image_data = base64.b64decode(event.b64_json)
+ with open(filename, "wb") as f:
+ f.write(image_data)
+            print(f"  💾 Saved to: {Path(filename).resolve()}")
+
+ else:
+            print(f"❓ Unknown event: {event}")  # type: ignore[unreachable]
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except Exception as error:
+ print(f"Error generating image: {error}")
\ No newline at end of file
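
For contrast, here is a minimal async counterpart to the example above: a sketch, not part of this commit, assuming the `AsyncImages.generate` overloads added below emit the same event types through `AsyncStream[ImageGenStreamEvent]`.

#!/usr/bin/env python

import asyncio
import base64

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    """Async sketch of the same partial-image streaming flow."""
    stream = await client.images.generate(
        model="gpt-image-1",
        prompt="A cute baby sea otter",
        size="1024x1024",
        stream=True,
        partial_images=2,
    )

    async for event in stream:
        if event.type == "image_generation.partial_image":
            # Each partial frame arrives base64-encoded, same as the sync API.
            with open(f"partial_{event.partial_image_index + 1}.png", "wb") as f:
                f.write(base64.b64decode(event.b64_json))
        elif event.type == "image_generation.completed":
            with open("final_image.png", "wb") as f:
                f.write(base64.b64decode(event.b64_json))


if __name__ == "__main__":
    asyncio.run(main())
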
src/openai/resources/images.py
@@ -3,20 +3,23 @@
from __future__ import annotations
from typing import List, Union, Mapping, Optional, cast
-from typing_extensions import Literal
+from typing_extensions import Literal, overload
import httpx
from .. import _legacy_response
from ..types import image_edit_params, image_generate_params, image_create_variation_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.image_model import ImageModel
from ..types.images_response import ImagesResponse
+from ..types.image_gen_stream_event import ImageGenStreamEvent
+from ..types.image_edit_stream_event import ImageEditStreamEvent
__all__ = ["Images", "AsyncImages"]
@@ -114,21 +117,25 @@ class Images(SyncAPIResource):
cast_to=ImagesResponse,
)
+ @overload
def edit(
self,
*,
image: Union[FileTypes, List[FileTypes]],
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
| NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -162,6 +169,234 @@ class Images(SyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
+ mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+ indicate where `image` should be edited. If there are multiple images provided,
+          the mask will be applied to the first image. Must be a valid PNG file, less than
+ 4MB, and have the same dimensions as `image`.
+
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ default value is `png`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Stream[ImageEditStreamEvent]:
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
+
+ Args:
+ image: The image(s) to edit. Must be a supported image file or an array of images.
+
+ For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 50MB. You can provide up to 16 images.
+
+ For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ file less than 4MB.
+
+ prompt: A text description of the desired image(s). The maximum length is 1000
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
+ mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+ indicate where `image` should be edited. If there are multiple images provided,
+          the mask will be applied to the first image. Must be a valid PNG file, less than
+ 4MB, and have the same dimensions as `image`.
+
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ default value is `png`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ stream: bool,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
+
+ Args:
+ image: The image(s) to edit. Must be a supported image file or an array of images.
+
+ For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 50MB. You can provide up to 16 images.
+
+ For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ file less than 4MB.
+
+ prompt: A text description of the desired image(s). The maximum length is 1000
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
          the mask will be applied to the first image. Must be a valid PNG file, less than
@@ -181,6 +416,10 @@ class Images(SyncAPIResource):
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
default value is `png`.
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
@@ -206,19 +445,51 @@ class Images(SyncAPIResource):
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ @required_args(["image", "prompt"], ["image", "prompt", "stream"])
+ def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
body = deepcopy_minimal(
{
"image": image,
"prompt": prompt,
"background": background,
+ "input_fidelity": input_fidelity,
"mask": mask,
"model": model,
"n": n,
"output_compression": output_compression,
"output_format": output_format,
+ "partial_images": partial_images,
"quality": quality,
"response_format": response_format,
"size": size,
+ "stream": stream,
"user": user,
}
)
@@ -229,15 +500,891 @@ class Images(SyncAPIResource):
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return self._post(
"/images/edits",
- body=maybe_transform(body, image_edit_params.ImageEditParams),
+ body=maybe_transform(
+ body,
+ image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
+ ),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImagesResponse,
+ stream=stream or False,
+ stream_cls=Stream[ImageEditStreamEvent],
+ )
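
From the caller's side, the overloads above make the return type track the `stream` argument. A usage sketch under the assumption that streaming edits require `gpt-image-1` (the file path and prompt are illustrative):

from openai import OpenAI

client = OpenAI()

# stream omitted: the Literal[False] overload applies, so type checkers
# infer ImagesResponse.
result = client.images.edit(
    image=open("otter.png", "rb"),
    prompt="Add a red hat",
)

# stream=True selects the Literal[True] overload, so the inferred return
# type is Stream[ImageEditStreamEvent].
for event in client.images.edit(
    image=open("otter.png", "rb"),
    prompt="Add a red hat",
    model="gpt-image-1",
    stream=True,
    partial_images=2,
):
    print(event.type)
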
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse:
+ """
+ Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
+
+ Args:
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
+
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
+
+ n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ `n=1` is supported.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+          `gpt-image-1`, which will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Stream[ImageGenStreamEvent]:
+ """
+ Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
+
+ Args:
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
+
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
+
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
+
+ n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ `n=1` is supported.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+          `gpt-image-1`, which will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def generate(
+ self,
+ *,
+ prompt: str,
+ stream: bool,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
+ """
+ Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
+
+ Args:
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
+
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
+
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
+
+ n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ `n=1` is supported.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+          `gpt-image-1`, which will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
+ return self._post(
+ "/images/generations",
+ body=maybe_transform(
+ {
+ "prompt": prompt,
+ "background": background,
+ "model": model,
+ "moderation": moderation,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "response_format": response_format,
+ "size": size,
+ "stream": stream,
+ "style": style,
+ "user": user,
+ },
+ image_generate_params.ImageGenerateParamsStreaming
+ if stream
+ else image_generate_params.ImageGenerateParamsNonStreaming,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImagesResponse,
+ stream=stream or False,
+ stream_cls=Stream[ImageGenStreamEvent],
+ )
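
The `@required_args(["prompt"], ["prompt", "stream"])` decorator is the runtime complement to the overloads: a call must supply every argument in at least one listed set, otherwise a `TypeError` is raised. A caller-side sketch (the prompt text is illustrative):

from openai import OpenAI
from openai.types.images_response import ImagesResponse

client = OpenAI()

# Non-streaming: resolves to the Literal[False] overload -> ImagesResponse.
resp = client.images.generate(prompt="a lighthouse at dusk")
assert isinstance(resp, ImagesResponse)

# Streaming: resolves to the Literal[True] overload -> Stream[ImageGenStreamEvent].
for event in client.images.generate(
    model="gpt-image-1",
    prompt="a lighthouse at dusk",
    stream=True,
    partial_images=1,
):
    if event.type == "image_generation.completed":
        print("done")
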
+
+
+class AsyncImages(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncImagesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncImagesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncImagesWithStreamingResponse(self)
+
+ async def create_variation(
+ self,
+ *,
+ image: FileTypes,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse:
+ """Creates a variation of a given image.
+
+ This endpoint only supports `dall-e-2`.
+
+ Args:
+ image: The image to use as the basis for the variation(s). Must be a valid PNG file,
+ less than 4MB, and square.
+
+ model: The model to use for image generation. Only `dall-e-2` is supported at this
+ time.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated.
+
+ size: The size of the generated images. Must be one of `256x256`, `512x512`, or
+ `1024x1024`.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "image": image,
+ "model": model,
+ "n": n,
+ "response_format": response_format,
+ "size": size,
+ "user": user,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/images/variations",
+ body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ImagesResponse,
+ )
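
A minimal async usage sketch for the variations endpoint, which this commit leaves functionally unchanged (the file path is illustrative):

import asyncio

from openai import AsyncOpenAI


async def make_variations() -> None:
    client = AsyncOpenAI()
    resp = await client.images.create_variation(
        image=open("otter.png", "rb"),
        model="dall-e-2",  # the only model this endpoint supports
        n=2,
        size="512x512",
    )
    print(resp.data)


asyncio.run(make_variations())
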
+
+ @overload
+ async def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse:
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
+
+ Args:
+ image: The image(s) to edit. Must be a supported image file or an array of images.
+
+ For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 50MB. You can provide up to 16 images.
+
+ For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ file less than 4MB.
+
+ prompt: A text description of the desired image(s). The maximum length is 1000
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
+ mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+ indicate where `image` should be edited. If there are multiple images provided,
+          the mask will be applied to the first image. Must be a valid PNG file, less than
+ 4MB, and have the same dimensions as `image`.
+
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ default value is `png`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncStream[ImageEditStreamEvent]:
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
+
+ Args:
+ image: The image(s) to edit. Must be a supported image file or an array of images.
+
+ For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 50MB. You can provide up to 16 images.
+
+ For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ file less than 4MB.
+
+ prompt: A text description of the desired image(s). The maximum length is 1000
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
+ mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+ indicate where `image` should be edited. If there are multiple images provided,
+          the mask will be applied to the first image. Must be a valid PNG file, less than
+ 4MB, and have the same dimensions as `image`.
+
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ default value is `png`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ stream: bool,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
+ """Creates an edited or extended image given one or more source images and a
+ prompt.
+
+ This endpoint only supports `gpt-image-1` and `dall-e-2`.
+
+ Args:
+ image: The image(s) to edit. Must be a supported image file or an array of images.
+
+ For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ 50MB. You can provide up to 16 images.
+
+ For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ file less than 4MB.
+
+ prompt: A text description of the desired image(s). The maximum length is 1000
+ characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+
+ stream: Edit the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+ parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ `opaque` or `auto` (default value). When `auto` is used, the model will
+ automatically determine the best background for the image.
+
+ If `transparent`, the output format needs to support transparency, so it should
+ be set to either `png` (default value) or `webp`.
+
+ input_fidelity: Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+
+ mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
+ indicate where `image` should be edited. If there are multiple images provided,
+          the mask will be applied to the first image. Must be a valid PNG file, less than
+ 4MB, and have the same dimensions as `image`.
+
+ model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ is used.
+
+ n: The number of images to generate. Must be between 1 and 10.
+
+ output_compression: The compression level (0-100%) for the generated images. This parameter is only
+ supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ defaults to 100.
+
+ output_format: The format in which the generated images are returned. This parameter is only
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ default value is `png`.
+
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+ only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ Defaults to `auto`.
+
+ response_format: The format in which the generated images are returned. Must be one of `url` or
+ `b64_json`. URLs are only valid for 60 minutes after the image has been
+ generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ will always return base64-encoded images.
+
+ size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+
+ user: A unique identifier representing your end-user, which can help OpenAI to monitor
+ and detect abuse.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["image", "prompt"], ["image", "prompt", "stream"])
+ async def edit(
+ self,
+ *,
+ image: Union[FileTypes, List[FileTypes]],
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN,
+ mask: FileTypes | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
+ body = deepcopy_minimal(
+ {
+ "image": image,
+ "prompt": prompt,
+ "background": background,
+ "input_fidelity": input_fidelity,
+ "mask": mask,
+ "model": model,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "response_format": response_format,
+ "size": size,
+ "stream": stream,
+ "user": user,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/images/edits",
+ body=await async_maybe_transform(
+ body,
+ image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
+ ),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[ImageEditStreamEvent],
)
- def generate(
+ @overload
+ async def generate(
self,
*,
prompt: str,
@@ -247,12 +1394,14 @@ class Images(SyncAPIResource):
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -296,6 +1445,10 @@ class Images(SyncAPIResource):
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
@@ -314,6 +1467,10 @@ class Images(SyncAPIResource):
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
@@ -331,140 +1488,28 @@ class Images(SyncAPIResource):
timeout: Override the client-level default timeout for this request, in seconds
"""
- return self._post(
- "/images/generations",
- body=maybe_transform(
- {
- "prompt": prompt,
- "background": background,
- "model": model,
- "moderation": moderation,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "style": style,
- "user": user,
- },
- image_generate_params.ImageGenerateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
-
-
-class AsyncImages(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncImagesWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncImagesWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/openai/openai-python#with_streaming_response
- """
- return AsyncImagesWithStreamingResponse(self)
-
- async def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
- user: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """Creates a variation of a given image.
-
- This endpoint only supports `dall-e-2`.
-
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
-
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
-
- n: The number of images to generate. Must be between 1 and 10.
-
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
-
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
-
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/variations",
- body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
+ ...
- async def edit(
+ @overload
+ async def generate(
self,
*,
- image: Union[FileTypes, List[FileTypes]],
prompt: str,
+ stream: Literal[True],
background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
- mask: FileTypes | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
- quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
| NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -472,23 +1517,19 @@ class AsyncImages(AsyncAPIResource):
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
- """Creates an edited or extended image given one or more source images and a
- prompt.
-
- This endpoint only supports `gpt-image-1` and `dall-e-2`.
+ ) -> AsyncStream[ImageGenStreamEvent]:
+ """
+ Creates an image given a prompt.
+ [Learn more](https://platform.openai.com/docs/guides/images).
Args:
- image: The image(s) to edit. Must be a supported image file or an array of images.
-
- For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
-
- For `dall-e-2`, you can only provide one image, and it should be a square `png`
- file less than 4MB.
+ prompt: A text description of the desired image(s). The maximum length is 32000
+ characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ for `dall-e-3`.
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
background: Allows you to set transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
@@ -498,37 +1539,49 @@ class AsyncImages(AsyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. If there are multiple images provided,
- the mask will be applied on the first image. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
+ model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ `gpt-image-1` is used.
- model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- is used.
+ moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+ be either `low` for less restrictive filtering or `auto` (default value).
- n: The number of images to generate. Must be between 1 and 10.
+ n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ `n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
- supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
- default value is `png`.
+ supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- Defaults to `auto`.
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- will always return base64-encoded images.
+ quality: The quality of the image that will be generated.
+
+ - `auto` (default value) will automatically select the best quality for the
+ given model.
+ - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ - `hd` and `standard` are supported for `dall-e-3`.
+ - `standard` is the only option for `dall-e-2`.
+
+ response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+ returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ after the image has been generated. This parameter isn't supported for
+ `gpt-image-1` which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
- `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+
+ style: The style of the generated images. This parameter is only supported for
+ `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ towards generating hyper-real and dramatic images. Natural causes the model to
+ produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
@@ -542,47 +1595,21 @@ class AsyncImages(AsyncAPIResource):
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "background": background,
- "mask": mask,
- "model": model,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/edits",
- body=await async_maybe_transform(body, image_edit_params.ImageEditParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
+ ...
+ @overload
async def generate(
self,
*,
prompt: str,
+ stream: bool,
background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
size: Optional[
@@ -597,7 +1624,7 @@ class AsyncImages(AsyncAPIResource):
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ImagesResponse:
+ ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
@@ -607,6 +1634,10 @@ class AsyncImages(AsyncAPIResource):
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
+ stream: Generate the image in streaming mode. Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+
background: Allows you to set transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
@@ -632,6 +1663,10 @@ class AsyncImages(AsyncAPIResource):
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ partial_images: The number of partial images to generate. This parameter is used for streaming
+ responses that return partial images. Value must be between 0 and 3. When set to
+ 0, the response will be a single image sent in one streaming event.
+
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
@@ -667,6 +1702,36 @@ class AsyncImages(AsyncAPIResource):
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ async def generate(
+ self,
+ *,
+ prompt: str,
+ background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
+ model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+ moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+ output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+ partial_images: Optional[int] | NotGiven = NOT_GIVEN,
+ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
+ response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
+ size: Optional[
+ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+ ]
+ | NotGiven = NOT_GIVEN,
+ stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
+ style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
return await self._post(
"/images/generations",
body=await async_maybe_transform(
@@ -678,18 +1743,24 @@ class AsyncImages(AsyncAPIResource):
"n": n,
"output_compression": output_compression,
"output_format": output_format,
+ "partial_images": partial_images,
"quality": quality,
"response_format": response_format,
"size": size,
+ "stream": stream,
"style": style,
"user": user,
},
- image_generate_params.ImageGenerateParams,
+ image_generate_params.ImageGenerateParamsStreaming
+ if stream
+ else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[ImageGenStreamEvent],
)
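For orientation, here is a minimal sketch of how the async streaming overload added above could be consumed. It assumes an `AsyncOpenAI` client with credentials available in the environment; the model and prompt values are illustrative.

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    # stream=True selects the AsyncStream[ImageGenStreamEvent] overload.
    stream = await client.images.generate(
        model="gpt-image-1",
        prompt="A watercolor lighthouse at dusk",
        stream=True,
        partial_images=2,
    )
    async for event in stream:
        if event.type == "image_generation.partial_image":
            print(f"partial {event.partial_image_index}: {len(event.b64_json)} base64 chars")
        elif event.type == "image_generation.completed":
            print(f"completed: {event.size}, {event.usage.total_tokens} tokens")


asyncio.run(main())
```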
src/openai/types/responses/response_output_refusal.py
@@ -9,7 +9,7 @@ __all__ = ["ResponseOutputRefusal"]
class ResponseOutputRefusal(BaseModel):
refusal: str
- """The refusal explanationfrom the model."""
+ """The refusal explanation from the model."""
type: Literal["refusal"]
"""The type of the refusal. Always `refusal`."""
src/openai/types/responses/response_output_refusal_param.py
@@ -9,7 +9,7 @@ __all__ = ["ResponseOutputRefusalParam"]
class ResponseOutputRefusalParam(TypedDict, total=False):
refusal: Required[str]
- """The refusal explanationfrom the model."""
+ """The refusal explanation from the model."""
type: Required[Literal["refusal"]]
"""The type of the refusal. Always `refusal`."""
src/openai/types/responses/tool.py
@@ -124,6 +124,13 @@ class ImageGeneration(BaseModel):
One of `transparent`, `opaque`, or `auto`. Default: `auto`.
"""
+ input_fidelity: Optional[Literal["high", "low"]] = None
+ """
+ Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ """
+
input_image_mask: Optional[ImageGenerationInputImageMask] = None
"""Optional mask for inpainting.
src/openai/types/responses/tool_param.py
@@ -125,6 +125,13 @@ class ImageGeneration(TypedDict, total=False):
One of `transparent`, `opaque`, or `auto`. Default: `auto`.
"""
+ input_fidelity: Optional[Literal["high", "low"]]
+ """
+ Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ """
+
input_image_mask: ImageGenerationInputImageMask
"""Optional mask for inpainting.
src/openai/types/__init__.py
@@ -60,15 +60,19 @@ from .container_list_params import ContainerListParams as ContainerListParams
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse
from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
+from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
from .container_create_params import ContainerCreateParams as ContainerCreateParams
from .container_list_response import ContainerListResponse as ContainerListResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
+from .image_edit_stream_event import ImageEditStreamEvent as ImageEditStreamEvent
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
from .container_create_response import ContainerCreateResponse as ContainerCreateResponse
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
+from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent
+from .image_edit_completed_event import ImageEditCompletedEvent as ImageEditCompletedEvent
from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
@@ -79,8 +83,10 @@ from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunk
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
+from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
+from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent
from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
src/openai/types/image_edit_completed_event.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ImageEditCompletedEvent", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+ image_tokens: int
+ """The number of image tokens in the input prompt."""
+
+ text_tokens: int
+ """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens (images and text) in the input prompt."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """The input tokens detailed information for the image generation."""
+
+ output_tokens: int
+ """The number of image tokens in the output image."""
+
+ total_tokens: int
+ """The total number of tokens (images and text) used for the image generation."""
+
+
+class ImageEditCompletedEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded final edited image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the edited image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the edited image."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the edited image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the edited image."""
+
+ type: Literal["image_edit.completed"]
+ """The type of the event. Always `image_edit.completed`."""
+
+ usage: Usage
+ """For `gpt-image-1` only, the token usage information for the image generation."""
src/openai/types/image_edit_params.py
@@ -8,10 +8,10 @@ from typing_extensions import Literal, Required, TypedDict
from .._types import FileTypes
from .image_model import ImageModel
-__all__ = ["ImageEditParams"]
+__all__ = ["ImageEditParamsBase", "ImageEditParamsNonStreaming", "ImageEditParamsStreaming"]
-class ImageEditParams(TypedDict, total=False):
+class ImageEditParamsBase(TypedDict, total=False):
image: Required[Union[FileTypes, List[FileTypes]]]
"""The image(s) to edit. Must be a supported image file or an array of images.
@@ -40,6 +40,13 @@ class ImageEditParams(TypedDict, total=False):
be set to either `png` (default value) or `webp`.
"""
+ input_fidelity: Optional[Literal["high", "low"]]
+ """
+ Control how much effort the model will exert to match the style and features,
+ especially facial features, of input images. This parameter is only supported
+ for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ """
+
mask: FileTypes
"""An additional image whose fully transparent areas (e.g.
@@ -72,6 +79,14 @@ class ImageEditParams(TypedDict, total=False):
`jpeg`, or `webp`. The default value is `png`.
"""
+ partial_images: Optional[int]
+ """The number of partial images to generate.
+
+ This parameter is used for streaming responses that return partial images. Value
+ must be between 0 and 3. When set to 0, the response will be a single image sent
+ in one streaming event.
+ """
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
"""The quality of the image that will be generated.
@@ -101,3 +116,26 @@ class ImageEditParams(TypedDict, total=False):
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
+
+
+class ImageEditParamsNonStreaming(ImageEditParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """Edit the image in streaming mode.
+
+ Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+ """
+
+
+class ImageEditParamsStreaming(ImageEditParamsBase):
+ stream: Required[Literal[True]]
+ """Edit the image in streaming mode.
+
+ Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information.
+ """
+
+
+ImageEditParams = Union[ImageEditParamsNonStreaming, ImageEditParamsStreaming]
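As a usage note, the streaming variant of this union pairs with the `stream=True` overload of `images.edit`. A hedged sketch, passing raw bytes the way the tests below do (the prompt and byte payload are placeholders):

```python
from openai import OpenAI

client = OpenAI()

# stream=True routes the request body through ImageEditParamsStreaming.
stream = client.images.edit(
    image=b"raw file contents",  # placeholder; any FileTypes value works
    prompt="Give the subject a red beret",
    stream=True,
    partial_images=2,
    input_fidelity="high",
)
for event in stream:
    print(event.type)
```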
src/openai/types/image_edit_partial_image_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ImageEditPartialImageEvent"]
+
+
+class ImageEditPartialImageEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded partial image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the requested edited image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the requested edited image."""
+
+ partial_image_index: int
+ """0-based index for the partial image (streaming)."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the requested edited image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the requested edited image."""
+
+ type: Literal["image_edit.partial_image"]
+ """The type of the event. Always `image_edit.partial_image`."""
src/openai/types/image_edit_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .image_edit_completed_event import ImageEditCompletedEvent
+from .image_edit_partial_image_event import ImageEditPartialImageEvent
+
+__all__ = ["ImageEditStreamEvent"]
+
+ImageEditStreamEvent: TypeAlias = Annotated[
+ Union[ImageEditPartialImageEvent, ImageEditCompletedEvent], PropertyInfo(discriminator="type")
+]
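Because the union is discriminated on `type`, static checkers can narrow a handler with an ordinary if/else. A small illustrative sketch:

```python
from openai.types import ImageEditStreamEvent


def handle(event: ImageEditStreamEvent) -> None:
    if event.type == "image_edit.partial_image":
        # narrowed to ImageEditPartialImageEvent
        print(f"partial {event.partial_image_index} ({event.output_format})")
    else:
        # narrowed to ImageEditCompletedEvent
        print(f"finished at {event.size}, {event.usage.output_tokens} image tokens")
```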
src/openai/types/image_gen_completed_event.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+ image_tokens: int
+ """The number of image tokens in the input prompt."""
+
+ text_tokens: int
+ """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens (images and text) in the input prompt."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """The input tokens detailed information for the image generation."""
+
+ output_tokens: int
+ """The number of image tokens in the output image."""
+
+ total_tokens: int
+ """The total number of tokens (images and text) used for the image generation."""
+
+
+class ImageGenCompletedEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the generated image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the generated image."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the generated image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the generated image."""
+
+ type: Literal["image_generation.completed"]
+ """The type of the event. Always `image_generation.completed`."""
+
+ usage: Usage
+ """For `gpt-image-1` only, the token usage information for the image generation."""
src/openai/types/image_gen_partial_image_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ImageGenPartialImageEvent"]
+
+
+class ImageGenPartialImageEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded partial image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the requested image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the requested image."""
+
+ partial_image_index: int
+ """0-based index for the partial image (streaming)."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the requested image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the requested image."""
+
+ type: Literal["image_generation.partial_image"]
+ """The type of the event. Always `image_generation.partial_image`."""
src/openai/types/image_gen_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .image_gen_completed_event import ImageGenCompletedEvent
+from .image_gen_partial_image_event import ImageGenPartialImageEvent
+
+__all__ = ["ImageGenStreamEvent"]
+
+ImageGenStreamEvent: TypeAlias = Annotated[
+ Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
+]
src/openai/types/image_generate_params.py
@@ -7,10 +7,10 @@ from typing_extensions import Literal, Required, TypedDict
from .image_model import ImageModel
-__all__ = ["ImageGenerateParams"]
+__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"]
-class ImageGenerateParams(TypedDict, total=False):
+class ImageGenerateParamsBase(TypedDict, total=False):
prompt: Required[str]
"""A text description of the desired image(s).
@@ -62,6 +62,14 @@ class ImageGenerateParams(TypedDict, total=False):
`jpeg`, or `webp`.
"""
+ partial_images: Optional[int]
+ """The number of partial images to generate.
+
+ This parameter is used for streaming responses that return partial images. Value
+ must be between 0 and 3. When set to 0, the response will be a single image sent
+ in one streaming event.
+ """
+
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]]
"""The quality of the image that will be generated.
@@ -107,3 +115,26 @@ class ImageGenerateParams(TypedDict, total=False):
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
+
+
+class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False):
+ stream: Optional[Literal[False]]
+ """Generate the image in streaming mode.
+
+ Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+ """
+
+
+class ImageGenerateParamsStreaming(ImageGenerateParamsBase):
+ stream: Required[Literal[True]]
+ """Generate the image in streaming mode.
+
+ Defaults to `false`. See the
+ [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
+ for more information. This parameter is only supported for `gpt-image-1`.
+ """
+
+
+ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming]
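The base/streaming/non-streaming split lets type checkers tie `stream` to the expected payload shape. An illustrative pair of annotated literals (values are placeholders):

```python
from openai.types import image_generate_params

non_streaming: image_generate_params.ImageGenerateParamsNonStreaming = {
    "prompt": "A paper crane on a desk",
    "stream": False,  # optional here, and may only be False or None
}
streaming: image_generate_params.ImageGenerateParamsStreaming = {
    "prompt": "A paper crane on a desk",
    "partial_images": 2,
    "stream": True,  # Required[Literal[True]] in the streaming variant
}
```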
src/openai/_streaming.py
@@ -59,7 +59,12 @@ class Stream(Generic[_T]):
if sse.data.startswith("[DONE]"):
break
- if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."):
+ if sse.event is None or (
+     sse.event.startswith("response.")
+     or sse.event.startswith("transcript.")
+     or sse.event.startswith("image_edit.")
+     or sse.event.startswith("image_generation.")
+ ):
data = sse.json()
if is_mapping(data) and data.get("error"):
message = None
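The new prefixes simply widen the set of SSE event names whose payloads are parsed as JSON data. The check is equivalent to a predicate like this hypothetical standalone version:

```python
from typing import Optional

DATA_EVENT_PREFIXES = ("response.", "transcript.", "image_edit.", "image_generation.")


def is_data_event(event: Optional[str]) -> bool:
    # Unnamed events and the known prefixed families carry JSON bodies.
    return event is None or event.startswith(DATA_EVENT_PREFIXES)


assert is_data_event("image_generation.partial_image")
assert is_data_event(None)
assert not is_data_event("ping")
```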
tests/api_resources/test_images.py
@@ -61,7 +61,7 @@ class TestImages:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_edit(self, client: OpenAI) -> None:
+ def test_method_edit_overload_1(self, client: OpenAI) -> None:
image = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -69,25 +69,28 @@ class TestImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_method_edit_with_all_params(self, client: OpenAI) -> None:
+ def test_method_edit_with_all_params_overload_1(self, client: OpenAI) -> None:
image = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
background="transparent",
+ input_fidelity="high",
mask=b"raw file contents",
model="string",
n=1,
output_compression=100,
output_format="png",
+ partial_images=1,
quality="high",
response_format="url",
size="1024x1024",
+ stream=False,
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_raw_response_edit(self, client: OpenAI) -> None:
+ def test_raw_response_edit_overload_1(self, client: OpenAI) -> None:
response = client.images.with_raw_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -99,7 +102,7 @@ class TestImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_streaming_response_edit(self, client: OpenAI) -> None:
+ def test_streaming_response_edit_overload_1(self, client: OpenAI) -> None:
with client.images.with_streaming_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -113,14 +116,71 @@ class TestImages:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_generate(self, client: OpenAI) -> None:
+ def test_method_edit_overload_2(self, client: OpenAI) -> None:
+ image_stream = client.images.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ )
+ image_stream.response.close()
+
+ @parametrize
+ def test_method_edit_with_all_params_overload_2(self, client: OpenAI) -> None:
+ image_stream = client.images.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ background="transparent",
+ input_fidelity="high",
+ mask=b"raw file contents",
+ model="string",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="high",
+ response_format="url",
+ size="1024x1024",
+ user="user-1234",
+ )
+ image_stream.response.close()
+
+ @parametrize
+ def test_raw_response_edit_overload_2(self, client: OpenAI) -> None:
+ response = client.images.with_raw_response.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @parametrize
+ def test_streaming_response_edit_overload_2(self, client: OpenAI) -> None:
+ with client.images.with_streaming_response.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_generate_overload_1(self, client: OpenAI) -> None:
image = client.images.generate(
prompt="A cute baby sea otter",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_method_generate_with_all_params(self, client: OpenAI) -> None:
+ def test_method_generate_with_all_params_overload_1(self, client: OpenAI) -> None:
image = client.images.generate(
prompt="A cute baby sea otter",
background="transparent",
@@ -129,16 +189,18 @@ class TestImages:
n=1,
output_compression=100,
output_format="png",
+ partial_images=1,
quality="medium",
response_format="url",
size="1024x1024",
+ stream=False,
style="vivid",
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_raw_response_generate(self, client: OpenAI) -> None:
+ def test_raw_response_generate_overload_1(self, client: OpenAI) -> None:
response = client.images.with_raw_response.generate(
prompt="A cute baby sea otter",
)
@@ -149,7 +211,7 @@ class TestImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- def test_streaming_response_generate(self, client: OpenAI) -> None:
+ def test_streaming_response_generate_overload_1(self, client: OpenAI) -> None:
with client.images.with_streaming_response.generate(
prompt="A cute baby sea otter",
) as response:
@@ -161,6 +223,59 @@ class TestImages:
assert cast(Any, response.is_closed) is True
+ @parametrize
+ def test_method_generate_overload_2(self, client: OpenAI) -> None:
+ image_stream = client.images.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ )
+ image_stream.response.close()
+
+ @parametrize
+ def test_method_generate_with_all_params_overload_2(self, client: OpenAI) -> None:
+ image_stream = client.images.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ background="transparent",
+ model="string",
+ moderation="low",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="medium",
+ response_format="url",
+ size="1024x1024",
+ style="vivid",
+ user="user-1234",
+ )
+ image_stream.response.close()
+
+ @parametrize
+ def test_raw_response_generate_overload_2(self, client: OpenAI) -> None:
+ response = client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @parametrize
+ def test_streaming_response_generate_overload_2(self, client: OpenAI) -> None:
+ with client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncImages:
parametrize = pytest.mark.parametrize(
@@ -211,7 +326,7 @@ class TestAsyncImages:
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_edit(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_edit_overload_1(self, async_client: AsyncOpenAI) -> None:
image = await async_client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -219,25 +334,28 @@ class TestAsyncImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_edit_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
image = await async_client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
background="transparent",
+ input_fidelity="high",
mask=b"raw file contents",
model="string",
n=1,
output_compression=100,
output_format="png",
+ partial_images=1,
quality="high",
response_format="url",
size="1024x1024",
+ stream=False,
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_raw_response_edit(self, async_client: AsyncOpenAI) -> None:
+ async def test_raw_response_edit_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.images.with_raw_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -249,7 +367,7 @@ class TestAsyncImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_streaming_response_edit(self, async_client: AsyncOpenAI) -> None:
+ async def test_streaming_response_edit_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.images.with_streaming_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
@@ -263,14 +381,71 @@ class TestAsyncImages:
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_generate(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_edit_overload_2(self, async_client: AsyncOpenAI) -> None:
+ image_stream = await async_client.images.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ )
+ await image_stream.response.aclose()
+
+ @parametrize
+ async def test_method_edit_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
+ image_stream = await async_client.images.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ background="transparent",
+ input_fidelity="high",
+ mask=b"raw file contents",
+ model="string",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="high",
+ response_format="url",
+ size="1024x1024",
+ user="user-1234",
+ )
+ await image_stream.response.aclose()
+
+ @parametrize
+ async def test_raw_response_edit_overload_2(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.images.with_raw_response.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ await stream.close()
+
+ @parametrize
+ async def test_streaming_response_edit_overload_2(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.images.with_streaming_response.edit(
+ image=b"raw file contents",
+ prompt="A cute baby sea otter wearing a beret",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_generate_overload_1(self, async_client: AsyncOpenAI) -> None:
image = await async_client.images.generate(
prompt="A cute baby sea otter",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ async def test_method_generate_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
image = await async_client.images.generate(
prompt="A cute baby sea otter",
background="transparent",
@@ -279,16 +454,18 @@ class TestAsyncImages:
n=1,
output_compression=100,
output_format="png",
+ partial_images=1,
quality="medium",
response_format="url",
size="1024x1024",
+ stream=False,
style="vivid",
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_raw_response_generate(self, async_client: AsyncOpenAI) -> None:
+ async def test_raw_response_generate_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.images.with_raw_response.generate(
prompt="A cute baby sea otter",
)
@@ -299,7 +476,7 @@ class TestAsyncImages:
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
- async def test_streaming_response_generate(self, async_client: AsyncOpenAI) -> None:
+ async def test_streaming_response_generate_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.images.with_streaming_response.generate(
prompt="A cute baby sea otter",
) as response:
@@ -310,3 +487,56 @@ class TestAsyncImages:
assert_matches_type(ImagesResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_generate_overload_2(self, async_client: AsyncOpenAI) -> None:
+ image_stream = await async_client.images.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ )
+ await image_stream.response.aclose()
+
+ @parametrize
+ async def test_method_generate_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
+ image_stream = await async_client.images.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ background="transparent",
+ model="string",
+ moderation="low",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="medium",
+ response_format="url",
+ size="1024x1024",
+ style="vivid",
+ user="user-1234",
+ )
+ await image_stream.response.aclose()
+
+ @parametrize
+ async def test_raw_response_generate_overload_2(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.images.with_raw_response.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ await stream.close()
+
+ @parametrize
+ async def test_streaming_response_generate_overload_2(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.images.with_streaming_response.generate(
+ prompt="A cute baby sea otter",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c7dacca97e28bceff218684bb429481a70aa47aadad983ed9178bfda75ff4cd2.yml
-openapi_spec_hash: 28eb1bb901ca10d2e37db4606d2bcfa7
-config_hash: 167ad0ca036d0f023c78e6496b4311e8
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml
+openapi_spec_hash: d8b7d38911fead545adf3e4297956410
+config_hash: 5525bda35e48ea6387c6175c4d1651fa
api.md
@@ -127,7 +127,17 @@ Methods:
Types:
```python
-from openai.types import Image, ImageModel, ImagesResponse
+from openai.types import (
+ Image,
+ ImageEditCompletedEvent,
+ ImageEditPartialImageEvent,
+ ImageEditStreamEvent,
+ ImageGenCompletedEvent,
+ ImageGenPartialImageEvent,
+ ImageGenStreamEvent,
+ ImageModel,
+ ImagesResponse,
+)
```
Methods: