# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Mapping, Optional, cast
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import image_edit_params, image_generate_params, image_create_variation_params
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.image_model import ImageModel
from ..types.images_response import ImagesResponse
from ..types.image_gen_stream_event import ImageGenStreamEvent
from ..types.image_edit_stream_event import ImageEditStreamEvent

__all__ = ["Images", "AsyncImages"]


class Images(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> ImagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
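
        Example (an illustrative sketch; `client` is assumed to be a configured
        `OpenAI` instance):

            response = client.images.with_raw_response.generate(prompt="A red panda")
            print(response.headers.get("x-request-id"))
            images = response.parse()  # parse into an `ImagesResponse`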
  35        """
  36        return ImagesWithRawResponse(self)
  37
  38    @cached_property
  39    def with_streaming_response(self) -> ImagesWithStreamingResponse:
  40        """
  41        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
  42
  43        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
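
        Example (an illustrative sketch; `client` is assumed to be a configured
        `OpenAI` instance):

            with client.images.with_streaming_response.generate(prompt="A red panda") as response:
                print(response.headers.get("x-request-id"))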
  44        """
  45        return ImagesWithStreamingResponse(self)
  46
  47    def create_variation(
  48        self,
  49        *,
  50        image: FileTypes,
  51        model: Union[str, ImageModel, None] | Omit = omit,
  52        n: Optional[int] | Omit = omit,
  53        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  54        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
  55        user: str | Omit = omit,
  56        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  57        # The extra values given here take precedence over values defined on the client or passed to this method.
  58        extra_headers: Headers | None = None,
  59        extra_query: Query | None = None,
  60        extra_body: Body | None = None,
  61        timeout: float | httpx.Timeout | None | NotGiven = not_given,
  62    ) -> ImagesResponse:
  63        """Creates a variation of a given image.
  64
  65        This endpoint only supports `dall-e-2`.
  66
  67        Args:
  68          image: The image to use as the basis for the variation(s). Must be a valid PNG file,
  69              less than 4MB, and square.
  70
  71          model: The model to use for image generation. Only `dall-e-2` is supported at this
  72              time.
  73
  74          n: The number of images to generate. Must be between 1 and 10.
  75
  76          response_format: The format in which the generated images are returned. Must be one of `url` or
  77              `b64_json`. URLs are only valid for 60 minutes after the image has been
  78              generated.
  79
  80          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
  81              `1024x1024`.
  82
  83          user: A unique identifier representing your end-user, which can help OpenAI to monitor
  84              and detect abuse.
  85              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  86
  87          extra_headers: Send extra headers
  88
  89          extra_query: Add additional query parameters to the request
  90
  91          extra_body: Add additional JSON properties to the request
  92
  93          timeout: Override the client-level default timeout for this request, in seconds
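
        Example (an illustrative sketch; `otter.png` is a placeholder path and
        `client` is assumed to be a configured `OpenAI` instance):

            response = client.images.create_variation(
                image=open("otter.png", "rb"),
                n=2,
                size="512x512",
            )
            print(response.data[0].url)  # URLs expire 60 minutes after generation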
  94        """
  95        body = deepcopy_minimal(
  96            {
  97                "image": image,
  98                "model": model,
  99                "n": n,
 100                "response_format": response_format,
 101                "size": size,
 102                "user": user,
 103            }
 104        )
 105        files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
 106        # It should be noted that the actual Content-Type header that will be
 107        # sent to the server will contain a `boundary` parameter, e.g.
 108        # multipart/form-data; boundary=---abc--
 109        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
 110        return self._post(
 111            "/images/variations",
 112            body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
 113            files=files,
 114            options=make_request_options(
 115                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
 116            ),
 117            cast_to=ImagesResponse,
 118        )
 119
 120    @overload
 121    def edit(
 122        self,
 123        *,
 124        image: Union[FileTypes, SequenceNotStr[FileTypes]],
 125        prompt: str,
 126        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 127        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
 128        mask: FileTypes | Omit = omit,
 129        model: Union[str, ImageModel, None] | Omit = omit,
 130        n: Optional[int] | Omit = omit,
 131        output_compression: Optional[int] | Omit = omit,
 132        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 133        partial_images: Optional[int] | Omit = omit,
 134        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
 135        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 136        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
 137        stream: Optional[Literal[False]] | Omit = omit,
 138        user: str | Omit = omit,
 139        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 140        # The extra values given here take precedence over values defined on the client or passed to this method.
 141        extra_headers: Headers | None = None,
 142        extra_query: Query | None = None,
 143        extra_body: Body | None = None,
 144        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 145    ) -> ImagesResponse:
 146        """Creates an edited or extended image given one or more source images and a
 147        prompt.
 148
 149        This endpoint only supports `gpt-image-1` and `dall-e-2`.
 150
 151        Args:
 152          image: The image(s) to edit. Must be a supported image file or an array of images.
 153
 154              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
 155              50MB. You can provide up to 16 images.
 156
 157              For `dall-e-2`, you can only provide one image, and it should be a square `png`
 158              file less than 4MB.
 159
 160          prompt: A text description of the desired image(s). The maximum length is 1000
 161              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 162
 163          background: Allows to set transparency for the background of the generated image(s). This
 164              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 165              `opaque` or `auto` (default value). When `auto` is used, the model will
 166              automatically determine the best background for the image.
 167
 168              If `transparent`, the output format needs to support transparency, so it should
 169              be set to either `png` (default value) or `webp`.
 170
 171          input_fidelity: Control how much effort the model will exert to match the style and features,
 172              especially facial features, of input images. This parameter is only supported
 173              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
 174              `low`. Defaults to `low`.
 175
 176          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
 177              indicate where `image` should be edited. If there are multiple images provided,
 178              the mask will be applied on the first image. Must be a valid PNG file, less than
 179              4MB, and have the same dimensions as `image`.
 180
 181          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
 182              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
 183              is used.
 184
 185          n: The number of images to generate. Must be between 1 and 10.
 186
 187          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 188              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 189              defaults to 100.
 190
 191          output_format: The format in which the generated images are returned. This parameter is only
 192              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
 193              default value is `png`.
 194
 195          partial_images: The number of partial images to generate. This parameter is used for streaming
 196              responses that return partial images. Value must be between 0 and 3. When set to
 197              0, the response will be a single image sent in one streaming event.
 198
 199              Note that the final image may be sent before the full number of partial images
 200              are generated if the full image is generated more quickly.
 201
 202          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
 203              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
 204              Defaults to `auto`.
 205
 206          response_format: The format in which the generated images are returned. Must be one of `url` or
 207              `b64_json`. URLs are only valid for 60 minutes after the image has been
 208              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
 209              will always return base64-encoded images.
 210
 211          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 212              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 213              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
 214
 215          stream: Edit the image in streaming mode. Defaults to `false`. See the
 216              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 217              for more information.
 218
 219          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 220              and detect abuse.
 221              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 222
 223          extra_headers: Send extra headers
 224
 225          extra_query: Add additional query parameters to the request
 226
 227          extra_body: Add additional JSON properties to the request
 228
 229          timeout: Override the client-level default timeout for this request, in seconds
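
        Example (an illustrative sketch; `lounge.png` is a placeholder path):

            response = client.images.edit(
                image=open("lounge.png", "rb"),
                prompt="Add a flamingo floatie to the pool",
                model="gpt-image-1",
            )
            image_b64 = response.data[0].b64_json  # `gpt-image-1` returns base64-encoded images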
 230        """
 231        ...
 232
 233    @overload
 234    def edit(
 235        self,
 236        *,
 237        image: Union[FileTypes, SequenceNotStr[FileTypes]],
 238        prompt: str,
 239        stream: Literal[True],
 240        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 241        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
 242        mask: FileTypes | Omit = omit,
 243        model: Union[str, ImageModel, None] | Omit = omit,
 244        n: Optional[int] | Omit = omit,
 245        output_compression: Optional[int] | Omit = omit,
 246        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 247        partial_images: Optional[int] | Omit = omit,
 248        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
 249        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 250        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
 251        user: str | Omit = omit,
 252        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 253        # The extra values given here take precedence over values defined on the client or passed to this method.
 254        extra_headers: Headers | None = None,
 255        extra_query: Query | None = None,
 256        extra_body: Body | None = None,
 257        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 258    ) -> Stream[ImageEditStreamEvent]:
 259        """Creates an edited or extended image given one or more source images and a
 260        prompt.
 261
 262        This endpoint only supports `gpt-image-1` and `dall-e-2`.
 263
 264        Args:
 265          image: The image(s) to edit. Must be a supported image file or an array of images.
 266
 267              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
 268              50MB. You can provide up to 16 images.
 269
 270              For `dall-e-2`, you can only provide one image, and it should be a square `png`
 271              file less than 4MB.
 272
 273          prompt: A text description of the desired image(s). The maximum length is 1000
 274              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 275
 276          stream: Edit the image in streaming mode. Defaults to `false`. See the
 277              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 278              for more information.
 279
 280          background: Allows to set transparency for the background of the generated image(s). This
 281              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 282              `opaque` or `auto` (default value). When `auto` is used, the model will
 283              automatically determine the best background for the image.
 284
 285              If `transparent`, the output format needs to support transparency, so it should
 286              be set to either `png` (default value) or `webp`.
 287
 288          input_fidelity: Control how much effort the model will exert to match the style and features,
 289              especially facial features, of input images. This parameter is only supported
 290              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
 291              `low`. Defaults to `low`.
 292
 293          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
 294              indicate where `image` should be edited. If there are multiple images provided,
 295              the mask will be applied on the first image. Must be a valid PNG file, less than
 296              4MB, and have the same dimensions as `image`.
 297
 298          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
 299              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
 300              is used.
 301
 302          n: The number of images to generate. Must be between 1 and 10.
 303
 304          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 305              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 306              defaults to 100.
 307
 308          output_format: The format in which the generated images are returned. This parameter is only
 309              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
 310              default value is `png`.
 311
 312          partial_images: The number of partial images to generate. This parameter is used for streaming
 313              responses that return partial images. Value must be between 0 and 3. When set to
 314              0, the response will be a single image sent in one streaming event.
 315
 316              Note that the final image may be sent before the full number of partial images
 317              are generated if the full image is generated more quickly.
 318
 319          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
 320              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
 321              Defaults to `auto`.
 322
 323          response_format: The format in which the generated images are returned. Must be one of `url` or
 324              `b64_json`. URLs are only valid for 60 minutes after the image has been
 325              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
 326              will always return base64-encoded images.
 327
 328          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 329              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 330              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
 331
 332          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 333              and detect abuse.
 334              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 335
 336          extra_headers: Send extra headers
 337
 338          extra_query: Add additional query parameters to the request
 339
 340          extra_body: Add additional JSON properties to the request
 341
 342          timeout: Override the client-level default timeout for this request, in seconds
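
        Example (an illustrative sketch; the event `type` string and fields are
        assumed to match the published image edit stream events):

            stream = client.images.edit(
                image=open("lounge.png", "rb"),
                prompt="Add a flamingo floatie to the pool",
                model="gpt-image-1",
                stream=True,
                partial_images=2,
            )
            for event in stream:
                if event.type == "image_edit.partial_image":
                    print(f"received partial image {event.partial_image_index}")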
 343        """
 344        ...
 345
 346    @overload
 347    def edit(
 348        self,
 349        *,
 350        image: Union[FileTypes, SequenceNotStr[FileTypes]],
 351        prompt: str,
 352        stream: bool,
 353        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 354        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
 355        mask: FileTypes | Omit = omit,
 356        model: Union[str, ImageModel, None] | Omit = omit,
 357        n: Optional[int] | Omit = omit,
 358        output_compression: Optional[int] | Omit = omit,
 359        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 360        partial_images: Optional[int] | Omit = omit,
 361        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
 362        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 363        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
 364        user: str | Omit = omit,
 365        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 366        # The extra values given here take precedence over values defined on the client or passed to this method.
 367        extra_headers: Headers | None = None,
 368        extra_query: Query | None = None,
 369        extra_body: Body | None = None,
 370        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 371    ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
 372        """Creates an edited or extended image given one or more source images and a
 373        prompt.
 374
 375        This endpoint only supports `gpt-image-1` and `dall-e-2`.
 376
 377        Args:
 378          image: The image(s) to edit. Must be a supported image file or an array of images.
 379
 380              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
 381              50MB. You can provide up to 16 images.
 382
 383              For `dall-e-2`, you can only provide one image, and it should be a square `png`
 384              file less than 4MB.
 385
 386          prompt: A text description of the desired image(s). The maximum length is 1000
 387              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 388
 389          stream: Edit the image in streaming mode. Defaults to `false`. See the
 390              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 391              for more information.
 392
 393          background: Allows to set transparency for the background of the generated image(s). This
 394              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 395              `opaque` or `auto` (default value). When `auto` is used, the model will
 396              automatically determine the best background for the image.
 397
 398              If `transparent`, the output format needs to support transparency, so it should
 399              be set to either `png` (default value) or `webp`.
 400
 401          input_fidelity: Control how much effort the model will exert to match the style and features,
 402              especially facial features, of input images. This parameter is only supported
 403              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
 404              `low`. Defaults to `low`.
 405
 406          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
 407              indicate where `image` should be edited. If there are multiple images provided,
 408              the mask will be applied on the first image. Must be a valid PNG file, less than
 409              4MB, and have the same dimensions as `image`.
 410
 411          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
 412              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
 413              is used.
 414
 415          n: The number of images to generate. Must be between 1 and 10.
 416
 417          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 418              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 419              defaults to 100.
 420
 421          output_format: The format in which the generated images are returned. This parameter is only
 422              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
 423              default value is `png`.
 424
 425          partial_images: The number of partial images to generate. This parameter is used for streaming
 426              responses that return partial images. Value must be between 0 and 3. When set to
 427              0, the response will be a single image sent in one streaming event.
 428
 429              Note that the final image may be sent before the full number of partial images
 430              are generated if the full image is generated more quickly.
 431
 432          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
 433              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
 434              Defaults to `auto`.
 435
 436          response_format: The format in which the generated images are returned. Must be one of `url` or
 437              `b64_json`. URLs are only valid for 60 minutes after the image has been
 438              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
 439              will always return base64-encoded images.
 440
 441          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 442              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 443              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
 444
 445          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 446              and detect abuse.
 447              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 448
 449          extra_headers: Send extra headers
 450
 451          extra_query: Add additional query parameters to the request
 452
 453          extra_body: Add additional JSON properties to the request
 454
 455          timeout: Override the client-level default timeout for this request, in seconds
 456        """
 457        ...
 458
 459    @required_args(["image", "prompt"], ["image", "prompt", "stream"])
 460    def edit(
 461        self,
 462        *,
 463        image: Union[FileTypes, SequenceNotStr[FileTypes]],
 464        prompt: str,
 465        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 466        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
 467        mask: FileTypes | Omit = omit,
 468        model: Union[str, ImageModel, None] | Omit = omit,
 469        n: Optional[int] | Omit = omit,
 470        output_compression: Optional[int] | Omit = omit,
 471        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 472        partial_images: Optional[int] | Omit = omit,
 473        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
 474        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 475        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
 476        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
 477        user: str | Omit = omit,
 478        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 479        # The extra values given here take precedence over values defined on the client or passed to this method.
 480        extra_headers: Headers | None = None,
 481        extra_query: Query | None = None,
 482        extra_body: Body | None = None,
 483        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 484    ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
 485        body = deepcopy_minimal(
 486            {
 487                "image": image,
 488                "prompt": prompt,
 489                "background": background,
 490                "input_fidelity": input_fidelity,
 491                "mask": mask,
 492                "model": model,
 493                "n": n,
 494                "output_compression": output_compression,
 495                "output_format": output_format,
 496                "partial_images": partial_images,
 497                "quality": quality,
 498                "response_format": response_format,
 499                "size": size,
 500                "stream": stream,
 501                "user": user,
 502            }
 503        )
 504        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
 505        # It should be noted that the actual Content-Type header that will be
 506        # sent to the server will contain a `boundary` parameter, e.g.
 507        # multipart/form-data; boundary=---abc--
 508        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
 509        return self._post(
 510            "/images/edits",
 511            body=maybe_transform(
 512                body,
 513                image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
 514            ),
 515            files=files,
 516            options=make_request_options(
 517                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
 518            ),
 519            cast_to=ImagesResponse,
 520            stream=stream or False,
 521            stream_cls=Stream[ImageEditStreamEvent],
 522        )
 523
 524    @overload
 525    def generate(
 526        self,
 527        *,
 528        prompt: str,
 529        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 530        model: Union[str, ImageModel, None] | Omit = omit,
 531        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
 532        n: Optional[int] | Omit = omit,
 533        output_compression: Optional[int] | Omit = omit,
 534        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 535        partial_images: Optional[int] | Omit = omit,
 536        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
 537        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 538        size: Optional[
 539            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
 540        ]
 541        | Omit = omit,
 542        stream: Optional[Literal[False]] | Omit = omit,
 543        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
 544        user: str | Omit = omit,
 545        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 546        # The extra values given here take precedence over values defined on the client or passed to this method.
 547        extra_headers: Headers | None = None,
 548        extra_query: Query | None = None,
 549        extra_body: Body | None = None,
 550        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 551    ) -> ImagesResponse:
 552        """
 553        Creates an image given a prompt.
 554        [Learn more](https://platform.openai.com/docs/guides/images).
 555
 556        Args:
 557          prompt: A text description of the desired image(s). The maximum length is 32000
 558              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
 559              for `dall-e-3`.
 560
 561          background: Allows to set transparency for the background of the generated image(s). This
 562              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 563              `opaque` or `auto` (default value). When `auto` is used, the model will
 564              automatically determine the best background for the image.
 565
 566              If `transparent`, the output format needs to support transparency, so it should
 567              be set to either `png` (default value) or `webp`.
 568
 569          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
 570              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
 571              `gpt-image-1` is used.
 572
 573          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
 574              be either `low` for less restrictive filtering or `auto` (default value).
 575
 576          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 577              `n=1` is supported.
 578
 579          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 580              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 581              defaults to 100.
 582
 583          output_format: The format in which the generated images are returned. This parameter is only
 584              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
 585
 586          partial_images: The number of partial images to generate. This parameter is used for streaming
 587              responses that return partial images. Value must be between 0 and 3. When set to
 588              0, the response will be a single image sent in one streaming event.
 589
 590              Note that the final image may be sent before the full number of partial images
 591              are generated if the full image is generated more quickly.
 592
 593          quality: The quality of the image that will be generated.
 594
 595              - `auto` (default value) will automatically select the best quality for the
 596                given model.
 597              - `high`, `medium` and `low` are supported for `gpt-image-1`.
 598              - `hd` and `standard` are supported for `dall-e-3`.
 599              - `standard` is the only option for `dall-e-2`.
 600
 601          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
 602              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
 603              after the image has been generated. This parameter isn't supported for
 604              `gpt-image-1` which will always return base64-encoded images.
 605
 606          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 607              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 608              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
 609              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 610
 611          stream: Generate the image in streaming mode. Defaults to `false`. See the
 612              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 613              for more information. This parameter is only supported for `gpt-image-1`.
 614
 615          style: The style of the generated images. This parameter is only supported for
 616              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
 617              towards generating hyper-real and dramatic images. Natural causes the model to
 618              produce more natural, less hyper-real looking images.
 619
 620          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 621              and detect abuse.
 622              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 623
 624          extra_headers: Send extra headers
 625
 626          extra_query: Add additional query parameters to the request
 627
 628          extra_body: Add additional JSON properties to the request
 629
 630          timeout: Override the client-level default timeout for this request, in seconds
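
        Example (an illustrative sketch; `client` is assumed to be a configured
        `OpenAI` instance):

            response = client.images.generate(
                model="gpt-image-1",
                prompt="A watercolor map of an imaginary island",
            )
            image_b64 = response.data[0].b64_json  # `gpt-image-1` returns base64-encoded images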
 631        """
 632        ...
 633
 634    @overload
 635    def generate(
 636        self,
 637        *,
 638        prompt: str,
 639        stream: Literal[True],
 640        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 641        model: Union[str, ImageModel, None] | Omit = omit,
 642        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
 643        n: Optional[int] | Omit = omit,
 644        output_compression: Optional[int] | Omit = omit,
 645        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 646        partial_images: Optional[int] | Omit = omit,
 647        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
 648        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 649        size: Optional[
 650            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
 651        ]
 652        | Omit = omit,
 653        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
 654        user: str | Omit = omit,
 655        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 656        # The extra values given here take precedence over values defined on the client or passed to this method.
 657        extra_headers: Headers | None = None,
 658        extra_query: Query | None = None,
 659        extra_body: Body | None = None,
 660        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 661    ) -> Stream[ImageGenStreamEvent]:
 662        """
 663        Creates an image given a prompt.
 664        [Learn more](https://platform.openai.com/docs/guides/images).
 665
 666        Args:
 667          prompt: A text description of the desired image(s). The maximum length is 32000
 668              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
 669              for `dall-e-3`.
 670
 671          stream: Generate the image in streaming mode. Defaults to `false`. See the
 672              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 673              for more information. This parameter is only supported for `gpt-image-1`.
 674
 675          background: Allows to set transparency for the background of the generated image(s). This
 676              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 677              `opaque` or `auto` (default value). When `auto` is used, the model will
 678              automatically determine the best background for the image.
 679
 680              If `transparent`, the output format needs to support transparency, so it should
 681              be set to either `png` (default value) or `webp`.
 682
 683          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
 684              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
 685              `gpt-image-1` is used.
 686
 687          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
 688              be either `low` for less restrictive filtering or `auto` (default value).
 689
 690          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 691              `n=1` is supported.
 692
 693          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 694              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 695              defaults to 100.
 696
 697          output_format: The format in which the generated images are returned. This parameter is only
 698              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
 699
 700          partial_images: The number of partial images to generate. This parameter is used for streaming
 701              responses that return partial images. Value must be between 0 and 3. When set to
 702              0, the response will be a single image sent in one streaming event.
 703
 704              Note that the final image may be sent before the full number of partial images
 705              are generated if the full image is generated more quickly.
 706
 707          quality: The quality of the image that will be generated.
 708
 709              - `auto` (default value) will automatically select the best quality for the
 710                given model.
 711              - `high`, `medium` and `low` are supported for `gpt-image-1`.
 712              - `hd` and `standard` are supported for `dall-e-3`.
 713              - `standard` is the only option for `dall-e-2`.
 714
 715          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
 716              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
 717              after the image has been generated. This parameter isn't supported for
 718              `gpt-image-1` which will always return base64-encoded images.
 719
 720          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 721              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 722              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
 723              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 724
 725          style: The style of the generated images. This parameter is only supported for
 726              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
 727              towards generating hyper-real and dramatic images. Natural causes the model to
 728              produce more natural, less hyper-real looking images.
 729
 730          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 731              and detect abuse.
 732              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 733
 734          extra_headers: Send extra headers
 735
 736          extra_query: Add additional query parameters to the request
 737
 738          extra_body: Add additional JSON properties to the request
 739
 740          timeout: Override the client-level default timeout for this request, in seconds
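
        Example (an illustrative sketch; the event `type` string is assumed to
        match the published image generation stream events):

            stream = client.images.generate(
                model="gpt-image-1",
                prompt="A watercolor map of an imaginary island",
                stream=True,
                partial_images=2,
            )
            for event in stream:
                if event.type == "image_generation.completed":
                    print("final image received")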
 741        """
 742        ...
 743
 744    @overload
 745    def generate(
 746        self,
 747        *,
 748        prompt: str,
 749        stream: bool,
 750        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 751        model: Union[str, ImageModel, None] | Omit = omit,
 752        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
 753        n: Optional[int] | Omit = omit,
 754        output_compression: Optional[int] | Omit = omit,
 755        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 756        partial_images: Optional[int] | Omit = omit,
 757        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
 758        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 759        size: Optional[
 760            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
 761        ]
 762        | Omit = omit,
 763        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
 764        user: str | Omit = omit,
 765        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 766        # The extra values given here take precedence over values defined on the client or passed to this method.
 767        extra_headers: Headers | None = None,
 768        extra_query: Query | None = None,
 769        extra_body: Body | None = None,
 770        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 771    ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
 772        """
 773        Creates an image given a prompt.
 774        [Learn more](https://platform.openai.com/docs/guides/images).
 775
 776        Args:
 777          prompt: A text description of the desired image(s). The maximum length is 32000
 778              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
 779              for `dall-e-3`.
 780
 781          stream: Generate the image in streaming mode. Defaults to `false`. See the
 782              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
 783              for more information. This parameter is only supported for `gpt-image-1`.
 784
 785          background: Allows to set transparency for the background of the generated image(s). This
 786              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
 787              `opaque` or `auto` (default value). When `auto` is used, the model will
 788              automatically determine the best background for the image.
 789
 790              If `transparent`, the output format needs to support transparency, so it should
 791              be set to either `png` (default value) or `webp`.
 792
 793          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
 794              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
 795              `gpt-image-1` is used.
 796
 797          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
 798              be either `low` for less restrictive filtering or `auto` (default value).
 799
 800          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
 801              `n=1` is supported.
 802
 803          output_compression: The compression level (0-100%) for the generated images. This parameter is only
 804              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
 805              defaults to 100.
 806
 807          output_format: The format in which the generated images are returned. This parameter is only
 808              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
 809
 810          partial_images: The number of partial images to generate. This parameter is used for streaming
 811              responses that return partial images. Value must be between 0 and 3. When set to
 812              0, the response will be a single image sent in one streaming event.
 813
 814              Note that the final image may be sent before the full number of partial images
 815              are generated if the full image is generated more quickly.
 816
 817          quality: The quality of the image that will be generated.
 818
 819              - `auto` (default value) will automatically select the best quality for the
 820                given model.
 821              - `high`, `medium` and `low` are supported for `gpt-image-1`.
 822              - `hd` and `standard` are supported for `dall-e-3`.
 823              - `standard` is the only option for `dall-e-2`.
 824
 825          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
 826              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
 827              after the image has been generated. This parameter isn't supported for
 828              `gpt-image-1` which will always return base64-encoded images.
 829
 830          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
 831              (landscape), `1024x1536` (portrait), or `auto` (default value) for
 832              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
 833              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 834
 835          style: The style of the generated images. This parameter is only supported for
 836              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
 837              towards generating hyper-real and dramatic images. Natural causes the model to
 838              produce more natural, less hyper-real looking images.
 839
 840          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 841              and detect abuse.
 842              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 843
 844          extra_headers: Send extra headers
 845
 846          extra_query: Add additional query parameters to the request
 847
 848          extra_body: Add additional JSON properties to the request
 849
 850          timeout: Override the client-level default timeout for this request, in seconds
 851        """
 852        ...
 853
 854    @required_args(["prompt"], ["prompt", "stream"])
 855    def generate(
 856        self,
 857        *,
 858        prompt: str,
 859        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
 860        model: Union[str, ImageModel, None] | Omit = omit,
 861        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
 862        n: Optional[int] | Omit = omit,
 863        output_compression: Optional[int] | Omit = omit,
 864        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
 865        partial_images: Optional[int] | Omit = omit,
 866        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
 867        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 868        size: Optional[
 869            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
 870        ]
 871        | Omit = omit,
 872        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
 873        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
 874        user: str | Omit = omit,
 875        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 876        # The extra values given here take precedence over values defined on the client or passed to this method.
 877        extra_headers: Headers | None = None,
 878        extra_query: Query | None = None,
 879        extra_body: Body | None = None,
 880        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 881    ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
 882        return self._post(
 883            "/images/generations",
 884            body=maybe_transform(
 885                {
 886                    "prompt": prompt,
 887                    "background": background,
 888                    "model": model,
 889                    "moderation": moderation,
 890                    "n": n,
 891                    "output_compression": output_compression,
 892                    "output_format": output_format,
 893                    "partial_images": partial_images,
 894                    "quality": quality,
 895                    "response_format": response_format,
 896                    "size": size,
 897                    "stream": stream,
 898                    "style": style,
 899                    "user": user,
 900                },
 901                image_generate_params.ImageGenerateParamsStreaming
 902                if stream
 903                else image_generate_params.ImageGenerateParamsNonStreaming,
 904            ),
 905            options=make_request_options(
 906                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
 907            ),
 908            cast_to=ImagesResponse,
 909            stream=stream or False,
 910            stream_cls=Stream[ImageGenStreamEvent],
 911        )
 912
 913
 914class AsyncImages(AsyncAPIResource):
 915    @cached_property
 916    def with_raw_response(self) -> AsyncImagesWithRawResponse:
 917        """
 918        This property can be used as a prefix for any HTTP method call to return
 919        the raw response object instead of the parsed content.
 920
 921        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
 922        """
 923        return AsyncImagesWithRawResponse(self)
 924
 925    @cached_property
 926    def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
 927        """
 928        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
 929
 930        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
 931        """
 932        return AsyncImagesWithStreamingResponse(self)
 933
 934    async def create_variation(
 935        self,
 936        *,
 937        image: FileTypes,
 938        model: Union[str, ImageModel, None] | Omit = omit,
 939        n: Optional[int] | Omit = omit,
 940        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
 941        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
 942        user: str | Omit = omit,
 943        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
 944        # The extra values given here take precedence over values defined on the client or passed to this method.
 945        extra_headers: Headers | None = None,
 946        extra_query: Query | None = None,
 947        extra_body: Body | None = None,
 948        timeout: float | httpx.Timeout | None | NotGiven = not_given,
 949    ) -> ImagesResponse:
 950        """Creates a variation of a given image.
 951
 952        This endpoint only supports `dall-e-2`.
 953
 954        Args:
 955          image: The image to use as the basis for the variation(s). Must be a valid PNG file,
 956              less than 4MB, and square.
 957
 958          model: The model to use for image generation. Only `dall-e-2` is supported at this
 959              time.
 960
 961          n: The number of images to generate. Must be between 1 and 10.
 962
 963          response_format: The format in which the generated images are returned. Must be one of `url` or
 964              `b64_json`. URLs are only valid for 60 minutes after the image has been
 965              generated.
 966
 967          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
 968              `1024x1024`.
 969
 970          user: A unique identifier representing your end-user, which can help OpenAI to monitor
 971              and detect abuse.
 972              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
 973
 974          extra_headers: Send extra headers
 975
 976          extra_query: Add additional query parameters to the request
 977
 978          extra_body: Add additional JSON properties to the request
 979
 980          timeout: Override the client-level default timeout for this request, in seconds
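
        Example (an illustrative sketch; `client` is assumed to be a configured
        `AsyncOpenAI` instance and `otter.png` a placeholder path):

            response = await client.images.create_variation(
                image=open("otter.png", "rb"),
                n=2,
            )
            print(response.data[0].url)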
 981        """
 982        body = deepcopy_minimal(
 983            {
 984                "image": image,
 985                "model": model,
 986                "n": n,
 987                "response_format": response_format,
 988                "size": size,
 989                "user": user,
 990            }
 991        )
 992        files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
 993        # It should be noted that the actual Content-Type header that will be
 994        # sent to the server will contain a `boundary` parameter, e.g.
 995        # multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            "/images/variations",
            body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ImagesResponse,
        )

    @overload
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If multiple images are provided, the
              mask is applied to the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
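
          Example (an illustrative sketch, not generated from the spec; assumes an
          `AsyncOpenAI` client named `client`, an `otter.png` source image, and an
          enclosing async function):

              response = await client.images.edit(
                  image=open("otter.png", "rb"),
                  prompt="Add a red woolly hat",
                  model="gpt-image-1",
              )
              image_b64 = response.data[0].b64_json  # gpt-image-1 returns base64 data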
        """
        ...

    @overload
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If multiple images are provided, the
              mask is applied to the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
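
          Example (an illustrative sketch, not generated from the spec; assumes the
          same `client` and source image as above, inside an async function):

              stream = await client.images.edit(
                  image=open("otter.png", "rb"),
                  prompt="Add a red woolly hat",
                  model="gpt-image-1",
                  stream=True,
                  partial_images=2,
              )
              async for event in stream:
                  print(event.type)  # partial-image events arrive before the final image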
        """
        ...

    @overload
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If multiple images are provided, the
              mask is applied to the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["image", "prompt"], ["image", "prompt", "stream"])
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
        body = deepcopy_minimal(
            {
                "image": image,
                "prompt": prompt,
                "background": background,
                "input_fidelity": input_fidelity,
                "mask": mask,
                "model": model,
                "n": n,
                "output_compression": output_compression,
                "output_format": output_format,
                "partial_images": partial_images,
                "quality": quality,
                "response_format": response_format,
                "size": size,
                "stream": stream,
                "user": user,
            }
        )
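        # `extract_files` pulls the file-like values out of `body` (the top-level
        # `image`, each element of an `image` array, and `mask`) so they can be
        # sent as multipart form parts rather than JSON fields.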
        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
        # Note that the actual Content-Type header sent to the server will contain
        # a `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
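        # Which params TypedDict the body is transformed against depends on
        # `stream`, mirroring the streaming/non-streaming overloads above.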
        return await self._post(
            "/images/edits",
            body=await async_maybe_transform(
                body,
                image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
            ),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ImagesResponse,
            stream=stream or False,
            stream_cls=AsyncStream[ImageEditStreamEvent],
        )

    @overload
    async def generate(
        self,
        *,
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1`, which always returns base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
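
          Example (an illustrative sketch, not generated from the spec; assumes an
          `AsyncOpenAI` client named `client` and an enclosing async function):

              response = await client.images.generate(
                  prompt="A watercolor painting of an otter",
                  model="gpt-image-1",
                  size="1024x1024",
              )
              image_b64 = response.data[0].b64_json  # gpt-image-1 returns base64 data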
        """
        ...

    @overload
    async def generate(
        self,
        *,
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1`, which always returns base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
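
          Example (an illustrative sketch, not generated from the spec; assumes the
          same `client` as above, inside an async function):

              stream = await client.images.generate(
                  prompt="A watercolor painting of an otter",
                  model="gpt-image-1",
                  stream=True,
                  partial_images=3,
              )
              async for event in stream:
                  print(event.type)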
        """
        ...

    @overload
    async def generate(
        self,
        *,
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows you to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              has been generated if the full image is completed more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1`, which always returns base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["prompt"], ["prompt", "stream"])
    async def generate(
        self,
        *,
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
        return await self._post(
            "/images/generations",
            body=await async_maybe_transform(
                {
                    "prompt": prompt,
                    "background": background,
                    "model": model,
                    "moderation": moderation,
                    "n": n,
                    "output_compression": output_compression,
                    "output_format": output_format,
                    "partial_images": partial_images,
                    "quality": quality,
                    "response_format": response_format,
                    "size": size,
                    "stream": stream,
                    "style": style,
                    "user": user,
                },
                image_generate_params.ImageGenerateParamsStreaming
                if stream
                else image_generate_params.ImageGenerateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ImagesResponse,
            stream=stream or False,
            stream_cls=AsyncStream[ImageGenStreamEvent],
        )


class ImagesWithRawResponse:
    def __init__(self, images: Images) -> None:
        self._images = images

        self.create_variation = _legacy_response.to_raw_response_wrapper(
            images.create_variation,
        )
        self.edit = _legacy_response.to_raw_response_wrapper(
            images.edit,
        )
        self.generate = _legacy_response.to_raw_response_wrapper(
            images.generate,
        )


class AsyncImagesWithRawResponse:
    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        self.create_variation = _legacy_response.async_to_raw_response_wrapper(
            images.create_variation,
        )
        self.edit = _legacy_response.async_to_raw_response_wrapper(
            images.edit,
        )
        self.generate = _legacy_response.async_to_raw_response_wrapper(
            images.generate,
        )


class ImagesWithStreamingResponse:
    def __init__(self, images: Images) -> None:
        self._images = images

        self.create_variation = to_streamed_response_wrapper(
            images.create_variation,
        )
        self.edit = to_streamed_response_wrapper(
            images.edit,
        )
        self.generate = to_streamed_response_wrapper(
            images.generate,
        )


class AsyncImagesWithStreamingResponse:
    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        self.create_variation = async_to_streamed_response_wrapper(
            images.create_variation,
        )
        self.edit = async_to_streamed_response_wrapper(
            images.edit,
        )
        self.generate = async_to_streamed_response_wrapper(
            images.generate,
        )