# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Optional
from typing_extensions import Literal, Required, TypedDict

from .._types import FileTypes, SequenceNotStr
from .image_model import ImageModel

__all__ = ["ImageEditParamsBase", "ImageEditParamsNonStreaming", "ImageEditParamsStreaming"]


class ImageEditParamsBase(TypedDict, total=False):
    image: Required[Union[FileTypes, SequenceNotStr[FileTypes]]]
    """The image(s) to edit. Must be a supported image file or an array of images.

    For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
    50MB. You can provide up to 16 images.

    For `dall-e-2`, you can only provide one image, and it should be a square `png`
    file less than 4MB.
    """

    prompt: Required[str]
    """A text description of the desired image(s).

    The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for
    `gpt-image-1`.
    """

    background: Optional[Literal["transparent", "opaque", "auto"]]
    """
    Allows setting transparency for the background of the generated image(s). This
    parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
    `opaque` or `auto` (default value). When `auto` is used, the model will
    automatically determine the best background for the image.

    If `transparent`, the output format needs to support transparency, so it should
    be set to either `png` (default value) or `webp`.
    """

    input_fidelity: Optional[Literal["high", "low"]]
    """
    Control how much effort the model will exert to match the style and features,
    especially facial features, of input images. This parameter is only supported
    for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
    `low`. Defaults to `low`.
    """

    mask: FileTypes
    """An additional image whose fully transparent areas (e.g. where alpha is zero)
    indicate where `image` should be edited.

    If there are multiple images provided, the mask will be applied on the first
    image. Must be a valid PNG file, less than 4MB, and have the same dimensions as
    `image`.
    """

    model: Union[str, ImageModel, None]
    """The model to use for image generation.

    Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a
    parameter specific to `gpt-image-1` is used.
    """

    n: Optional[int]
    """The number of images to generate. Must be between 1 and 10."""

    output_compression: Optional[int]
    """The compression level (0-100%) for the generated images.

    This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg`
    output formats, and defaults to 100.
    """

    output_format: Optional[Literal["png", "jpeg", "webp"]]
    """The format in which the generated images are returned.

    This parameter is only supported for `gpt-image-1`. Must be one of `png`,
    `jpeg`, or `webp`. The default value is `png`.
    """

    partial_images: Optional[int]
    """The number of partial images to generate.

    This parameter is used for streaming responses that return partial images. Value
    must be between 0 and 3. When set to 0, the response will be a single image sent
    in one streaming event.

    Note that the final image may be sent before the full number of partial images
    has been generated if the full image is generated more quickly.
    """

    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
    """The quality of the image that will be generated.

    `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only
    supports `standard` quality. Defaults to `auto`.
    """

    response_format: Optional[Literal["url", "b64_json"]]
    """The format in which the generated images are returned.

    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
    image has been generated. This parameter is only supported for `dall-e-2`, as
    `gpt-image-1` will always return base64-encoded images.
    """

    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
    """The size of the generated images.

    Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
    `auto` (default value) for `gpt-image-1`, and one of `256x256`, `512x512`, or
    `1024x1024` for `dall-e-2`.
    """

    user: str
    """
    A unique identifier representing your end-user, which can help OpenAI to monitor
    and detect abuse.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
    """


class ImageEditParamsNonStreaming(ImageEditParamsBase, total=False):
    stream: Optional[Literal[False]]
    """Edit the image in streaming mode.

    Defaults to `false`. See the
    [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
    for more information.
    """


class ImageEditParamsStreaming(ImageEditParamsBase):
    stream: Required[Literal[True]]
    """Edit the image in streaming mode.

    Defaults to `false`. See the
    [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
    for more information.
    """


ImageEditParams = Union[ImageEditParamsNonStreaming, ImageEditParamsStreaming]
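
# Illustrative usage sketch (not part of the generated file). These TypedDicts are
# plain dicts at runtime; assuming the standard `OpenAI` client, a non-streaming
# edit request might look like this (paths and prompt are placeholders):
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     params: ImageEditParamsNonStreaming = {
#         "image": open("input.png", "rb"),
#         "prompt": "Add a small sailboat on the horizon",
#         "model": "gpt-image-1",
#         "size": "1024x1024",
#         "output_format": "png",
#     }
#     result = client.images.edit(**params)
#
# For a streaming edit, `stream` must be `True` and `partial_images` controls how
# many intermediate frames are emitted:
#
#     stream_params: ImageEditParamsStreaming = {
#         "image": open("input.png", "rb"),
#         "prompt": "Add a small sailboat on the horizon",
#         "model": "gpt-image-1",
#         "stream": True,
#         "partial_images": 2,
#     }
#     events = client.images.edit(**stream_params)
#     for event in events:
#         ...  # handle each streamed image event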