# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from .image import Image
from .._models import BaseModel

__all__ = ["ImagesResponse", "Usage", "UsageInputTokensDetails"]


class UsageInputTokensDetails(BaseModel):
    image_tokens: int
    """The number of image tokens in the input prompt."""

    text_tokens: int
    """The number of text tokens in the input prompt."""


class Usage(BaseModel):
    input_tokens: int
    """The number of tokens (images and text) in the input prompt."""

    input_tokens_details: UsageInputTokensDetails
25 """The input tokens detailed information for the image generation."""
26
    output_tokens: int
    """The number of output tokens generated by the model."""

    total_tokens: int
    """The total number of tokens (images and text) used for the image generation."""


class ImagesResponse(BaseModel):
    created: int
    """The Unix timestamp (in seconds) of when the image was created."""

    background: Optional[Literal["transparent", "opaque"]] = None
    """The background parameter used for the image generation.

    Either `transparent` or `opaque`.
    """

    data: Optional[List[Image]] = None
    """The list of generated images."""

    output_format: Optional[Literal["png", "webp", "jpeg"]] = None
    """The output format of the image generation. Either `png`, `webp`, or `jpeg`."""

    quality: Optional[Literal["low", "medium", "high"]] = None
    """The quality of the image generated. Either `low`, `medium`, or `high`."""

    size: Optional[Literal["1024x1024", "1024x1536", "1536x1024"]] = None
    """The size of the image generated.

    Either `1024x1024`, `1024x1536`, or `1536x1024`.
    """

    usage: Optional[Usage] = None
    """For `gpt-image-1` only, the token usage information for the image generation."""