Commit b8a3720e

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-05-03 03:09:24
feat(api): add image sizes, reasoning encryption
Parent: 4fc5252
src/openai/resources/audio/speech.py
@@ -85,7 +85,7 @@ class Speech(SyncAPIResource):
               `wav`, and `pcm`.
 
           speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-              the default.
+              the default. Does not work with `gpt-4o-mini-tts`.
 
           extra_headers: Send extra headers
 
@@ -176,7 +176,7 @@ class AsyncSpeech(AsyncAPIResource):
               `wav`, and `pcm`.
 
           speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-              the default.
+              the default. Does not work with `gpt-4o-mini-tts`.
 
           extra_headers: Send extra headers
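
A rough sketch of the `speed` parameter in use (the voice, input text, and output file name are illustrative, not taken from this diff); per the docstring change above, `speed` is ignored for `gpt-4o-mini-tts`:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # speed is honored by tts-1 / tts-1-hd; it does not work with gpt-4o-mini-tts.
    with client.audio.speech.with_streaming_response.create(
        model="tts-1",
        voice="alloy",
        input="The quick brown fox jumped over the lazy dog.",
        response_format="wav",
        speed=1.5,
    ) as response:
        response.stream_to_file("speech.wav")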
 
src/openai/resources/responses/responses.py
@@ -140,6 +140,11 @@ class Responses(SyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -331,6 +336,11 @@ class Responses(SyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -515,6 +525,11 @@ class Responses(SyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1013,6 +1028,11 @@ class AsyncResponses(AsyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1204,6 +1224,11 @@ class AsyncResponses(AsyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
@@ -1388,6 +1413,11 @@ class AsyncResponses(AsyncAPIResource):
               - `message.input_image.image_url`: Include image urls from the input message.
               - `computer_call_output.output.image_url`: Include image urls from the computer
                 call output.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
 
           instructions: Inserts a system (or developer) message as the first item in the model's
               context.
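
A minimal sketch of requesting encrypted reasoning content on a stateless call (the model name and prompt are placeholders):

    from openai import OpenAI

    client = OpenAI()

    # Stateless request: nothing is stored server-side, so ask for the encrypted
    # reasoning content up front so later turns can reuse it.
    response = client.responses.create(
        model="o4-mini",  # assumed reasoning-capable model; swap for your own
        input="What is 17 * 24?",
        store=False,
        include=["reasoning.encrypted_content"],
    )

    # Reasoning items in response.output now carry encrypted_content; see the
    # reasoning-item sketch further below for replaying them on the next turn.
    print(response.output_text)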
src/openai/resources/images.py
@@ -119,12 +119,14 @@ class Images(SyncAPIResource):
         *,
         image: Union[FileTypes, List[FileTypes]],
         prompt: str,
+        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
         mask: FileTypes | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
-        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+        | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -139,14 +141,25 @@ class Images(SyncAPIResource):
         This endpoint only supports `gpt-image-1` and `dall-e-2`.
 
         Args:
-          image: The image(s) to edit. Must be a supported image file or an array of images. For
-              `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-              25MB. For `dall-e-2`, you can only provide one image, and it should be a square
-              `png` file less than 4MB.
+          image: The image(s) to edit. Must be a supported image file or an array of images.
+
+              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+              25MB. You can provide up to 16 images.
+
+              For `dall-e-2`, you can only provide one image, and it should be a square `png`
+              file less than 4MB.
 
           prompt: A text description of the desired image(s). The maximum length is 1000
               characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 
+          background: Allows to set transparency for the background of the generated image(s). This
+              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+              `opaque` or `auto` (default value). When `auto` is used, the model will
+              automatically determine the best background for the image.
+
+              If `transparent`, the output format needs to support transparency, so it should
+              be set to either `png` (default value) or `webp`.
+
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
               the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -187,6 +200,7 @@ class Images(SyncAPIResource):
             {
                 "image": image,
                 "prompt": prompt,
+                "background": background,
                 "mask": mask,
                 "model": model,
                 "n": n,
@@ -429,12 +443,14 @@ class AsyncImages(AsyncAPIResource):
         *,
         image: Union[FileTypes, List[FileTypes]],
         prompt: str,
+        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
         mask: FileTypes | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
-        size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
+        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
+        | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -449,14 +465,25 @@ class AsyncImages(AsyncAPIResource):
         This endpoint only supports `gpt-image-1` and `dall-e-2`.
 
         Args:
-          image: The image(s) to edit. Must be a supported image file or an array of images. For
-              `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
-              25MB. For `dall-e-2`, you can only provide one image, and it should be a square
-              `png` file less than 4MB.
+          image: The image(s) to edit. Must be a supported image file or an array of images.
+
+              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+              25MB. You can provide up to 16 images.
+
+              For `dall-e-2`, you can only provide one image, and it should be a square `png`
+              file less than 4MB.
 
           prompt: A text description of the desired image(s). The maximum length is 1000
               characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 
+          background: Allows to set transparency for the background of the generated image(s). This
+              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+              `opaque` or `auto` (default value). When `auto` is used, the model will
+              automatically determine the best background for the image.
+
+              If `transparent`, the output format needs to support transparency, so it should
+              be set to either `png` (default value) or `webp`.
+
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
               the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -497,6 +524,7 @@ class AsyncImages(AsyncAPIResource):
             {
                 "image": image,
                 "prompt": prompt,
+                "background": background,
                 "mask": mask,
                 "model": model,
                 "n": n,
src/openai/types/audio/speech_create_params.py
@@ -48,5 +48,6 @@ class SpeechCreateParams(TypedDict, total=False):
     speed: float
     """The speed of the generated audio.
 
-    Select a value from `0.25` to `4.0`. `1.0` is the default.
+    Select a value from `0.25` to `4.0`. `1.0` is the default. Does not work with
+    `gpt-4o-mini-tts`.
     """
src/openai/types/responses/computer_tool.py
@@ -8,13 +8,13 @@ __all__ = ["ComputerTool"]
 
 
 class ComputerTool(BaseModel):
-    display_height: float
+    display_height: int
     """The height of the computer display."""
 
-    display_width: float
+    display_width: int
     """The width of the computer display."""
 
-    environment: Literal["mac", "windows", "ubuntu", "browser"]
+    environment: Literal["windows", "mac", "linux", "ubuntu", "browser"]
     """The type of computer environment to control."""
 
     type: Literal["computer_use_preview"]
src/openai/types/responses/computer_tool_param.py
@@ -8,13 +8,13 @@ __all__ = ["ComputerToolParam"]
 
 
 class ComputerToolParam(TypedDict, total=False):
-    display_height: Required[float]
+    display_height: Required[int]
     """The height of the computer display."""
 
-    display_width: Required[float]
+    display_width: Required[int]
     """The width of the computer display."""
 
-    environment: Required[Literal["mac", "windows", "ubuntu", "browser"]]
+    environment: Required[Literal["windows", "mac", "linux", "ubuntu", "browser"]]
     """The type of computer environment to control."""
 
     type: Required[Literal["computer_use_preview"]]
src/openai/types/responses/file_search_tool.py
@@ -9,7 +9,7 @@ from ..shared.comparison_filter import ComparisonFilter
 
 __all__ = ["FileSearchTool", "Filters", "RankingOptions"]
 
-Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter]
+Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter, None]
 
 
 class RankingOptions(BaseModel):
@@ -17,10 +17,10 @@ class RankingOptions(BaseModel):
     """The ranker to use for the file search."""
 
     score_threshold: Optional[float] = None
-    """
-    The score threshold for the file search, a number between 0 and 1. Numbers
-    closer to 1 will attempt to return only the most relevant results, but may
-    return fewer results.
+    """The score threshold for the file search, a number between 0 and 1.
+
+    Numbers closer to 1 will attempt to return only the most relevant results, but
+    may return fewer results.
     """
 
 
@@ -32,7 +32,7 @@ class FileSearchTool(BaseModel):
     """The IDs of the vector stores to search."""
 
     filters: Optional[Filters] = None
-    """A filter to apply based on file attributes."""
+    """A filter to apply."""
 
     max_num_results: Optional[int] = None
     """The maximum number of results to return.
src/openai/types/responses/file_search_tool_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, Union
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.compound_filter import CompoundFilter
@@ -18,10 +18,10 @@ class RankingOptions(TypedDict, total=False):
     """The ranker to use for the file search."""
 
     score_threshold: float
-    """
-    The score threshold for the file search, a number between 0 and 1. Numbers
-    closer to 1 will attempt to return only the most relevant results, but may
-    return fewer results.
+    """The score threshold for the file search, a number between 0 and 1.
+
+    Numbers closer to 1 will attempt to return only the most relevant results, but
+    may return fewer results.
     """
 
 
@@ -32,8 +32,8 @@ class FileSearchToolParam(TypedDict, total=False):
     vector_store_ids: Required[List[str]]
     """The IDs of the vector stores to search."""
 
-    filters: Filters
-    """A filter to apply based on file attributes."""
+    filters: Optional[Filters]
+    """A filter to apply."""
 
     max_num_results: int
     """The maximum number of results to return.
src/openai/types/responses/function_tool.py
@@ -12,10 +12,10 @@ class FunctionTool(BaseModel):
     name: str
     """The name of the function to call."""
 
-    parameters: Dict[str, object]
+    parameters: Optional[Dict[str, object]] = None
     """A JSON schema object describing the parameters of the function."""
 
-    strict: bool
+    strict: Optional[bool] = None
     """Whether to enforce strict parameter validation. Default `true`."""
 
     type: Literal["function"]
src/openai/types/responses/function_tool_param.py
@@ -12,10 +12,10 @@ class FunctionToolParam(TypedDict, total=False):
     name: Required[str]
     """The name of the function to call."""
 
-    parameters: Required[Dict[str, object]]
+    parameters: Required[Optional[Dict[str, object]]]
     """A JSON schema object describing the parameters of the function."""
 
-    strict: Required[bool]
+    strict: Required[Optional[bool]]
     """Whether to enforce strict parameter validation. Default `true`."""
 
     type: Required[Literal["function"]]
src/openai/types/responses/response_create_params.py
@@ -56,6 +56,11 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     - `message.input_image.image_url`: Include image urls from the input message.
     - `computer_call_output.output.image_url`: Include image urls from the computer
       call output.
+    - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+      tokens in reasoning item outputs. This enables reasoning items to be used in
+      multi-turn conversations when using the Responses API statelessly (like when
+      the `store` parameter is set to `false`, or when an organization is enrolled
+      in the zero data retention program).
     """
 
     instructions: Optional[str]
src/openai/types/responses/response_includable.py
@@ -5,5 +5,8 @@ from typing_extensions import Literal, TypeAlias
 __all__ = ["ResponseIncludable"]
 
 ResponseIncludable: TypeAlias = Literal[
-    "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url"
+    "file_search_call.results",
+    "message.input_image.image_url",
+    "computer_call_output.output.image_url",
+    "reasoning.encrypted_content",
 ]
src/openai/types/responses/response_input_file_param.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from typing import Optional
 from typing_extensions import Literal, Required, TypedDict
 
 __all__ = ["ResponseInputFileParam"]
@@ -14,7 +15,7 @@ class ResponseInputFileParam(TypedDict, total=False):
     file_data: str
     """The content of the file to be sent to the model."""
 
-    file_id: str
+    file_id: Optional[str]
     """The ID of the file to be sent to the model."""
 
     filename: str
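
A sketch of sending a file inline via `file_data` instead of `file_id` (file name and model are placeholders; the data-URI encoding is an assumption based on the input-file documentation, not this diff):

    import base64

    from openai import OpenAI

    client = OpenAI()

    with open("report.pdf", "rb") as f:  # placeholder document
        encoded = base64.b64encode(f.read()).decode()

    response = client.responses.create(
        model="gpt-4o",
        input=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "input_file",
                        "filename": "report.pdf",
                        # file_id can be omitted (or None) when file_data is inline
                        "file_data": f"data:application/pdf;base64,{encoded}",
                    },
                    {"type": "input_text", "text": "Summarize this document."},
                ],
            }
        ],
    )
    print(response.output_text)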
src/openai/types/responses/response_input_image.py
@@ -9,7 +9,7 @@ __all__ = ["ResponseInputImage"]
 
 
 class ResponseInputImage(BaseModel):
-    detail: Literal["high", "low", "auto"]
+    detail: Literal["low", "high", "auto"]
     """The detail level of the image to be sent to the model.
 
     One of `high`, `low`, or `auto`. Defaults to `auto`.
src/openai/types/responses/response_input_image_param.py
@@ -9,7 +9,7 @@ __all__ = ["ResponseInputImageParam"]
 
 
 class ResponseInputImageParam(TypedDict, total=False):
-    detail: Required[Literal["high", "low", "auto"]]
+    detail: Required[Literal["low", "high", "auto"]]
     """The detail level of the image to be sent to the model.
 
     One of `high`, `low`, or `auto`. Defaults to `auto`.
src/openai/types/responses/response_input_item_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .easy_input_message_param import EasyInputMessageParam
@@ -50,10 +50,10 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
     id: Required[str]
     """The ID of the pending safety check."""
 
-    code: Required[str]
+    code: Optional[str]
     """The type of the pending safety check."""
 
-    message: Required[str]
+    message: Optional[str]
     """Details about the pending safety check."""
 
 
@@ -67,16 +67,16 @@ class ComputerCallOutput(TypedDict, total=False):
     type: Required[Literal["computer_call_output"]]
     """The type of the computer tool call output. Always `computer_call_output`."""
 
-    id: str
+    id: Optional[str]
     """The ID of the computer tool call output."""
 
-    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    acknowledged_safety_checks: Optional[Iterable[ComputerCallOutputAcknowledgedSafetyCheck]]
     """
     The safety checks reported by the API that have been acknowledged by the
     developer.
     """
 
-    status: Literal["in_progress", "completed", "incomplete"]
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
     """The status of the message input.
 
     One of `in_progress`, `completed`, or `incomplete`. Populated when input items
@@ -94,13 +94,13 @@ class FunctionCallOutput(TypedDict, total=False):
     type: Required[Literal["function_call_output"]]
     """The type of the function tool call output. Always `function_call_output`."""
 
-    id: str
+    id: Optional[str]
     """The unique ID of the function tool call output.
 
     Populated when this item is returned via API.
     """
 
-    status: Literal["in_progress", "completed", "incomplete"]
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
     """The status of the item.
 
     One of `in_progress`, `completed`, or `incomplete`. Populated when items are
@@ -112,7 +112,7 @@ class ItemReference(TypedDict, total=False):
     id: Required[str]
     """The ID of the item to reference."""
 
-    type: Required[Literal["item_reference"]]
+    type: Optional[Literal["item_reference"]]
     """The type of item to reference. Always `item_reference`."""
 
 
src/openai/types/responses/response_input_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, Union, Iterable
+from typing import List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .easy_input_message_param import EasyInputMessageParam
@@ -51,10 +51,10 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
     id: Required[str]
     """The ID of the pending safety check."""
 
-    code: Required[str]
+    code: Optional[str]
     """The type of the pending safety check."""
 
-    message: Required[str]
+    message: Optional[str]
     """Details about the pending safety check."""
 
 
@@ -68,16 +68,16 @@ class ComputerCallOutput(TypedDict, total=False):
     type: Required[Literal["computer_call_output"]]
     """The type of the computer tool call output. Always `computer_call_output`."""
 
-    id: str
+    id: Optional[str]
     """The ID of the computer tool call output."""
 
-    acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck]
+    acknowledged_safety_checks: Optional[Iterable[ComputerCallOutputAcknowledgedSafetyCheck]]
     """
     The safety checks reported by the API that have been acknowledged by the
     developer.
     """
 
-    status: Literal["in_progress", "completed", "incomplete"]
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
     """The status of the message input.
 
     One of `in_progress`, `completed`, or `incomplete`. Populated when input items
@@ -95,13 +95,13 @@ class FunctionCallOutput(TypedDict, total=False):
     type: Required[Literal["function_call_output"]]
     """The type of the function tool call output. Always `function_call_output`."""
 
-    id: str
+    id: Optional[str]
     """The unique ID of the function tool call output.
 
     Populated when this item is returned via API.
     """
 
-    status: Literal["in_progress", "completed", "incomplete"]
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
     """The status of the item.
 
     One of `in_progress`, `completed`, or `incomplete`. Populated when items are
@@ -113,7 +113,7 @@ class ItemReference(TypedDict, total=False):
     id: Required[str]
     """The ID of the item to reference."""
 
-    type: Required[Literal["item_reference"]]
+    type: Optional[Literal["item_reference"]]
     """The type of item to reference. Always `item_reference`."""
 
 
src/openai/types/responses/response_reasoning_item.py
@@ -28,6 +28,12 @@ class ResponseReasoningItem(BaseModel):
     type: Literal["reasoning"]
     """The type of the object. Always `reasoning`."""
 
+    encrypted_content: Optional[str] = None
+    """
+    The encrypted content of the reasoning item - populated when a response is
+    generated with `reasoning.encrypted_content` in the `include` parameter.
+    """
+
     status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
     """The status of the item.
 
src/openai/types/responses/response_reasoning_item_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Iterable
+from typing import Iterable, Optional
 from typing_extensions import Literal, Required, TypedDict
 
 __all__ = ["ResponseReasoningItemParam", "Summary"]
@@ -28,6 +28,12 @@ class ResponseReasoningItemParam(TypedDict, total=False):
     type: Required[Literal["reasoning"]]
     """The type of the object. Always `reasoning`."""
 
+    encrypted_content: Optional[str]
+    """
+    The encrypted content of the reasoning item - populated when a response is
+    generated with `reasoning.encrypted_content` in the `include` parameter.
+    """
+
     status: Literal["in_progress", "completed", "incomplete"]
     """The status of the item.
 
src/openai/types/responses/tool.py
@@ -12,5 +12,5 @@ from .file_search_tool import FileSearchTool
 __all__ = ["Tool"]
 
 Tool: TypeAlias = Annotated[
-    Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type")
+    Union[FileSearchTool, FunctionTool, WebSearchTool, ComputerTool], PropertyInfo(discriminator="type")
 ]
src/openai/types/responses/tool_param.py
@@ -13,6 +13,6 @@ from ..chat.chat_completion_tool_param import ChatCompletionToolParam
 
 __all__ = ["ToolParam"]
 
-ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam]
+ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, WebSearchToolParam, ComputerToolParam]
 
 ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
src/openai/types/responses/web_search_tool.py
@@ -33,16 +33,17 @@ class UserLocation(BaseModel):
 
 class WebSearchTool(BaseModel):
     type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
-    """The type of the web search tool. One of:
+    """The type of the web search tool.
 
-    - `web_search_preview`
-    - `web_search_preview_2025_03_11`
+    One of `web_search_preview` or `web_search_preview_2025_03_11`.
     """
 
     search_context_size: Optional[Literal["low", "medium", "high"]] = None
-    """
-    High level guidance for the amount of context window space to use for the
-    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """High level guidance for the amount of context window space to use for the
+    search.
+
+    One of `low`, `medium`, or `high`. `medium` is the default.
     """
 
     user_location: Optional[UserLocation] = None
+    """The user's location."""
src/openai/types/responses/web_search_tool_param.py
@@ -12,19 +12,19 @@ class UserLocation(TypedDict, total=False):
     type: Required[Literal["approximate"]]
     """The type of location approximation. Always `approximate`."""
 
-    city: str
+    city: Optional[str]
     """Free text input for the city of the user, e.g. `San Francisco`."""
 
-    country: str
+    country: Optional[str]
     """
     The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
     the user, e.g. `US`.
     """
 
-    region: str
+    region: Optional[str]
     """Free text input for the region of the user, e.g. `California`."""
 
-    timezone: str
+    timezone: Optional[str]
     """
     The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
     user, e.g. `America/Los_Angeles`.
@@ -33,16 +33,17 @@ class UserLocation(TypedDict, total=False):
 
 class WebSearchToolParam(TypedDict, total=False):
     type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
-    """The type of the web search tool. One of:
+    """The type of the web search tool.
 
-    - `web_search_preview`
-    - `web_search_preview_2025_03_11`
+    One of `web_search_preview` or `web_search_preview_2025_03_11`.
     """
 
     search_context_size: Literal["low", "medium", "high"]
-    """
-    High level guidance for the amount of context window space to use for the
-    search. One of `low`, `medium`, or `high`. `medium` is the default.
+    """High level guidance for the amount of context window space to use for the
+    search.
+
+    One of `low`, `medium`, or `high`. `medium` is the default.
     """
 
     user_location: Optional[UserLocation]
+    """The user's location."""
src/openai/types/image_edit_params.py
@@ -13,12 +13,13 @@ __all__ = ["ImageEditParams"]
 
 class ImageEditParams(TypedDict, total=False):
     image: Required[Union[FileTypes, List[FileTypes]]]
-    """The image(s) to edit.
+    """The image(s) to edit. Must be a supported image file or an array of images.
 
-    Must be a supported image file or an array of images. For `gpt-image-1`, each
-    image should be a `png`, `webp`, or `jpg` file less than 25MB. For `dall-e-2`,
-    you can only provide one image, and it should be a square `png` file less than
-    4MB.
+    For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+    25MB. You can provide up to 16 images.
+
+    For `dall-e-2`, you can only provide one image, and it should be a square `png`
+    file less than 4MB.
     """
 
     prompt: Required[str]
@@ -28,6 +29,17 @@ class ImageEditParams(TypedDict, total=False):
     `gpt-image-1`.
     """
 
+    background: Optional[Literal["transparent", "opaque", "auto"]]
+    """Allows to set transparency for the background of the generated image(s).
+
+    This parameter is only supported for `gpt-image-1`. Must be one of
+    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+    model will automatically determine the best background for the image.
+
+    If `transparent`, the output format needs to support transparency, so it should
+    be set to either `png` (default value) or `webp`.
+    """
+
     mask: FileTypes
     """An additional image whose fully transparent areas (e.g.
 
@@ -61,7 +73,7 @@ class ImageEditParams(TypedDict, total=False):
     `gpt-image-1` will always return base64-encoded images.
     """
 
-    size: Optional[Literal["256x256", "512x512", "1024x1024"]]
+    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
     """The size of the generated images.
 
     Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
tests/api_resources/test_images.py
@@ -73,6 +73,7 @@ class TestImages:
         image = client.images.edit(
             image=b"raw file contents",
             prompt="A cute baby sea otter wearing a beret",
+            background="transparent",
             mask=b"raw file contents",
             model="string",
             n=1,
@@ -218,6 +219,7 @@ class TestAsyncImages:
         image = await async_client.images.edit(
             image=b"raw file contents",
             prompt="A cute baby sea otter wearing a beret",
+            background="transparent",
             mask=b"raw file contents",
             model="string",
             n=1,
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml
-openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03
-config_hash: b597cd9a31e9e5ec709e2eefb4c54122
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml
+openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6
+config_hash: d9b6b6e6bc85744663e300eebc482067