Commit 4a81b4ed

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-05-22 00:39:41
release: 1.80.0 (#2367) tag: v1.80.0
* codegen metadata * chore(docs): grammar improvements * feat(api): new API tools * release: 1.80.0 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent 5bc7307
src/openai/helpers/local_audio_player.py
@@ -65,7 +65,7 @@ class LocalAudioPlayer:
             if input.dtype == np.int16 and self.dtype == np.float32:
                 audio_content = (input.astype(np.float32) / 32767.0).reshape(-1, self.channels)
             elif input.dtype == np.float32:
-                audio_content = cast('npt.NDArray[np.float32]', input)
+                audio_content = cast("npt.NDArray[np.float32]", input)
             else:
                 raise ValueError(f"Unsupported dtype: {input.dtype}")
         else:
src/openai/lib/_parsing/_responses.py
@@ -103,6 +103,13 @@ def parse_response(
             or output.type == "file_search_call"
             or output.type == "web_search_call"
             or output.type == "reasoning"
+            or output.type == "mcp_call"
+            or output.type == "mcp_approval_request"
+            or output.type == "image_generation_call"
+            or output.type == "code_interpreter_call"
+            or output.type == "local_shell_call"
+            or output.type == "mcp_list_tools"
+            or output.type == "exec"
         ):
             output_list.append(output)
         elif TYPE_CHECKING:  # type: ignore
src/openai/lib/streaming/responses/_events.py
@@ -9,6 +9,7 @@ from ....types.responses import (
     ParsedResponse,
     ResponseErrorEvent,
     ResponseFailedEvent,
+    ResponseQueuedEvent,
     ResponseCreatedEvent,
     ResponseTextDoneEvent as RawResponseTextDoneEvent,
     ResponseAudioDoneEvent,
@@ -19,22 +20,39 @@ from ....types.responses import (
     ResponseInProgressEvent,
     ResponseRefusalDoneEvent,
     ResponseRefusalDeltaEvent,
+    ResponseMcpCallFailedEvent,
+    ResponseReasoningDoneEvent,
     ResponseOutputItemDoneEvent,
+    ResponseReasoningDeltaEvent,
     ResponseContentPartDoneEvent,
     ResponseOutputItemAddedEvent,
     ResponseContentPartAddedEvent,
+    ResponseMcpCallCompletedEvent,
+    ResponseMcpCallInProgressEvent,
+    ResponseMcpListToolsFailedEvent,
     ResponseAudioTranscriptDoneEvent,
     ResponseTextAnnotationDeltaEvent,
     ResponseAudioTranscriptDeltaEvent,
+    ResponseMcpCallArgumentsDoneEvent,
+    ResponseReasoningSummaryDoneEvent,
+    ResponseImageGenCallCompletedEvent,
+    ResponseMcpCallArgumentsDeltaEvent,
+    ResponseMcpListToolsCompletedEvent,
+    ResponseReasoningSummaryDeltaEvent,
+    ResponseImageGenCallGeneratingEvent,
+    ResponseImageGenCallInProgressEvent,
+    ResponseMcpListToolsInProgressEvent,
     ResponseWebSearchCallCompletedEvent,
     ResponseWebSearchCallSearchingEvent,
     ResponseFileSearchCallCompletedEvent,
     ResponseFileSearchCallSearchingEvent,
     ResponseWebSearchCallInProgressEvent,
     ResponseFileSearchCallInProgressEvent,
+    ResponseImageGenCallPartialImageEvent,
     ResponseReasoningSummaryPartDoneEvent,
     ResponseReasoningSummaryTextDoneEvent,
     ResponseFunctionCallArgumentsDoneEvent,
+    ResponseOutputTextAnnotationAddedEvent,
     ResponseReasoningSummaryPartAddedEvent,
     ResponseReasoningSummaryTextDeltaEvent,
     ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent,
@@ -109,6 +127,24 @@ ResponseStreamEvent: TypeAlias = Annotated[
         ResponseReasoningSummaryPartDoneEvent,
         ResponseReasoningSummaryTextDeltaEvent,
         ResponseReasoningSummaryTextDoneEvent,
+        ResponseImageGenCallCompletedEvent,
+        ResponseImageGenCallInProgressEvent,
+        ResponseImageGenCallGeneratingEvent,
+        ResponseImageGenCallPartialImageEvent,
+        ResponseMcpCallCompletedEvent,
+        ResponseMcpCallArgumentsDeltaEvent,
+        ResponseMcpCallArgumentsDoneEvent,
+        ResponseMcpCallFailedEvent,
+        ResponseMcpCallInProgressEvent,
+        ResponseMcpListToolsCompletedEvent,
+        ResponseMcpListToolsFailedEvent,
+        ResponseMcpListToolsInProgressEvent,
+        ResponseOutputTextAnnotationAddedEvent,
+        ResponseQueuedEvent,
+        ResponseReasoningDeltaEvent,
+        ResponseReasoningSummaryDeltaEvent,
+        ResponseReasoningSummaryDoneEvent,
+        ResponseReasoningDoneEvent,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/resources/audio/transcriptions.py
@@ -449,7 +449,7 @@ class AsyncTranscriptions(AsyncAPIResource):
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
-          """
+        """
 
     @overload
     async def create(
src/openai/resources/responses/responses.py
@@ -77,6 +77,7 @@ class Responses(SyncAPIResource):
         *,
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -132,6 +133,9 @@ class Responses(SyncAPIResource):
               [model guide](https://platform.openai.com/docs/models) to browse and compare
               available models.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -267,6 +271,7 @@ class Responses(SyncAPIResource):
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
         stream: Literal[True],
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -328,6 +333,9 @@ class Responses(SyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -456,6 +464,7 @@ class Responses(SyncAPIResource):
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
         stream: bool,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -517,6 +526,9 @@ class Responses(SyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -644,6 +656,7 @@ class Responses(SyncAPIResource):
         *,
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -674,6 +687,7 @@ class Responses(SyncAPIResource):
                 {
                     "input": input,
                     "model": model,
+                    "background": background,
                     "include": include,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
@@ -965,6 +979,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1020,6 +1035,9 @@ class AsyncResponses(AsyncAPIResource):
               [model guide](https://platform.openai.com/docs/models) to browse and compare
               available models.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -1155,6 +1173,7 @@ class AsyncResponses(AsyncAPIResource):
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
         stream: Literal[True],
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1216,6 +1235,9 @@ class AsyncResponses(AsyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -1344,6 +1366,7 @@ class AsyncResponses(AsyncAPIResource):
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
         stream: bool,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1405,6 +1428,9 @@ class AsyncResponses(AsyncAPIResource):
               [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
               for more information.
 
+          background: Whether to run the model response in the background.
+              [Learn more](https://platform.openai.com/docs/guides/background).
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -1532,6 +1558,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         input: Union[str, ResponseInputParam],
         model: ResponsesModel,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1562,6 +1589,7 @@ class AsyncResponses(AsyncAPIResource):
                 {
                     "input": input,
                     "model": model,
+                    "background": background,
                     "include": include,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
src/openai/types/responses/__init__.py
@@ -38,6 +38,7 @@ from .response_output_text import ResponseOutputText as ResponseOutputText
 from .response_text_config import ResponseTextConfig as ResponseTextConfig
 from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction
 from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent
+from .response_queued_event import ResponseQueuedEvent as ResponseQueuedEvent
 from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent
 from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam
 from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
@@ -75,8 +76,11 @@ from .response_input_content_param import ResponseInputContentParam as ResponseI
 from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent
 from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam
 from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam
+from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent
 from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam
 from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
+from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent
+from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent
 from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
 from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
@@ -85,15 +89,27 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam as
 from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
 from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam
+from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent
 from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam
 from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall
 from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList
+from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent
 from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam
+from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent
 from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent
 from .response_audio_transcript_delta_event import (
     ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
 )
+from .response_reasoning_summary_done_event import (
+    ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent,
+)
+from .response_mcp_call_arguments_done_event import (
+    ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent,
+)
+from .response_reasoning_summary_delta_event import (
+    ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent,
+)
 from .response_computer_tool_call_output_item import (
     ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,
 )
@@ -103,21 +119,42 @@ from .response_format_text_json_schema_config import (
 from .response_function_tool_call_output_item import (
     ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,
 )
+from .response_image_gen_call_completed_event import (
+    ResponseImageGenCallCompletedEvent as ResponseImageGenCallCompletedEvent,
+)
+from .response_mcp_call_arguments_delta_event import (
+    ResponseMcpCallArgumentsDeltaEvent as ResponseMcpCallArgumentsDeltaEvent,
+)
+from .response_mcp_list_tools_completed_event import (
+    ResponseMcpListToolsCompletedEvent as ResponseMcpListToolsCompletedEvent,
+)
+from .response_image_gen_call_generating_event import (
+    ResponseImageGenCallGeneratingEvent as ResponseImageGenCallGeneratingEvent,
+)
 from .response_web_search_call_completed_event import (
     ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,
 )
 from .response_web_search_call_searching_event import (
     ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
 )
+from .response_code_interpreter_tool_call_param import (
+    ResponseCodeInterpreterToolCallParam as ResponseCodeInterpreterToolCallParam,
+)
 from .response_file_search_call_completed_event import (
     ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent,
 )
 from .response_file_search_call_searching_event import (
     ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent,
 )
+from .response_image_gen_call_in_progress_event import (
+    ResponseImageGenCallInProgressEvent as ResponseImageGenCallInProgressEvent,
+)
 from .response_input_message_content_list_param import (
     ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
 )
+from .response_mcp_list_tools_in_progress_event import (
+    ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent,
+)
 from .response_reasoning_summary_part_done_event import (
     ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent,
 )
@@ -133,6 +170,12 @@ from .response_file_search_call_in_progress_event import (
 from .response_function_call_arguments_done_event import (
     ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
 )
+from .response_image_gen_call_partial_image_event import (
+    ResponseImageGenCallPartialImageEvent as ResponseImageGenCallPartialImageEvent,
+)
+from .response_output_text_annotation_added_event import (
+    ResponseOutputTextAnnotationAddedEvent as ResponseOutputTextAnnotationAddedEvent,
+)
 from .response_reasoning_summary_part_added_event import (
     ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent,
 )
src/openai/types/responses/parsed_response.py
@@ -7,6 +7,14 @@ from ..._utils import PropertyInfo
 from .response import Response
 from ..._models import GenericModel
 from ..._utils._transform import PropertyInfo
+from .response_output_item import (
+    McpCall,
+    McpListTools,
+    LocalShellCall,
+    McpApprovalRequest,
+    ImageGenerationCall,
+    LocalShellCallAction,
+)
 from .response_output_text import ResponseOutputText
 from .response_output_message import ResponseOutputMessage
 from .response_output_refusal import ResponseOutputRefusal
@@ -15,6 +23,7 @@ from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
 from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
 
 __all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"]
 
@@ -55,6 +64,13 @@ ParsedResponseOutputItem: TypeAlias = Annotated[
         ResponseFunctionWebSearch,
         ResponseComputerToolCall,
         ResponseReasoningItem,
+        McpCall,
+        McpApprovalRequest,
+        ImageGenerationCall,
+        LocalShellCall,
+        LocalShellCallAction,
+        McpListTools,
+        ResponseCodeInterpreterToolCall,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/response.py
@@ -128,6 +128,12 @@ class Response(BaseModel):
     We generally recommend altering this or `temperature` but not both.
     """
 
+    background: Optional[bool] = None
+    """Whether to run the model response in the background.
+
+    [Learn more](https://platform.openai.com/docs/guides/background).
+    """
+
     max_output_tokens: Optional[int] = None
     """
     An upper bound for the number of tokens that can be generated for a response,
@@ -173,7 +179,8 @@ class Response(BaseModel):
     status: Optional[ResponseStatus] = None
     """The status of the response generation.
 
-    One of `completed`, `failed`, `in_progress`, or `incomplete`.
+    One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
+    `incomplete`.
     """
 
     text: Optional[ResponseTextConfig] = None
src/openai/types/responses/response_code_interpreter_tool_call.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union
+from typing import List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
@@ -50,3 +50,6 @@ class ResponseCodeInterpreterToolCall(BaseModel):
 
     type: Literal["code_interpreter_call"]
     """The type of the code interpreter tool call. Always `code_interpreter_call`."""
+
+    container_id: Optional[str] = None
+    """The ID of the container used to run the code."""
src/openai/types/responses/response_code_interpreter_tool_call_param.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["ResponseCodeInterpreterToolCallParam", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+
+
+class ResultLogs(TypedDict, total=False):
+    logs: Required[str]
+    """The logs of the code interpreter tool call."""
+
+    type: Required[Literal["logs"]]
+    """The type of the code interpreter text output. Always `logs`."""
+
+
+class ResultFilesFile(TypedDict, total=False):
+    file_id: Required[str]
+    """The ID of the file."""
+
+    mime_type: Required[str]
+    """The MIME type of the file."""
+
+
+class ResultFiles(TypedDict, total=False):
+    files: Required[Iterable[ResultFilesFile]]
+
+    type: Required[Literal["files"]]
+    """The type of the code interpreter file output. Always `files`."""
+
+
+Result: TypeAlias = Union[ResultLogs, ResultFiles]
+
+
+class ResponseCodeInterpreterToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the code interpreter tool call."""
+
+    code: Required[str]
+    """The code to run."""
+
+    results: Required[Iterable[Result]]
+    """The results of the code interpreter tool call."""
+
+    status: Required[Literal["in_progress", "interpreting", "completed"]]
+    """The status of the code interpreter tool call."""
+
+    type: Required[Literal["code_interpreter_call"]]
+    """The type of the code interpreter tool call. Always `code_interpreter_call`."""
+
+    container_id: str
+    """The ID of the container used to run the code."""
src/openai/types/responses/response_create_params.py
@@ -46,6 +46,12 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     available models.
     """
 
+    background: Optional[bool]
+    """Whether to run the model response in the background.
+
+    [Learn more](https://platform.openai.com/docs/guides/background).
+    """
+
     include: Optional[List[ResponseIncludable]]
     """Specify additional output data to include in the model response.
 
src/openai/types/responses/response_image_gen_call_completed_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseImageGenCallCompletedEvent"]
+
+
+class ResponseImageGenCallCompletedEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the image generation item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.image_generation_call.completed"]
+    """The type of the event. Always 'response.image_generation_call.completed'."""
src/openai/types/responses/response_image_gen_call_generating_event.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseImageGenCallGeneratingEvent"]
+
+
+class ResponseImageGenCallGeneratingEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the image generation item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.image_generation_call.generating"]
+    """The type of the event. Always 'response.image_generation_call.generating'."""
+
+    sequence_number: Optional[int] = None
+    """The sequence number of the image generation item being processed."""
src/openai/types/responses/response_image_gen_call_in_progress_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseImageGenCallInProgressEvent"]
+
+
+class ResponseImageGenCallInProgressEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the image generation item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    sequence_number: int
+    """The sequence number of the image generation item being processed."""
+
+    type: Literal["response.image_generation_call.in_progress"]
+    """The type of the event. Always 'response.image_generation_call.in_progress'."""
src/openai/types/responses/response_image_gen_call_partial_image_event.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseImageGenCallPartialImageEvent"]
+
+
+class ResponseImageGenCallPartialImageEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the image generation item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    partial_image_b64: str
+    """Base64-encoded partial image data, suitable for rendering as an image."""
+
+    partial_image_index: int
+    """
+    0-based index for the partial image (backend is 1-based, but this is 0-based for
+    the user).
+    """
+
+    sequence_number: int
+    """The sequence number of the image generation item being processed."""
+
+    type: Literal["response.image_generation_call.partial_image"]
+    """The type of the event. Always 'response.image_generation_call.partial_image'."""
src/openai/types/responses/response_input_item_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable, Optional
+from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .easy_input_message_param import EasyInputMessageParam
@@ -12,6 +12,7 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
 
@@ -21,6 +22,15 @@ __all__ = [
     "ComputerCallOutput",
     "ComputerCallOutputAcknowledgedSafetyCheck",
     "FunctionCallOutput",
+    "ImageGenerationCall",
+    "LocalShellCall",
+    "LocalShellCallAction",
+    "LocalShellCallOutput",
+    "McpListTools",
+    "McpListToolsTool",
+    "McpApprovalRequest",
+    "McpApprovalResponse",
+    "McpCall",
     "ItemReference",
 ]
 
@@ -108,6 +118,159 @@ class FunctionCallOutput(TypedDict, total=False):
     """
 
 
+class ImageGenerationCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the image generation call."""
+
+    result: Required[Optional[str]]
+    """The generated image encoded in base64."""
+
+    status: Required[Literal["in_progress", "completed", "generating", "failed"]]
+    """The status of the image generation call."""
+
+    type: Required[Literal["image_generation_call"]]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(TypedDict, total=False):
+    command: Required[List[str]]
+    """The command to run."""
+
+    env: Required[Dict[str, str]]
+    """Environment variables to set for the command."""
+
+    type: Required[Literal["exec"]]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int]
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str]
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str]
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the local shell call."""
+
+    action: Required[LocalShellCallAction]
+    """Execute a shell command on the server."""
+
+    call_id: Required[str]
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the local shell call."""
+
+    type: Required[Literal["local_shell_call"]]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the local shell tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the local shell tool call."""
+
+    type: Required[Literal["local_shell_call_output"]]
+    """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(TypedDict, total=False):
+    input_schema: Required[object]
+    """The JSON schema describing the tool's input."""
+
+    name: Required[str]
+    """The name of the tool."""
+
+    annotations: Optional[object]
+    """Additional annotations about the tool."""
+
+    description: Optional[str]
+    """The description of the tool."""
+
+
+class McpListTools(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the list."""
+
+    server_label: Required[str]
+    """The label of the MCP server."""
+
+    tools: Required[Iterable[McpListToolsTool]]
+    """The tools available on the server."""
+
+    type: Required[Literal["mcp_list_tools"]]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str]
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the approval request."""
+
+    arguments: Required[str]
+    """A JSON string of arguments for the tool."""
+
+    name: Required[str]
+    """The name of the tool to run."""
+
+    server_label: Required[str]
+    """The label of the MCP server making the request."""
+
+    type: Required[Literal["mcp_approval_request"]]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(TypedDict, total=False):
+    approval_request_id: Required[str]
+    """The ID of the approval request being answered."""
+
+    approve: Required[bool]
+    """Whether the request was approved."""
+
+    type: Required[Literal["mcp_approval_response"]]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    id: Optional[str]
+    """The unique ID of the approval response"""
+
+    reason: Optional[str]
+    """Optional reason for the decision."""
+
+
+class McpCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the tool call."""
+
+    arguments: Required[str]
+    """A JSON string of the arguments passed to the tool."""
+
+    name: Required[str]
+    """The name of the tool that was run."""
+
+    server_label: Required[str]
+    """The label of the MCP server running the tool."""
+
+    type: Required[Literal["mcp_call"]]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str]
+    """The error from the tool call, if any."""
+
+    output: Optional[str]
+    """The output from the tool call."""
+
+
 class ItemReference(TypedDict, total=False):
     id: Required[str]
     """The ID of the item to reference."""
@@ -127,5 +290,13 @@ ResponseInputItemParam: TypeAlias = Union[
     ResponseFunctionToolCallParam,
     FunctionCallOutput,
     ResponseReasoningItemParam,
+    ImageGenerationCall,
+    ResponseCodeInterpreterToolCallParam,
+    LocalShellCall,
+    LocalShellCallOutput,
+    McpListTools,
+    McpApprovalRequest,
+    McpApprovalResponse,
+    McpCall,
     ItemReference,
 ]
src/openai/types/responses/response_input_param.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, Union, Iterable, Optional
+from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .easy_input_message_param import EasyInputMessageParam
@@ -12,6 +12,7 @@ from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
 
@@ -22,6 +23,15 @@ __all__ = [
     "ComputerCallOutput",
     "ComputerCallOutputAcknowledgedSafetyCheck",
     "FunctionCallOutput",
+    "ImageGenerationCall",
+    "LocalShellCall",
+    "LocalShellCallAction",
+    "LocalShellCallOutput",
+    "McpListTools",
+    "McpListToolsTool",
+    "McpApprovalRequest",
+    "McpApprovalResponse",
+    "McpCall",
     "ItemReference",
 ]
 
@@ -109,6 +119,159 @@ class FunctionCallOutput(TypedDict, total=False):
     """
 
 
+class ImageGenerationCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the image generation call."""
+
+    result: Required[Optional[str]]
+    """The generated image encoded in base64."""
+
+    status: Required[Literal["in_progress", "completed", "generating", "failed"]]
+    """The status of the image generation call."""
+
+    type: Required[Literal["image_generation_call"]]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(TypedDict, total=False):
+    command: Required[List[str]]
+    """The command to run."""
+
+    env: Required[Dict[str, str]]
+    """Environment variables to set for the command."""
+
+    type: Required[Literal["exec"]]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int]
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str]
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str]
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the local shell call."""
+
+    action: Required[LocalShellCallAction]
+    """Execute a shell command on the server."""
+
+    call_id: Required[str]
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Required[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the local shell call."""
+
+    type: Required[Literal["local_shell_call"]]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the local shell tool call generated by the model."""
+
+    output: Required[str]
+    """A JSON string of the output of the local shell tool call."""
+
+    type: Required[Literal["local_shell_call_output"]]
+    """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]]
+    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(TypedDict, total=False):
+    input_schema: Required[object]
+    """The JSON schema describing the tool's input."""
+
+    name: Required[str]
+    """The name of the tool."""
+
+    annotations: Optional[object]
+    """Additional annotations about the tool."""
+
+    description: Optional[str]
+    """The description of the tool."""
+
+
+class McpListTools(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the list."""
+
+    server_label: Required[str]
+    """The label of the MCP server."""
+
+    tools: Required[Iterable[McpListToolsTool]]
+    """The tools available on the server."""
+
+    type: Required[Literal["mcp_list_tools"]]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str]
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the approval request."""
+
+    arguments: Required[str]
+    """A JSON string of arguments for the tool."""
+
+    name: Required[str]
+    """The name of the tool to run."""
+
+    server_label: Required[str]
+    """The label of the MCP server making the request."""
+
+    type: Required[Literal["mcp_approval_request"]]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(TypedDict, total=False):
+    approval_request_id: Required[str]
+    """The ID of the approval request being answered."""
+
+    approve: Required[bool]
+    """Whether the request was approved."""
+
+    type: Required[Literal["mcp_approval_response"]]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    id: Optional[str]
+    """The unique ID of the approval response."""
+
+    reason: Optional[str]
+    """Optional reason for the decision."""
+
+
+class McpCall(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the tool call."""
+
+    arguments: Required[str]
+    """A JSON string of the arguments passed to the tool."""
+
+    name: Required[str]
+    """The name of the tool that was run."""
+
+    server_label: Required[str]
+    """The label of the MCP server running the tool."""
+
+    type: Required[Literal["mcp_call"]]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str]
+    """The error from the tool call, if any."""
+
+    output: Optional[str]
+    """The output from the tool call."""
+
+
 class ItemReference(TypedDict, total=False):
     id: Required[str]
     """The ID of the item to reference."""
@@ -128,6 +291,14 @@ ResponseInputItemParam: TypeAlias = Union[
     ResponseFunctionToolCallParam,
     FunctionCallOutput,
     ResponseReasoningItemParam,
+    ImageGenerationCall,
+    ResponseCodeInterpreterToolCallParam,
+    LocalShellCall,
+    LocalShellCallOutput,
+    McpListTools,
+    McpApprovalRequest,
+    McpApprovalResponse,
+    McpCall,
     ItemReference,
 ]
 
src/openai/types/responses/response_item.py
@@ -1,19 +1,186 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Union
-from typing_extensions import Annotated, TypeAlias
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
+from ..._models import BaseModel
 from .response_output_message import ResponseOutputMessage
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_input_message_item import ResponseInputMessageItem
 from .response_function_web_search import ResponseFunctionWebSearch
 from .response_file_search_tool_call import ResponseFileSearchToolCall
 from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
 from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
 from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
 
-__all__ = ["ResponseItem"]
+__all__ = [
+    "ResponseItem",
+    "ImageGenerationCall",
+    "LocalShellCall",
+    "LocalShellCallAction",
+    "LocalShellCallOutput",
+    "McpListTools",
+    "McpListToolsTool",
+    "McpApprovalRequest",
+    "McpApprovalResponse",
+    "McpCall",
+]
+
+
+class ImageGenerationCall(BaseModel):
+    id: str
+    """The unique ID of the image generation call."""
+
+    result: Optional[str] = None
+    """The generated image encoded in base64."""
+
+    status: Literal["in_progress", "completed", "generating", "failed"]
+    """The status of the image generation call."""
+
+    type: Literal["image_generation_call"]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(BaseModel):
+    command: List[str]
+    """The command to run."""
+
+    env: Dict[str, str]
+    """Environment variables to set for the command."""
+
+    type: Literal["exec"]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int] = None
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str] = None
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str] = None
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(BaseModel):
+    id: str
+    """The unique ID of the local shell call."""
+
+    action: LocalShellCallAction
+    """Execute a shell command on the server."""
+
+    call_id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the local shell call."""
+
+    type: Literal["local_shell_call"]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(BaseModel):
+    id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the local shell tool call."""
+
+    type: Literal["local_shell_call_output"]
+    """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(BaseModel):
+    input_schema: object
+    """The JSON schema describing the tool's input."""
+
+    name: str
+    """The name of the tool."""
+
+    annotations: Optional[object] = None
+    """Additional annotations about the tool."""
+
+    description: Optional[str] = None
+    """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+    id: str
+    """The unique ID of the list."""
+
+    server_label: str
+    """The label of the MCP server."""
+
+    tools: List[McpListToolsTool]
+    """The tools available on the server."""
+
+    type: Literal["mcp_list_tools"]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str] = None
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+    id: str
+    """The unique ID of the approval request."""
+
+    arguments: str
+    """A JSON string of arguments for the tool."""
+
+    name: str
+    """The name of the tool to run."""
+
+    server_label: str
+    """The label of the MCP server making the request."""
+
+    type: Literal["mcp_approval_request"]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(BaseModel):
+    id: str
+    """The unique ID of the approval response."""
+
+    approval_request_id: str
+    """The ID of the approval request being answered."""
+
+    approve: bool
+    """Whether the request was approved."""
+
+    type: Literal["mcp_approval_response"]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    reason: Optional[str] = None
+    """Optional reason for the decision."""
+
+
+class McpCall(BaseModel):
+    id: str
+    """The unique ID of the tool call."""
+
+    arguments: str
+    """A JSON string of the arguments passed to the tool."""
+
+    name: str
+    """The name of the tool that was run."""
+
+    server_label: str
+    """The label of the MCP server running the tool."""
+
+    type: Literal["mcp_call"]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str] = None
+    """The error from the tool call, if any."""
+
+    output: Optional[str] = None
+    """The output from the tool call."""
+
 
 ResponseItem: TypeAlias = Annotated[
     Union[
@@ -25,6 +192,14 @@ ResponseItem: TypeAlias = Annotated[
         ResponseFunctionWebSearch,
         ResponseFunctionToolCallItem,
         ResponseFunctionToolCallOutputItem,
+        ImageGenerationCall,
+        ResponseCodeInterpreterToolCall,
+        LocalShellCall,
+        LocalShellCallOutput,
+        McpListTools,
+        McpApprovalRequest,
+        McpApprovalResponse,
+        McpCall,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpCallArgumentsDeltaEvent"]
+
+
+class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
+    delta: object
+    """The partial update to the arguments for the MCP tool call."""
+
+    item_id: str
+    """The unique identifier of the MCP tool call item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.mcp_call.arguments_delta"]
+    """The type of the event. Always 'response.mcp_call.arguments_delta'."""
src/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpCallArgumentsDoneEvent"]
+
+
+class ResponseMcpCallArgumentsDoneEvent(BaseModel):
+    arguments: object
+    """The finalized arguments for the MCP tool call."""
+
+    item_id: str
+    """The unique identifier of the MCP tool call item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.mcp_call.arguments_done"]
+    """The type of the event. Always 'response.mcp_call.arguments_done'."""
src/openai/types/responses/response_mcp_call_completed_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpCallCompletedEvent"]
+
+
+class ResponseMcpCallCompletedEvent(BaseModel):
+    type: Literal["response.mcp_call.completed"]
+    """The type of the event. Always 'response.mcp_call.completed'."""
src/openai/types/responses/response_mcp_call_failed_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpCallFailedEvent"]
+
+
+class ResponseMcpCallFailedEvent(BaseModel):
+    type: Literal["response.mcp_call.failed"]
+    """The type of the event. Always 'response.mcp_call.failed'."""
src/openai/types/responses/response_mcp_call_in_progress_event.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpCallInProgressEvent"]
+
+
+class ResponseMcpCallInProgressEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the MCP tool call item being processed."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.mcp_call.in_progress"]
+    """The type of the event. Always 'response.mcp_call.in_progress'."""
src/openai/types/responses/response_mcp_list_tools_completed_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpListToolsCompletedEvent"]
+
+
+class ResponseMcpListToolsCompletedEvent(BaseModel):
+    type: Literal["response.mcp_list_tools.completed"]
+    """The type of the event. Always 'response.mcp_list_tools.completed'."""
src/openai/types/responses/response_mcp_list_tools_failed_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpListToolsFailedEvent"]
+
+
+class ResponseMcpListToolsFailedEvent(BaseModel):
+    type: Literal["response.mcp_list_tools.failed"]
+    """The type of the event. Always 'response.mcp_list_tools.failed'."""
src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseMcpListToolsInProgressEvent"]
+
+
+class ResponseMcpListToolsInProgressEvent(BaseModel):
+    type: Literal["response.mcp_list_tools.in_progress"]
+    """The type of the event. Always 'response.mcp_list_tools.in_progress'."""
src/openai/types/responses/response_output_item.py
@@ -1,17 +1,151 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Union
-from typing_extensions import Annotated, TypeAlias
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
+from ..._models import BaseModel
 from .response_output_message import ResponseOutputMessage
 from .response_reasoning_item import ResponseReasoningItem
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
 from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+
+__all__ = [
+    "ResponseOutputItem",
+    "ImageGenerationCall",
+    "LocalShellCall",
+    "LocalShellCallAction",
+    "McpCall",
+    "McpListTools",
+    "McpListToolsTool",
+    "McpApprovalRequest",
+]
+
+
+class ImageGenerationCall(BaseModel):
+    id: str
+    """The unique ID of the image generation call."""
+
+    result: Optional[str] = None
+    """The generated image encoded in base64."""
+
+    status: Literal["in_progress", "completed", "generating", "failed"]
+    """The status of the image generation call."""
+
+    type: Literal["image_generation_call"]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(BaseModel):
+    command: List[str]
+    """The command to run."""
+
+    env: Dict[str, str]
+    """Environment variables to set for the command."""
+
+    type: Literal["exec"]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int] = None
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str] = None
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str] = None
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(BaseModel):
+    id: str
+    """The unique ID of the local shell call."""
+
+    action: LocalShellCallAction
+    """Execute a shell command on the server."""
+
+    call_id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the local shell call."""
+
+    type: Literal["local_shell_call"]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class McpCall(BaseModel):
+    id: str
+    """The unique ID of the tool call."""
+
+    arguments: str
+    """A JSON string of the arguments passed to the tool."""
+
+    name: str
+    """The name of the tool that was run."""
+
+    server_label: str
+    """The label of the MCP server running the tool."""
+
+    type: Literal["mcp_call"]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str] = None
+    """The error from the tool call, if any."""
+
+    output: Optional[str] = None
+    """The output from the tool call."""
+
+
+class McpListToolsTool(BaseModel):
+    input_schema: object
+    """The JSON schema describing the tool's input."""
+
+    name: str
+    """The name of the tool."""
+
+    annotations: Optional[object] = None
+    """Additional annotations about the tool."""
+
+    description: Optional[str] = None
+    """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+    id: str
+    """The unique ID of the list."""
+
+    server_label: str
+    """The label of the MCP server."""
+
+    tools: List[McpListToolsTool]
+    """The tools available on the server."""
+
+    type: Literal["mcp_list_tools"]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str] = None
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+    id: str
+    """The unique ID of the approval request."""
+
+    arguments: str
+    """A JSON string of arguments for the tool."""
+
+    name: str
+    """The name of the tool to run."""
+
+    server_label: str
+    """The label of the MCP server making the request."""
+
+    type: Literal["mcp_approval_request"]
+    """The type of the item. Always `mcp_approval_request`."""
 
-__all__ = ["ResponseOutputItem"]
 
 ResponseOutputItem: TypeAlias = Annotated[
     Union[
@@ -21,6 +155,12 @@ ResponseOutputItem: TypeAlias = Annotated[
         ResponseFunctionWebSearch,
         ResponseComputerToolCall,
         ResponseReasoningItem,
+        ImageGenerationCall,
+        ResponseCodeInterpreterToolCall,
+        LocalShellCall,
+        McpCall,
+        McpListTools,
+        McpApprovalRequest,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/response_output_text_annotation_added_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseOutputTextAnnotationAddedEvent"]
+
+
+class ResponseOutputTextAnnotationAddedEvent(BaseModel):
+    annotation: object
+    """The annotation object being added. (See annotation schema for details.)"""
+
+    annotation_index: int
+    """The index of the annotation within the content part."""
+
+    content_index: int
+    """The index of the content part within the output item."""
+
+    item_id: str
+    """The unique identifier of the item to which the annotation is being added."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.output_text_annotation.added"]
+    """The type of the event. Always 'response.output_text_annotation.added'."""
src/openai/types/responses/response_queued_event.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .response import Response
+from ..._models import BaseModel
+
+__all__ = ["ResponseQueuedEvent"]
+
+
+class ResponseQueuedEvent(BaseModel):
+    response: Response
+    """The full response object that is queued."""
+
+    type: Literal["response.queued"]
+    """The type of the event. Always 'response.queued'."""
src/openai/types/responses/response_reasoning_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningDeltaEvent"]
+
+
+class ResponseReasoningDeltaEvent(BaseModel):
+    content_index: int
+    """The index of the reasoning content part within the output item."""
+
+    delta: object
+    """The partial update to the reasoning content."""
+
+    item_id: str
+    """The unique identifier of the item for which reasoning is being updated."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    type: Literal["response.reasoning.delta"]
+    """The type of the event. Always 'response.reasoning.delta'."""
src/openai/types/responses/response_reasoning_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningDoneEvent"]
+
+
+class ResponseReasoningDoneEvent(BaseModel):
+    content_index: int
+    """The index of the reasoning content part within the output item."""
+
+    item_id: str
+    """The unique identifier of the item for which reasoning is finalized."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    text: str
+    """The finalized reasoning text."""
+
+    type: Literal["response.reasoning.done"]
+    """The type of the event. Always 'response.reasoning.done'."""
src/openai/types/responses/response_reasoning_summary_delta_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryDeltaEvent"]
+
+
+class ResponseReasoningSummaryDeltaEvent(BaseModel):
+    delta: object
+    """The partial update to the reasoning summary content."""
+
+    item_id: str
+    """
+    The unique identifier of the item for which the reasoning summary is being
+    updated.
+    """
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    summary_index: int
+    """The index of the summary part within the output item."""
+
+    type: Literal["response.reasoning_summary.delta"]
+    """The type of the event. Always 'response.reasoning_summary.delta'."""
src/openai/types/responses/response_reasoning_summary_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryDoneEvent"]
+
+
+class ResponseReasoningSummaryDoneEvent(BaseModel):
+    item_id: str
+    """The unique identifier of the item for which the reasoning summary is finalized."""
+
+    output_index: int
+    """The index of the output item in the response's output array."""
+
+    summary_index: int
+    """The index of the summary part within the output item."""
+
+    text: str
+    """The finalized reasoning summary text."""
+
+    type: Literal["response.reasoning_summary.done"]
+    """The type of the event. Always 'response.reasoning_summary.done'."""
src/openai/types/responses/response_status.py
@@ -4,4 +4,4 @@ from typing_extensions import Literal, TypeAlias
 
 __all__ = ["ResponseStatus"]
 
-ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"]
+ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"]
src/openai/types/responses/response_stream_event.py
@@ -6,6 +6,7 @@ from typing_extensions import Annotated, TypeAlias
 from ..._utils import PropertyInfo
 from .response_error_event import ResponseErrorEvent
 from .response_failed_event import ResponseFailedEvent
+from .response_queued_event import ResponseQueuedEvent
 from .response_created_event import ResponseCreatedEvent
 from .response_completed_event import ResponseCompletedEvent
 from .response_text_done_event import ResponseTextDoneEvent
@@ -16,22 +17,39 @@ from .response_audio_delta_event import ResponseAudioDeltaEvent
 from .response_in_progress_event import ResponseInProgressEvent
 from .response_refusal_done_event import ResponseRefusalDoneEvent
 from .response_refusal_delta_event import ResponseRefusalDeltaEvent
+from .response_reasoning_done_event import ResponseReasoningDoneEvent
+from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent
+from .response_reasoning_delta_event import ResponseReasoningDeltaEvent
 from .response_output_item_done_event import ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent
 from .response_output_item_added_event import ResponseOutputItemAddedEvent
 from .response_content_part_added_event import ResponseContentPartAddedEvent
+from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent
+from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent
 from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
+from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent
 from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent
 from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
+from .response_reasoning_summary_done_event import ResponseReasoningSummaryDoneEvent
+from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent
+from .response_reasoning_summary_delta_event import ResponseReasoningSummaryDeltaEvent
+from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent
+from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent
+from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent
+from .response_image_gen_call_generating_event import ResponseImageGenCallGeneratingEvent
 from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent
 from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
 from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
 from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent
+from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent
 from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
 from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
 from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
 from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
 from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent
+from .response_output_text_annotation_added_event import ResponseOutputTextAnnotationAddedEvent
 from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent
 from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent
 from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
@@ -81,6 +99,24 @@ ResponseStreamEvent: TypeAlias = Annotated[
         ResponseWebSearchCallCompletedEvent,
         ResponseWebSearchCallInProgressEvent,
         ResponseWebSearchCallSearchingEvent,
+        ResponseImageGenCallCompletedEvent,
+        ResponseImageGenCallGeneratingEvent,
+        ResponseImageGenCallInProgressEvent,
+        ResponseImageGenCallPartialImageEvent,
+        ResponseMcpCallArgumentsDeltaEvent,
+        ResponseMcpCallArgumentsDoneEvent,
+        ResponseMcpCallCompletedEvent,
+        ResponseMcpCallFailedEvent,
+        ResponseMcpCallInProgressEvent,
+        ResponseMcpListToolsCompletedEvent,
+        ResponseMcpListToolsFailedEvent,
+        ResponseMcpListToolsInProgressEvent,
+        ResponseOutputTextAnnotationAddedEvent,
+        ResponseQueuedEvent,
+        ResponseReasoningDeltaEvent,
+        ResponseReasoningDoneEvent,
+        ResponseReasoningSummaryDeltaEvent,
+        ResponseReasoningSummaryDoneEvent,
     ],
     PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/tool.py
@@ -1,16 +1,175 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Union
-from typing_extensions import Annotated, TypeAlias
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
+from ..._models import BaseModel
 from .computer_tool import ComputerTool
 from .function_tool import FunctionTool
 from .web_search_tool import WebSearchTool
 from .file_search_tool import FileSearchTool
 
-__all__ = ["Tool"]
+__all__ = [
+    "Tool",
+    "Mcp",
+    "McpAllowedTools",
+    "McpAllowedToolsMcpAllowedToolsFilter",
+    "McpRequireApproval",
+    "McpRequireApprovalMcpToolApprovalFilter",
+    "McpRequireApprovalMcpToolApprovalFilterAlways",
+    "McpRequireApprovalMcpToolApprovalFilterNever",
+    "CodeInterpreter",
+    "CodeInterpreterContainer",
+    "CodeInterpreterContainerCodeInterpreterToolAuto",
+    "ImageGeneration",
+    "ImageGenerationInputImageMask",
+    "LocalShell",
+]
+
+
+class McpAllowedToolsMcpAllowedToolsFilter(BaseModel):
+    tool_names: Optional[List[str]] = None
+    """List of allowed tool names."""
+
+
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None]
+
+
+class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
+    tool_names: Optional[List[str]] = None
+    """List of tools that require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel):
+    tool_names: Optional[List[str]] = None
+    """List of tools that do not require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilter(BaseModel):
+    always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None
+    """A list of tools that always require approval."""
+
+    never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None
+    """A list of tools that never require approval."""
+
+    tool_names: Optional[List[str]] = None
+    """List of allowed tool names."""
+
+
+McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
+
+
+class Mcp(BaseModel):
+    server_label: str
+    """A label for this MCP server, used to identify it in tool calls."""
+
+    server_url: str
+    """The URL for the MCP server."""
+
+    type: Literal["mcp"]
+    """The type of the MCP tool. Always `mcp`."""
+
+    allowed_tools: Optional[McpAllowedTools] = None
+    """List of allowed tool names or a filter object."""
+
+    headers: Optional[Dict[str, str]] = None
+    """Optional HTTP headers to send to the MCP server.
+
+    Use for authentication or other purposes.
+    """
+
+    require_approval: Optional[McpRequireApproval] = None
+    """Specify which of the MCP server's tools require approval."""
+
+
+class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
+    type: Literal["auto"]
+    """Always `auto`."""
+
+    file_ids: Optional[List[str]] = None
+    """An optional list of uploaded files to make available to your code."""
+
+
+CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto]
+
+
+class CodeInterpreter(BaseModel):
+    container: CodeInterpreterContainer
+    """The code interpreter container.
+
+    Can be a container ID or an object that specifies uploaded file IDs to make
+    available to your code.
+    """
+
+    type: Literal["code_interpreter"]
+    """The type of the code interpreter tool. Always `code_interpreter`."""
+
+
+class ImageGenerationInputImageMask(BaseModel):
+    file_id: Optional[str] = None
+    """File ID for the mask image."""
+
+    image_url: Optional[str] = None
+    """Base64-encoded mask image."""
+
+
+class ImageGeneration(BaseModel):
+    type: Literal["image_generation"]
+    """The type of the image generation tool. Always `image_generation`."""
+
+    background: Optional[Literal["transparent", "opaque", "auto"]] = None
+    """Background type for the generated image.
+
+    One of `transparent`, `opaque`, or `auto`. Default: `auto`.
+    """
+
+    input_image_mask: Optional[ImageGenerationInputImageMask] = None
+    """Optional mask for inpainting.
+
+    Contains `image_url` (string, optional) and `file_id` (string, optional).
+    """
+
+    model: Optional[Literal["gpt-image-1"]] = None
+    """The image generation model to use. Default: `gpt-image-1`."""
+
+    moderation: Optional[Literal["auto", "low"]] = None
+    """Moderation level for the generated image. Default: `auto`."""
+
+    output_compression: Optional[int] = None
+    """Compression level for the output image. Default: 100."""
+
+    output_format: Optional[Literal["png", "webp", "jpeg"]] = None
+    """The output format of the generated image.
+
+    One of `png`, `webp`, or `jpeg`. Default: `png`.
+    """
+
+    partial_images: Optional[int] = None
+    """
+    Number of partial images to generate in streaming mode, from 0 (default value)
+    to 3.
+    """
+
+    quality: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """The quality of the generated image.
+
+    One of `low`, `medium`, `high`, or `auto`. Default: `auto`.
+    """
+
+    size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None
+    """The size of the generated image.
+
+    One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`.
+    """
+
+
+class LocalShell(BaseModel):
+    type: Literal["local_shell"]
+    """The type of the local shell tool. Always `local_shell`."""
+
 
 Tool: TypeAlias = Annotated[
-    Union[FileSearchTool, FunctionTool, WebSearchTool, ComputerTool], PropertyInfo(discriminator="type")
+    Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell],
+    PropertyInfo(discriminator="type"),
 ]
src/openai/types/responses/tool_choice_types.py
@@ -8,7 +8,15 @@ __all__ = ["ToolChoiceTypes"]
 
 
 class ToolChoiceTypes(BaseModel):
-    type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    type: Literal[
+        "file_search",
+        "web_search_preview",
+        "computer_use_preview",
+        "web_search_preview_2025_03_11",
+        "image_generation",
+        "code_interpreter",
+        "mcp",
+    ]
     """The type of hosted tool the model should use.
 
     Learn more about
@@ -19,4 +27,7 @@ class ToolChoiceTypes(BaseModel):
     - `file_search`
     - `web_search_preview`
     - `computer_use_preview`
+    - `code_interpreter`
+    - `mcp`
+    - `image_generation`
     """
src/openai/types/responses/tool_choice_types_param.py
@@ -9,7 +9,15 @@ __all__ = ["ToolChoiceTypesParam"]
 
 class ToolChoiceTypesParam(TypedDict, total=False):
     type: Required[
-        Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+        Literal[
+            "file_search",
+            "web_search_preview",
+            "computer_use_preview",
+            "web_search_preview_2025_03_11",
+            "image_generation",
+            "code_interpreter",
+            "mcp",
+        ]
     ]
    """The type of hosted tool the model should use.
 
@@ -21,4 +29,7 @@ class ToolChoiceTypesParam(TypedDict, total=False):
     - `file_search`
     - `web_search_preview`
     - `computer_use_preview`
+    - `code_interpreter`
+    - `mcp`
+    - `image_generation`
     """
src/openai/types/responses/tool_param.py
@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 
-from typing import Union
-from typing_extensions import TypeAlias
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .computer_tool_param import ComputerToolParam
 from .function_tool_param import FunctionToolParam
@@ -11,8 +11,174 @@ from .web_search_tool_param import WebSearchToolParam
 from .file_search_tool_param import FileSearchToolParam
 from ..chat.chat_completion_tool_param import ChatCompletionToolParam
 
-__all__ = ["ToolParam"]
+__all__ = [
+    "ToolParam",
+    "Mcp",
+    "McpAllowedTools",
+    "McpAllowedToolsMcpAllowedToolsFilter",
+    "McpRequireApproval",
+    "McpRequireApprovalMcpToolApprovalFilter",
+    "McpRequireApprovalMcpToolApprovalFilterAlways",
+    "McpRequireApprovalMcpToolApprovalFilterNever",
+    "CodeInterpreter",
+    "CodeInterpreterContainer",
+    "CodeInterpreterContainerCodeInterpreterToolAuto",
+    "ImageGeneration",
+    "ImageGenerationInputImageMask",
+    "LocalShell",
+]
 
-ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, WebSearchToolParam, ComputerToolParam]
+class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
+    tool_names: List[str]
+    """List of allowed tool names."""
 
+
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter]
+
+
+class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
+    tool_names: List[str]
+    """List of tools that require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
+    tool_names: List[str]
+    """List of tools that do not require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
+    always: McpRequireApprovalMcpToolApprovalFilterAlways
+    """A list of tools that always require approval."""
+
+    never: McpRequireApprovalMcpToolApprovalFilterNever
+    """A list of tools that never require approval."""
+
+    tool_names: List[str]
+    """List of allowed tool names."""
+
+
+McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
+
+
+class Mcp(TypedDict, total=False):
+    server_label: Required[str]
+    """A label for this MCP server, used to identify it in tool calls."""
+
+    server_url: Required[str]
+    """The URL for the MCP server."""
+
+    type: Required[Literal["mcp"]]
+    """The type of the MCP tool. Always `mcp`."""
+
+    allowed_tools: Optional[McpAllowedTools]
+    """List of allowed tool names or a filter object."""
+
+    headers: Optional[Dict[str, str]]
+    """Optional HTTP headers to send to the MCP server.
+
+    Use for authentication or other purposes.
+    """
+
+    require_approval: Optional[McpRequireApproval]
+    """Specify which of the MCP server's tools require approval."""
+
+
+class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
+    type: Required[Literal["auto"]]
+    """Always `auto`."""
+
+    file_ids: List[str]
+    """An optional list of uploaded files to make available to your code."""
+
+
+CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto]
+
+
+class CodeInterpreter(TypedDict, total=False):
+    container: Required[CodeInterpreterContainer]
+    """The code interpreter container.
+
+    Can be a container ID or an object that specifies uploaded file IDs to make
+    available to your code.
+    """
+
+    type: Required[Literal["code_interpreter"]]
+    """The type of the code interpreter tool. Always `code_interpreter`."""
+
+
+class ImageGenerationInputImageMask(TypedDict, total=False):
+    file_id: str
+    """File ID for the mask image."""
+
+    image_url: str
+    """Base64-encoded mask image."""
+
+
+class ImageGeneration(TypedDict, total=False):
+    type: Required[Literal["image_generation"]]
+    """The type of the image generation tool. Always `image_generation`."""
+
+    background: Literal["transparent", "opaque", "auto"]
+    """Background type for the generated image.
+
+    One of `transparent`, `opaque`, or `auto`. Default: `auto`.
+    """
+
+    input_image_mask: ImageGenerationInputImageMask
+    """Optional mask for inpainting.
+
+    Contains `image_url` (string, optional) and `file_id` (string, optional).
+    """
+
+    model: Literal["gpt-image-1"]
+    """The image generation model to use. Default: `gpt-image-1`."""
+
+    moderation: Literal["auto", "low"]
+    """Moderation level for the generated image. Default: `auto`."""
+
+    output_compression: int
+    """Compression level for the output image. Default: 100."""
+
+    output_format: Literal["png", "webp", "jpeg"]
+    """The output format of the generated image.
+
+    One of `png`, `webp`, or `jpeg`. Default: `png`.
+    """
+
+    partial_images: int
+    """
+    Number of partial images to generate in streaming mode, from 0 (default value)
+    to 3.
+    """
+
+    quality: Literal["low", "medium", "high", "auto"]
+    """The quality of the generated image.
+
+    One of `low`, `medium`, `high`, or `auto`. Default: `auto`.
+    """
+
+    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+    """The size of the generated image.
+
+    One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`.
+    """
+
+
+class LocalShell(TypedDict, total=False):
+    type: Required[Literal["local_shell"]]
+    """The type of the local shell tool. Always `local_shell`."""
+
+
+ToolParam: TypeAlias = Union[
+    FunctionToolParam,
+    FileSearchToolParam,
+    WebSearchToolParam,
+    ComputerToolParam,
+    Mcp,
+    CodeInterpreter,
+    ImageGeneration,
+    LocalShell,
+]
+
+
 ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam]
src/openai/_streaming.py
@@ -59,7 +59,7 @@ class Stream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None or sse.event.startswith("response.") or sse.event.startswith('transcript.'):
+            if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."):
                 data = sse.json()
                 if is_mapping(data) and data.get("error"):
                     message = None
@@ -161,7 +161,7 @@ class AsyncStream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None or sse.event.startswith("response.") or sse.event.startswith('transcript.'):
+            if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."):
                 data = sse.json()
                 if is_mapping(data) and data.get("error"):
                     message = None
src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.79.0"  # x-release-please-version
+__version__ = "1.80.0"  # x-release-please-version
tests/api_resources/test_responses.py
@@ -30,6 +30,7 @@ class TestResponses:
         response = client.responses.create(
             input="string",
             model="gpt-4o",
+            background=True,
             include=["file_search_call.results"],
             instructions="instructions",
             max_output_tokens=0,
@@ -49,18 +50,11 @@ class TestResponses:
             tool_choice="none",
             tools=[
                 {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
+                    "name": "name",
+                    "parameters": {"foo": "bar"},
+                    "strict": True,
+                    "type": "function",
+                    "description": "description",
                 }
             ],
             top_p=1,
@@ -110,6 +104,7 @@ class TestResponses:
             input="string",
             model="gpt-4o",
             stream=True,
+            background=True,
             include=["file_search_call.results"],
             instructions="instructions",
             max_output_tokens=0,
@@ -128,18 +123,11 @@ class TestResponses:
             tool_choice="none",
             tools=[
                 {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
+                    "name": "name",
+                    "parameters": {"foo": "bar"},
+                    "strict": True,
+                    "type": "function",
+                    "description": "description",
                 }
             ],
             top_p=1,
@@ -276,6 +264,7 @@ class TestAsyncResponses:
         response = await async_client.responses.create(
             input="string",
             model="gpt-4o",
+            background=True,
             include=["file_search_call.results"],
             instructions="instructions",
             max_output_tokens=0,
@@ -295,18 +284,11 @@ class TestAsyncResponses:
             tool_choice="none",
             tools=[
                 {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
+                    "name": "name",
+                    "parameters": {"foo": "bar"},
+                    "strict": True,
+                    "type": "function",
+                    "description": "description",
                 }
             ],
             top_p=1,
@@ -356,6 +338,7 @@ class TestAsyncResponses:
             input="string",
             model="gpt-4o",
             stream=True,
+            background=True,
             include=["file_search_call.results"],
             instructions="instructions",
             max_output_tokens=0,
@@ -374,18 +357,11 @@ class TestAsyncResponses:
             tool_choice="none",
             tools=[
                 {
-                    "type": "file_search",
-                    "vector_store_ids": ["string"],
-                    "filters": {
-                        "key": "key",
-                        "type": "eq",
-                        "value": "string",
-                    },
-                    "max_num_results": 0,
-                    "ranking_options": {
-                        "ranker": "auto",
-                        "score_threshold": 0,
-                    },
+                    "name": "name",
+                    "parameters": {"foo": "bar"},
+                    "strict": True,
+                    "type": "function",
+                    "description": "description",
                 }
             ],
             top_p=1,
.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.79.0"
+  ".": "1.80.0"
 }
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 101
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml
-openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b
-config_hash: d8d5fda350f6db77c784f35429741a2e
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a5651cb97f86d1e2531af6aef8c5230f1ea350560fbae790ca2e481b30a6c217.yml
+openapi_spec_hash: 66a5104fd3bb43383cf919225df7a6fd
+config_hash: bb657c3fed232a56930035de3aaed936
api.md
@@ -717,6 +717,10 @@ from openai.types.responses import (
     ResponseFunctionToolCallItem,
     ResponseFunctionToolCallOutputItem,
     ResponseFunctionWebSearch,
+    ResponseImageGenCallCompletedEvent,
+    ResponseImageGenCallGeneratingEvent,
+    ResponseImageGenCallInProgressEvent,
+    ResponseImageGenCallPartialImageEvent,
     ResponseInProgressEvent,
     ResponseIncludable,
     ResponseIncompleteEvent,
@@ -730,6 +734,14 @@ from openai.types.responses import (
     ResponseInputMessageItem,
     ResponseInputText,
     ResponseItem,
+    ResponseMcpCallArgumentsDeltaEvent,
+    ResponseMcpCallArgumentsDoneEvent,
+    ResponseMcpCallCompletedEvent,
+    ResponseMcpCallFailedEvent,
+    ResponseMcpCallInProgressEvent,
+    ResponseMcpListToolsCompletedEvent,
+    ResponseMcpListToolsFailedEvent,
+    ResponseMcpListToolsInProgressEvent,
     ResponseOutputAudio,
     ResponseOutputItem,
     ResponseOutputItemAddedEvent,
@@ -737,7 +749,13 @@ from openai.types.responses import (
     ResponseOutputMessage,
     ResponseOutputRefusal,
     ResponseOutputText,
+    ResponseOutputTextAnnotationAddedEvent,
+    ResponseQueuedEvent,
+    ResponseReasoningDeltaEvent,
+    ResponseReasoningDoneEvent,
     ResponseReasoningItem,
+    ResponseReasoningSummaryDeltaEvent,
+    ResponseReasoningSummaryDoneEvent,
     ResponseReasoningSummaryPartAddedEvent,
     ResponseReasoningSummaryPartDoneEvent,
     ResponseReasoningSummaryTextDeltaEvent,
CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog
 
+## 1.80.0 (2025-05-21)
+
+Full Changelog: [v1.79.0...v1.80.0](https://github.com/openai/openai-python/compare/v1.79.0...v1.80.0)
+
+### Features
+
+* **api:** new API tools ([d36ae52](https://github.com/openai/openai-python/commit/d36ae528d55fe87067c4b8c6b2c947cbad5e5002))
+
+
+### Chores
+
+* **docs:** grammar improvements ([e746145](https://github.com/openai/openai-python/commit/e746145a12b5335d8841aff95c91bbbde8bae8e3))
+
 ## 1.79.0 (2025-05-16)
 
 Full Changelog: [v1.78.1...v1.79.0](https://github.com/openai/openai-python/compare/v1.78.1...v1.79.0)
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.79.0"
+version = "1.80.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
SECURITY.md
@@ -16,13 +16,13 @@ before making any information public.
 ## Reporting Non-SDK Related Security Issues
 
 If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by OpenAI please follow the respective company's security reporting guidelines.
+or products provided by OpenAI, please follow the respective company's security reporting guidelines.
 
 ### OpenAI Terms and Policies
 
 Our Security Policy can be found at [Security Policy URL](https://openai.com/policies/coordinated-vulnerability-disclosure-policy).
 
-Please contact disclosure@openai.com for any questions or concerns regarding security of our services.
+Please contact disclosure@openai.com for any questions or concerns regarding the security of our services.
 
 ---