Commit bff8da95

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-03-19 05:50:14
release: 1.66.5 (#2223) tag: v1.66.5
* chore(internal): remove extra empty newlines (#2195) * chore(internal): bump rye to 0.44.0 (#2200) * chore(internal): remove CI condition (#2203) * chore(internal): update release workflows * fix(types): handle more discriminated union shapes (#2206) * fix(ci): ensure pip is always available (#2207) * fix(ci): remove publishing patch (#2208) * chore(internal): add back releases workflow * chore(internal): codegen related update (#2222) * fix(types): improve responses type names (#2224) * release: 1.66.5 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com> Co-authored-by: meorphis <eric@stainless.com>
1 parent 17d7867
.github/workflows/create-releases.yml
@@ -0,0 +1,39 @@
+name: Create releases
+on:
+  schedule:
+    - cron: '0 5 * * *' # every day at 5am UTC
+  push:
+    branches:
+      - main
+
+jobs:
+  release:
+    name: release
+    if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python'
+    runs-on: ubuntu-latest
+    environment: publish
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: stainless-api/trigger-release-please@v1
+        id: release
+        with:
+          repo: ${{ github.event.repository.full_name }}
+          stainless-api-key: ${{ secrets.STAINLESS_API_KEY }}
+
+      - name: Install Rye
+        if: ${{ steps.release.outputs.releases_created }}
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Publish to PyPI
+        if: ${{ steps.release.outputs.releases_created }}
+        run: |
+          bash ./bin/publish-pypi
+        env:
+          PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
src/openai/resources/responses/input_items.py
@@ -16,7 +16,7 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_respons
 from ...pagination import SyncCursorPage, AsyncCursorPage
 from ..._base_client import AsyncPaginator, make_request_options
 from ...types.responses import input_item_list_params
-from ...types.responses.response_item_list import Data
+from ...types.responses.response_item import ResponseItem
 
 __all__ = ["InputItems", "AsyncInputItems"]
 
@@ -55,7 +55,7 @@ class InputItems(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> SyncCursorPage[Data]:
+    ) -> SyncCursorPage[ResponseItem]:
         """
         Returns a list of input items for a given response.
 
@@ -84,7 +84,7 @@ class InputItems(SyncAPIResource):
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return self._get_api_list(
             f"/responses/{response_id}/input_items",
-            page=SyncCursorPage[Data],
+            page=SyncCursorPage[ResponseItem],
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -100,7 +100,7 @@ class InputItems(SyncAPIResource):
                     input_item_list_params.InputItemListParams,
                 ),
             ),
-            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
         )
 
 
@@ -138,7 +138,7 @@ class AsyncInputItems(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncPaginator[Data, AsyncCursorPage[Data]]:
+    ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]:
         """
         Returns a list of input items for a given response.
 
@@ -167,7 +167,7 @@ class AsyncInputItems(AsyncAPIResource):
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return self._get_api_list(
             f"/responses/{response_id}/input_items",
-            page=AsyncCursorPage[Data],
+            page=AsyncCursorPage[ResponseItem],
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -183,7 +183,7 @@ class AsyncInputItems(AsyncAPIResource):
                     input_item_list_params.InputItemListParams,
                 ),
             ),
-            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
         )
 
 
src/openai/resources/batches.py
@@ -49,7 +49,7 @@ class Batches(SyncAPIResource):
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -67,9 +67,9 @@ class Batches(SyncAPIResource):
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
@@ -259,7 +259,7 @@ class AsyncBatches(AsyncAPIResource):
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -277,9 +277,9 @@ class AsyncBatches(AsyncAPIResource):
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
src/openai/types/chat/chat_completion_chunk.py
@@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel):
     """
     An optional field that will only be present when you set
     `stream_options: {"include_usage": true}` in your request. When present, it
-    contains a null value except for the last chunk which contains the token usage
-    statistics for the entire request.
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
     """
src/openai/types/chat/chat_completion_content_part_param.py
@@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False):
     file_id: str
     """The ID of an uploaded file to use as input."""
 
-    file_name: str
+    filename: str
     """The name of the file, used when passing the file to the model as a string."""
 
 
src/openai/types/chat/chat_completion_stream_options_param.py
@@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False):
     """If set, an additional chunk will be streamed before the `data: [DONE]` message.
 
     The `usage` field on this chunk shows the token usage statistics for the entire
-    request, and the `choices` field will always be an empty array. All other chunks
-    will also include a `usage` field, but with a null value.
+    request, and the `choices` field will always be an empty array.
+
+    All other chunks will also include a `usage` field, but with a null value.
+    **NOTE:** If the stream is interrupted, you may not receive the final usage
+    chunk which contains the total token usage for the request.
     """
src/openai/types/responses/__init__.py
@@ -7,6 +7,7 @@ from .response import Response as Response
 from .tool_param import ToolParam as ToolParam
 from .computer_tool import ComputerTool as ComputerTool
 from .function_tool import FunctionTool as FunctionTool
+from .response_item import ResponseItem as ResponseItem
 from .response_error import ResponseError as ResponseError
 from .response_usage import ResponseUsage as ResponseUsage
 from .parsed_response import (
@@ -66,6 +67,7 @@ from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFun
 from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
 from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
 from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
+from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
 from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent
 from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch
 from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam
@@ -76,6 +78,7 @@ from .response_reasoning_item_param import ResponseReasoningItemParam as Respons
 from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall
 from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
+from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem
 from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
 from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam
 from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent
@@ -90,9 +93,15 @@ from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEve
 from .response_audio_transcript_delta_event import (
     ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
 )
+from .response_computer_tool_call_output_item import (
+    ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem,
+)
 from .response_format_text_json_schema_config import (
     ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig,
 )
+from .response_function_tool_call_output_item import (
+    ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,
+)
 from .response_web_search_call_completed_event import (
     ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent,
 )
@@ -120,6 +129,9 @@ from .response_function_call_arguments_done_event import (
 from .response_function_call_arguments_delta_event import (
     ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
 )
+from .response_computer_tool_call_output_screenshot import (
+    ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot,
+)
 from .response_format_text_json_schema_config_param import (
     ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam,
 )
@@ -138,3 +150,6 @@ from .response_code_interpreter_call_in_progress_event import (
 from .response_code_interpreter_call_interpreting_event import (
     ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent,
 )
+from .response_computer_tool_call_output_screenshot_param import (
+    ResponseComputerToolCallOutputScreenshotParam as ResponseComputerToolCallOutputScreenshotParam,
+)
src/openai/types/responses/response_computer_tool_call_output_item.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
+
+__all__ = ["ResponseComputerToolCallOutputItem", "AcknowledgedSafetyCheck"]
+
+
+class AcknowledgedSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the computer call tool output."""
+
+    call_id: str
+    """The ID of the computer tool call that produced the output."""
+
+    output: ResponseComputerToolCallOutputScreenshot
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Literal["computer_call_output"]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    acknowledged_safety_checks: Optional[List[AcknowledgedSafetyCheck]] = None
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the computer call output.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
src/openai/types/responses/response_computer_tool_call_output_screenshot.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseComputerToolCallOutputScreenshot"]
+
+
+class ResponseComputerToolCallOutputScreenshot(BaseModel):
+    type: Literal["computer_screenshot"]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: Optional[str] = None
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: Optional[str] = None
+    """The URL of the screenshot image."""
src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseComputerToolCallOutputScreenshotParam"]
+
+
+class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False):
+    type: Required[Literal["computer_screenshot"]]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: str
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: str
+    """The URL of the screenshot image."""
src/openai/types/responses/response_function_tool_call.py
@@ -9,9 +9,6 @@ __all__ = ["ResponseFunctionToolCall"]
 
 
 class ResponseFunctionToolCall(BaseModel):
-    id: str
-    """The unique ID of the function tool call."""
-
     arguments: str
     """A JSON string of the arguments to pass to the function."""
 
@@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel):
     type: Literal["function_call"]
     """The type of the function tool call. Always `function_call`."""
 
+    id: Optional[str] = None
+    """The unique ID of the function tool call."""
+
     status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
     """The status of the item.
 
src/openai/types/responses/response_function_tool_call_item.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .response_function_tool_call import ResponseFunctionToolCall
+
+__all__ = ["ResponseFunctionToolCallItem"]
+
+
+class ResponseFunctionToolCallItem(ResponseFunctionToolCall):
+    id: str  # type: ignore
+    """The unique ID of the function tool call."""
src/openai/types/responses/response_function_tool_call_output_item.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCallOutputItem"]
+
+
+class ResponseFunctionToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the function call tool output."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the function tool call."""
+
+    type: Literal["function_call_output"]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
src/openai/types/responses/response_function_tool_call_param.py
@@ -8,9 +8,6 @@ __all__ = ["ResponseFunctionToolCallParam"]
 
 
 class ResponseFunctionToolCallParam(TypedDict, total=False):
-    id: Required[str]
-    """The unique ID of the function tool call."""
-
     arguments: Required[str]
     """A JSON string of the arguments to pass to the function."""
 
@@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False):
     type: Required[Literal["function_call"]]
     """The type of the function tool call. Always `function_call`."""
 
+    id: str
+    """The unique ID of the function tool call."""
+
     status: Literal["in_progress", "completed", "incomplete"]
     """The status of the item.
 
src/openai/types/responses/response_input_item_param.py
@@ -13,12 +13,12 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
 
 __all__ = [
     "ResponseInputItemParam",
     "Message",
     "ComputerCallOutput",
-    "ComputerCallOutputOutput",
     "ComputerCallOutputAcknowledgedSafetyCheck",
     "FunctionCallOutput",
     "ItemReference",
@@ -46,20 +46,6 @@ class Message(TypedDict, total=False):
     """The type of the message input. Always set to `message`."""
 
 
-class ComputerCallOutputOutput(TypedDict, total=False):
-    type: Required[Literal["computer_screenshot"]]
-    """Specifies the event type.
-
-    For a computer screenshot, this property is always set to `computer_screenshot`.
-    """
-
-    file_id: str
-    """The identifier of an uploaded file that contains the screenshot."""
-
-    image_url: str
-    """The URL of the screenshot image."""
-
-
 class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
     id: Required[str]
     """The ID of the pending safety check."""
@@ -75,7 +61,7 @@ class ComputerCallOutput(TypedDict, total=False):
     call_id: Required[str]
     """The ID of the computer tool call that produced the output."""
 
-    output: Required[ComputerCallOutputOutput]
+    output: Required[ResponseComputerToolCallOutputScreenshotParam]
     """A computer screenshot image used with the computer use tool."""
 
     type: Required[Literal["computer_call_output"]]
src/openai/types/responses/response_input_message_item.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["ResponseInputMessageItem"]
+
+
+class ResponseInputMessageItem(BaseModel):
+    id: str
+    """The unique ID of the message input."""
+
+    content: ResponseInputMessageContentList
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Literal["user", "system", "developer"]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always set to `message`."""
src/openai/types/responses/response_input_param.py
@@ -13,13 +13,13 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
 
 __all__ = [
     "ResponseInputParam",
     "ResponseInputItemParam",
     "Message",
     "ComputerCallOutput",
-    "ComputerCallOutputOutput",
     "ComputerCallOutputAcknowledgedSafetyCheck",
     "FunctionCallOutput",
     "ItemReference",
@@ -47,20 +47,6 @@ class Message(TypedDict, total=False):
     """The type of the message input. Always set to `message`."""
 
 
-class ComputerCallOutputOutput(TypedDict, total=False):
-    type: Required[Literal["computer_screenshot"]]
-    """Specifies the event type.
-
-    For a computer screenshot, this property is always set to `computer_screenshot`.
-    """
-
-    file_id: str
-    """The identifier of an uploaded file that contains the screenshot."""
-
-    image_url: str
-    """The URL of the screenshot image."""
-
-
 class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
     id: Required[str]
     """The ID of the pending safety check."""
@@ -76,7 +62,7 @@ class ComputerCallOutput(TypedDict, total=False):
     call_id: Required[str]
     """The ID of the computer tool call that produced the output."""
 
-    output: Required[ComputerCallOutputOutput]
+    output: Required[ResponseComputerToolCallOutputScreenshotParam]
     """A computer screenshot image used with the computer use tool."""
 
     type: Required[Literal["computer_call_output"]]
src/openai/types/responses/response_item.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_input_message_item import ResponseInputMessageItem
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = ["ResponseItem"]
+
+ResponseItem: TypeAlias = Annotated[
+    Union[
+        ResponseInputMessageItem,
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseComputerToolCall,
+        ResponseComputerToolCallOutputItem,
+        ResponseFunctionWebSearch,
+        ResponseFunctionToolCallItem,
+        ResponseFunctionToolCallOutputItem,
+    ],
+    PropertyInfo(discriminator="type"),
+]
src/openai/types/responses/response_item_list.py
@@ -1,142 +1,16 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from typing import List
+from typing_extensions import Literal
 
-from ..._utils import PropertyInfo
 from ..._models import BaseModel
-from .response_output_message import ResponseOutputMessage
-from .response_computer_tool_call import ResponseComputerToolCall
-from .response_function_tool_call import ResponseFunctionToolCall
-from .response_function_web_search import ResponseFunctionWebSearch
-from .response_file_search_tool_call import ResponseFileSearchToolCall
-from .response_input_message_content_list import ResponseInputMessageContentList
+from .response_item import ResponseItem
 
-__all__ = [
-    "ResponseItemList",
-    "Data",
-    "DataMessage",
-    "DataComputerCallOutput",
-    "DataComputerCallOutputOutput",
-    "DataComputerCallOutputAcknowledgedSafetyCheck",
-    "DataFunctionCallOutput",
-]
-
-
-class DataMessage(BaseModel):
-    id: str
-    """The unique ID of the message input."""
-
-    content: ResponseInputMessageContentList
-    """
-    A list of one or many input items to the model, containing different content
-    types.
-    """
-
-    role: Literal["user", "system", "developer"]
-    """The role of the message input. One of `user`, `system`, or `developer`."""
-
-    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
-    """The status of item.
-
-    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
-    returned via API.
-    """
-
-    type: Optional[Literal["message"]] = None
-    """The type of the message input. Always set to `message`."""
-
-
-class DataComputerCallOutputOutput(BaseModel):
-    type: Literal["computer_screenshot"]
-    """Specifies the event type.
-
-    For a computer screenshot, this property is always set to `computer_screenshot`.
-    """
-
-    file_id: Optional[str] = None
-    """The identifier of an uploaded file that contains the screenshot."""
-
-    image_url: Optional[str] = None
-    """The URL of the screenshot image."""
-
-
-class DataComputerCallOutputAcknowledgedSafetyCheck(BaseModel):
-    id: str
-    """The ID of the pending safety check."""
-
-    code: str
-    """The type of the pending safety check."""
-
-    message: str
-    """Details about the pending safety check."""
-
-
-class DataComputerCallOutput(BaseModel):
-    id: str
-    """The unique ID of the computer call tool output."""
-
-    call_id: str
-    """The ID of the computer tool call that produced the output."""
-
-    output: DataComputerCallOutputOutput
-    """A computer screenshot image used with the computer use tool."""
-
-    type: Literal["computer_call_output"]
-    """The type of the computer tool call output. Always `computer_call_output`."""
-
-    acknowledged_safety_checks: Optional[List[DataComputerCallOutputAcknowledgedSafetyCheck]] = None
-    """
-    The safety checks reported by the API that have been acknowledged by the
-    developer.
-    """
-
-    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
-    """The status of the message input.
-
-    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
-    are returned via API.
-    """
-
-
-class DataFunctionCallOutput(BaseModel):
-    id: str
-    """The unique ID of the function call tool output."""
-
-    call_id: str
-    """The unique ID of the function tool call generated by the model."""
-
-    output: str
-    """A JSON string of the output of the function tool call."""
-
-    type: Literal["function_call_output"]
-    """The type of the function tool call output. Always `function_call_output`."""
-
-    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
-    """The status of the item.
-
-    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
-    returned via API.
-    """
-
-
-Data: TypeAlias = Annotated[
-    Union[
-        DataMessage,
-        ResponseOutputMessage,
-        ResponseFileSearchToolCall,
-        ResponseComputerToolCall,
-        DataComputerCallOutput,
-        ResponseFunctionWebSearch,
-        ResponseFunctionToolCall,
-        DataFunctionCallOutput,
-    ],
-    PropertyInfo(discriminator="type"),
-]
+__all__ = ["ResponseItemList"]
 
 
 class ResponseItemList(BaseModel):
-    data: List[Data]
+    data: List[ResponseItem]
     """A list of items used to generate this response."""
 
     first_id: str
src/openai/types/responses/response_usage.py
@@ -3,7 +3,15 @@
 
 from ..._models import BaseModel
 
-__all__ = ["ResponseUsage", "OutputTokensDetails"]
+__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
+
+
+class InputTokensDetails(BaseModel):
+    cached_tokens: int
+    """The number of tokens that were retrieved from the cache.
+
+    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+    """
 
 
 class OutputTokensDetails(BaseModel):
@@ -15,6 +23,9 @@ class ResponseUsage(BaseModel):
     input_tokens: int
     """The number of input tokens."""
 
+    input_tokens_details: InputTokensDetails
+    """A detailed breakdown of the input tokens."""
+
     output_tokens: int
     """The number of output tokens."""
 
src/openai/types/shared/reasoning.py
@@ -20,7 +20,7 @@ class Reasoning(BaseModel):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]] = None
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or
src/openai/types/shared_params/reasoning.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, TypedDict
 
 from ..shared.reasoning_effort import ReasoningEffort
 
@@ -11,7 +11,7 @@ __all__ = ["Reasoning"]
 
 
 class Reasoning(TypedDict, total=False):
-    effort: Required[Optional[ReasoningEffort]]
+    effort: Optional[ReasoningEffort]
     """**o-series models only**
 
     Constrains effort on reasoning for
@@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]]
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or
src/openai/types/batch_create_params.py
@@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """
 
-    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+    endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
     """The endpoint to be used for all requests in the batch.
 
-    Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
-    supported. Note that `/v1/embeddings` batches are also restricted to a maximum
-    of 50,000 embedding inputs across all requests in the batch.
+    Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+    `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+    restricted to a maximum of 50,000 embedding inputs across all requests in the
+    batch.
     """
 
     input_file_id: Required[str]
src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.66.4"  # x-release-please-version
+__version__ = "1.66.5"  # x-release-please-version
tests/api_resources/responses/test_input_items.py
@@ -10,7 +10,7 @@ import pytest
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.responses.response_item_list import Data
+from openai.types.responses import ResponseItem
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -23,7 +23,7 @@ class TestInputItems:
         input_item = client.responses.input_items.list(
             response_id="response_id",
         )
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
@@ -34,7 +34,7 @@ class TestInputItems:
             limit=0,
             order="asc",
         )
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
@@ -45,7 +45,7 @@ class TestInputItems:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         input_item = response.parse()
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
@@ -56,7 +56,7 @@ class TestInputItems:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             input_item = response.parse()
-            assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+            assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
@@ -76,7 +76,7 @@ class TestAsyncInputItems:
         input_item = await async_client.responses.input_items.list(
             response_id="response_id",
         )
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
@@ -87,7 +87,7 @@ class TestAsyncInputItems:
             limit=0,
             order="asc",
         )
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -98,7 +98,7 @@ class TestAsyncInputItems:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         input_item = response.parse()
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -109,7 +109,7 @@ class TestAsyncInputItems:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             input_item = await response.parse()
-            assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+            assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
tests/api_resources/test_batches.py
@@ -22,7 +22,7 @@ class TestBatches:
     def test_method_create(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
@@ -31,7 +31,7 @@ class TestBatches:
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
@@ -41,7 +41,7 @@ class TestBatches:
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -54,7 +54,7 @@ class TestBatches:
     def test_streaming_response_create(self, client: OpenAI) -> None:
         with client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed
@@ -182,7 +182,7 @@ class TestAsyncBatches:
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
@@ -191,7 +191,7 @@ class TestAsyncBatches:
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
@@ -201,7 +201,7 @@ class TestAsyncBatches:
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -214,7 +214,7 @@ class TestAsyncBatches:
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
         async with async_client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed
.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.66.4"
+  ".": "1.66.5"
 }
\ No newline at end of file
.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml
api.md
@@ -605,6 +605,8 @@ from openai.types.responses import (
     ResponseCodeInterpreterToolCall,
     ResponseCompletedEvent,
     ResponseComputerToolCall,
+    ResponseComputerToolCallOutputItem,
+    ResponseComputerToolCallOutputScreenshot,
     ResponseContent,
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
@@ -621,6 +623,8 @@ from openai.types.responses import (
     ResponseFunctionCallArgumentsDeltaEvent,
     ResponseFunctionCallArgumentsDoneEvent,
     ResponseFunctionToolCall,
+    ResponseFunctionToolCallItem,
+    ResponseFunctionToolCallOutputItem,
     ResponseFunctionWebSearch,
     ResponseInProgressEvent,
     ResponseIncludable,
@@ -632,7 +636,9 @@ from openai.types.responses import (
     ResponseInputImage,
     ResponseInputItem,
     ResponseInputMessageContentList,
+    ResponseInputMessageItem,
     ResponseInputText,
+    ResponseItem,
     ResponseOutputAudio,
     ResponseOutputItem,
     ResponseOutputItemAddedEvent,
@@ -677,4 +683,4 @@ from openai.types.responses import ResponseItemList
 
 Methods:
 
-- <code title="get /responses/{response_id}/input_items">client.responses.input_items.<a href="./src/openai/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/openai/types/responses/input_item_list_params.py">params</a>) -> SyncCursorPage[Data]</code>
+- <code title="get /responses/{response_id}/input_items">client.responses.input_items.<a href="./src/openai/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/openai/types/responses/input_item_list_params.py">params</a>) -> <a href="./src/openai/types/responses/response_item.py">SyncCursorPage[ResponseItem]</a></code>
CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.66.5 (2025-03-18)
+
+Full Changelog: [v1.66.4...v1.66.5](https://github.com/openai/openai-python/compare/v1.66.4...v1.66.5)
+
+### Bug Fixes
+
+* **types:** improve responses type names ([#2224](https://github.com/openai/openai-python/issues/2224)) ([5f7beb8](https://github.com/openai/openai-python/commit/5f7beb873af5ccef2551f34ab3ef098e099ce9c6))
+
+
+### Chores
+
+* **internal:** add back releases workflow ([c71d4c9](https://github.com/openai/openai-python/commit/c71d4c918eab3532b36ea944b0c4069db6ac2d38))
+* **internal:** codegen related update ([#2222](https://github.com/openai/openai-python/issues/2222)) ([f570d91](https://github.com/openai/openai-python/commit/f570d914a16cb5092533e32dfd863027d378c0b5))
+
 ## 1.66.4 (2025-03-17)
 
 Full Changelog: [v1.66.3...v1.66.4](https://github.com/openai/openai-python/compare/v1.66.3...v1.66.4)
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.66.4"
+version = "1.66.5"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"