Commit 4e28a424

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-08-22 05:10:05
release: 1.101.0 (#2577)
tag: v1.101.0
* feat(api): adding support for /v1/conversations to the API
* chore: update github action
* feat(api): Add connectors support for MCP tool
* release: 1.101.0

--------

Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent e328fb4
.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
         run: ./scripts/lint
 
   build:
-    if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
     timeout-minutes: 10
     name: build
     permissions:
@@ -61,12 +61,14 @@ jobs:
         run: rye build
 
       - name: Get GitHub OIDC Token
+        if: github.repository == 'stainless-sdks/openai-python'
         id: github-oidc
         uses: actions/github-script@v6
         with:
           script: core.setOutput('github_token', await core.getIDToken());
 
       - name: Upload tarball
+        if: github.repository == 'stainless-sdks/openai-python'
         env:
           URL: https://pkg.stainless.com/s
           AUTH: ${{ steps.github-oidc.outputs.github_token }}
src/openai/resources/conversations/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .items import (
+    Items,
+    AsyncItems,
+    ItemsWithRawResponse,
+    AsyncItemsWithRawResponse,
+    ItemsWithStreamingResponse,
+    AsyncItemsWithStreamingResponse,
+)
+from .conversations import (
+    Conversations,
+    AsyncConversations,
+    ConversationsWithRawResponse,
+    AsyncConversationsWithRawResponse,
+    ConversationsWithStreamingResponse,
+    AsyncConversationsWithStreamingResponse,
+)
+
+__all__ = [
+    "Items",
+    "AsyncItems",
+    "ItemsWithRawResponse",
+    "AsyncItemsWithRawResponse",
+    "ItemsWithStreamingResponse",
+    "AsyncItemsWithStreamingResponse",
+    "Conversations",
+    "AsyncConversations",
+    "ConversationsWithRawResponse",
+    "AsyncConversationsWithRawResponse",
+    "ConversationsWithStreamingResponse",
+    "AsyncConversationsWithStreamingResponse",
+]
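
The new package re-exports the sync and async resources along with their raw- and streaming-response wrappers. A minimal sketch of how these surface on a client, assuming the standard `OpenAI` entry point and an `OPENAI_API_KEY` in the environment:

    from openai import OpenAI

    client = OpenAI()

    # The resource and its sub-resource hang off the client like other SDK resources.
    conversations = client.conversations          # Conversations
    items = client.conversations.items           # Items

    # Wrapper variants mirror the rest of the SDK.
    raw = client.conversations.with_raw_response               # ConversationsWithRawResponse
    streaming = client.conversations.with_streaming_response   # ConversationsWithStreamingResponse
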
src/openai/resources/conversations/conversations.py
@@ -0,0 +1,474 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable, Optional
+
+import httpx
+
+from ... import _legacy_response
+from .items import (
+    Items,
+    AsyncItems,
+    ItemsWithRawResponse,
+    AsyncItemsWithRawResponse,
+    ItemsWithStreamingResponse,
+    AsyncItemsWithStreamingResponse,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._base_client import make_request_options
+from ...types.conversations import conversation_create_params, conversation_update_params
+from ...types.shared_params.metadata import Metadata
+from ...types.conversations.conversation import Conversation
+from ...types.responses.response_input_item_param import ResponseInputItemParam
+from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource
+
+__all__ = ["Conversations", "AsyncConversations"]
+
+
+class Conversations(SyncAPIResource):
+    @cached_property
+    def items(self) -> Items:
+        return Items(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> ConversationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ConversationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ConversationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ConversationsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Create a conversation.
+
+        Args:
+          items: Initial items to include in the conversation context. You may add up to 20 items
+              at a time.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing
+              additional information about the object in a structured format.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._post(
+            "/conversations",
+            body=maybe_transform(
+                {
+                    "items": items,
+                    "metadata": metadata,
+                },
+                conversation_create_params.ConversationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    def retrieve(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Get a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._get(
+            f"/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    def update(
+        self,
+        conversation_id: str,
+        *,
+        metadata: Dict[str, str],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Update a conversation's metadata with the given ID.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._post(
+            f"/conversations/{conversation_id}",
+            body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    def delete(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationDeletedResource:
+        """
+        Delete a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._delete(
+            f"/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationDeletedResource,
+        )
+
+
+class AsyncConversations(AsyncAPIResource):
+    @cached_property
+    def items(self) -> AsyncItems:
+        return AsyncItems(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncConversationsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncConversationsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncConversationsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Create a conversation.
+
+        Args:
+          items: Initial items to include in the conversation context. You may add up to 20 items
+              at a time.
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing
+              additional information about the object in a structured format.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._post(
+            "/conversations",
+            body=await async_maybe_transform(
+                {
+                    "items": items,
+                    "metadata": metadata,
+                },
+                conversation_create_params.ConversationCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    async def retrieve(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Get a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._get(
+            f"/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    async def update(
+        self,
+        conversation_id: str,
+        *,
+        metadata: Dict[str, str],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Update a conversation's metadata with the given ID.
+
+        Args:
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard. Keys are strings with a maximum
+              length of 64 characters. Values are strings with a maximum length of 512
+              characters.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._post(
+            f"/conversations/{conversation_id}",
+            body=await async_maybe_transform(
+                {"metadata": metadata}, conversation_update_params.ConversationUpdateParams
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+    async def delete(
+        self,
+        conversation_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationDeletedResource:
+        """
+        Delete a conversation with the given ID.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._delete(
+            f"/conversations/{conversation_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ConversationDeletedResource,
+        )
+
+
+class ConversationsWithRawResponse:
+    def __init__(self, conversations: Conversations) -> None:
+        self._conversations = conversations
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = _legacy_response.to_raw_response_wrapper(
+            conversations.update,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> ItemsWithRawResponse:
+        return ItemsWithRawResponse(self._conversations.items)
+
+
+class AsyncConversationsWithRawResponse:
+    def __init__(self, conversations: AsyncConversations) -> None:
+        self._conversations = conversations
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = _legacy_response.async_to_raw_response_wrapper(
+            conversations.update,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> AsyncItemsWithRawResponse:
+        return AsyncItemsWithRawResponse(self._conversations.items)
+
+
+class ConversationsWithStreamingResponse:
+    def __init__(self, conversations: Conversations) -> None:
+        self._conversations = conversations
+
+        self.create = to_streamed_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = to_streamed_response_wrapper(
+            conversations.update,
+        )
+        self.delete = to_streamed_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> ItemsWithStreamingResponse:
+        return ItemsWithStreamingResponse(self._conversations.items)
+
+
+class AsyncConversationsWithStreamingResponse:
+    def __init__(self, conversations: AsyncConversations) -> None:
+        self._conversations = conversations
+
+        self.create = async_to_streamed_response_wrapper(
+            conversations.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            conversations.retrieve,
+        )
+        self.update = async_to_streamed_response_wrapper(
+            conversations.update,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            conversations.delete,
+        )
+
+    @cached_property
+    def items(self) -> AsyncItemsWithStreamingResponse:
+        return AsyncItemsWithStreamingResponse(self._conversations.items)
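
An end-to-end sketch of the new resource, assuming `client.conversations` is wired up as above; the item shape follows the Responses API's `ResponseInputItemParam` message form:

    from openai import OpenAI

    client = OpenAI()

    # Create a conversation seeded with an initial item (up to 20 items per call).
    conversation = client.conversations.create(
        items=[{"type": "message", "role": "user", "content": "Hello!"}],
        metadata={"topic": "demo"},
    )

    # Retrieve by ID, then update the metadata mapping.
    conversation = client.conversations.retrieve(conversation.id)
    conversation = client.conversations.update(
        conversation.id,
        metadata={"topic": "demo", "stage": "triage"},
    )

    # The raw-response wrapper exposes headers alongside the parsed model.
    raw = client.conversations.with_raw_response.retrieve(conversation.id)
    print(raw.headers.get("x-request-id"))
    conversation = raw.parse()

    # Delete returns a ConversationDeletedResource.
    deleted = client.conversations.delete(conversation.id)
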
src/openai/resources/conversations/items.py
@@ -0,0 +1,553 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Iterable, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.conversations import item_list_params, item_create_params, item_retrieve_params
+from ...types.conversations.conversation import Conversation
+from ...types.responses.response_includable import ResponseIncludable
+from ...types.conversations.conversation_item import ConversationItem
+from ...types.responses.response_input_item_param import ResponseInputItemParam
+from ...types.conversations.conversation_item_list import ConversationItemList
+
+__all__ = ["Items", "AsyncItems"]
+
+
+class Items(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> ItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return ItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> ItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return ItemsWithStreamingResponse(self)
+
+    def create(
+        self,
+        conversation_id: str,
+        *,
+        items: Iterable[ResponseInputItemParam],
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationItemList:
+        """
+        Create items in a conversation with the given ID.
+
+        Args:
+          items: The items to add to the conversation. You may add up to 20 items at a time.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._post(
+            f"/conversations/{conversation_id}/items",
+            body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+            ),
+            cast_to=ConversationItemList,
+        )
+
+    def retrieve(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationItem:
+        """
+        Get a single item from a conversation with the given IDs.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return cast(
+            ConversationItem,
+            self._get(
+                f"/conversations/{conversation_id}/items/{item_id}",
+                options=make_request_options(
+                    extra_headers=extra_headers,
+                    extra_query=extra_query,
+                    extra_body=extra_body,
+                    timeout=timeout,
+                    query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+                ),
+                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
+
+    def list(
+        self,
+        conversation_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncConversationCursorPage[ConversationItem]:
+        """
+        List all items for a conversation with the given ID.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+                in code interpreter tool call items.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `message.output_text.logprobs`: Include logprobs with assistant messages.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `desc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._get_api_list(
+            f"/conversations/{conversation_id}/items",
+            page=SyncConversationCursorPage[ConversationItem],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    item_list_params.ItemListParams,
+                ),
+            ),
+            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
+        )
+
+    def delete(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Delete an item from a conversation with the given IDs.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return self._delete(
+            f"/conversations/{conversation_id}/items/{item_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+
+class AsyncItems(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncItemsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncItemsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncItemsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncItemsWithStreamingResponse(self)
+
+    async def create(
+        self,
+        conversation_id: str,
+        *,
+        items: Iterable[ResponseInputItemParam],
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationItemList:
+        """
+        Create items in a conversation with the given ID.
+
+        Args:
+          items: The items to add to the conversation. You may add up to 20 items at a time.
+
+          include: Additional fields to include in the response. See the `include` parameter for
+              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return await self._post(
+            f"/conversations/{conversation_id}/items",
+            body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+            ),
+            cast_to=ConversationItemList,
+        )
+
+    async def retrieve(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ConversationItem:
+        """
+        Get a single item from a conversation with the given IDs.
+
+        Args:
+          include: Additional fields to include in the response. See the `include` parameter for
+              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+              for more information.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return cast(
+            ConversationItem,
+            await self._get(
+                f"/conversations/{conversation_id}/items/{item_id}",
+                options=make_request_options(
+                    extra_headers=extra_headers,
+                    extra_query=extra_query,
+                    extra_body=extra_body,
+                    timeout=timeout,
+                    query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+                ),
+                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
+
+    def list(
+        self,
+        conversation_id: str,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]:
+        """
+        List all items for a conversation with the given ID.
+
+        Args:
+          after: An item ID to list items after, used in pagination.
+
+          include: Specify additional output data to include in the model response. Currently
+              supported values are:
+
+              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+                in code interpreter tool call items.
+              - `computer_call_output.output.image_url`: Include image urls from the computer
+                call output.
+              - `file_search_call.results`: Include the search results of the file search tool
+                call.
+              - `message.input_image.image_url`: Include image urls from the input message.
+              - `message.output_text.logprobs`: Include logprobs with assistant messages.
+              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+                tokens in reasoning item outputs. This enables reasoning items to be used in
+                multi-turn conversations when using the Responses API statelessly (like when
+                the `store` parameter is set to `false`, or when an organization is enrolled
+                in the zero data retention program).
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          order: The order to return the input items in. Default is `desc`.
+
+              - `asc`: Return the input items in ascending order.
+              - `desc`: Return the input items in descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        return self._get_api_list(
+            f"/conversations/{conversation_id}/items",
+            page=AsyncConversationCursorPage[ConversationItem],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "include": include,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    item_list_params.ItemListParams,
+                ),
+            ),
+            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
+        )
+
+    async def delete(
+        self,
+        item_id: str,
+        *,
+        conversation_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> Conversation:
+        """
+        Delete an item from a conversation with the given IDs.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not conversation_id:
+            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+        if not item_id:
+            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+        return await self._delete(
+            f"/conversations/{conversation_id}/items/{item_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Conversation,
+        )
+
+
+class ItemsWithRawResponse:
+    def __init__(self, items: Items) -> None:
+        self._items = items
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            items.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            items.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            items.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            items.delete,
+        )
+
+
+class AsyncItemsWithRawResponse:
+    def __init__(self, items: AsyncItems) -> None:
+        self._items = items
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            items.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            items.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            items.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            items.delete,
+        )
+
+
+class ItemsWithStreamingResponse:
+    def __init__(self, items: Items) -> None:
+        self._items = items
+
+        self.create = to_streamed_response_wrapper(
+            items.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            items.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            items.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            items.delete,
+        )
+
+
+class AsyncItemsWithStreamingResponse:
+    def __init__(self, items: AsyncItems) -> None:
+        self._items = items
+
+        self.create = async_to_streamed_response_wrapper(
+            items.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            items.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            items.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            items.delete,
+        )
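
A sketch of the items sub-resource under the same assumptions (including that `ConversationItemList` exposes `.data` like the SDK's other list models). `list` returns a cursor page (`SyncConversationCursorPage`) that auto-paginates when iterated, and deleting an item hands back the parent `Conversation`:

    from openai import OpenAI

    client = OpenAI()
    conv = client.conversations.create()

    # Add items (up to 20 per call).
    created = client.conversations.items.create(
        conv.id,
        items=[{"type": "message", "role": "user", "content": "What is 2 + 2?"}],
    )

    # Iterating the page follows the `after` cursor automatically.
    for item in client.conversations.items.list(conv.id, order="asc", limit=10):
        print(item.id, item.type)

    # Fetch a single item, optionally including extra fields.
    item = client.conversations.items.retrieve(
        created.data[0].id,
        conversation_id=conv.id,
        include=["message.output_text.logprobs"],
    )

    # Removing an item returns the updated Conversation.
    conv = client.conversations.items.delete(item.id, conversation_id=conv.id)
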
src/openai/resources/responses/input_items.py
@@ -47,7 +47,6 @@ class InputItems(SyncAPIResource):
         response_id: str,
         *,
         after: str | NotGiven = NOT_GIVEN,
-        before: str | NotGiven = NOT_GIVEN,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
         limit: int | NotGiven = NOT_GIVEN,
         order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -64,8 +63,6 @@ class InputItems(SyncAPIResource):
         Args:
           after: An item ID to list items after, used in pagination.
 
-          before: An item ID to list items before, used in pagination.
-
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
@@ -98,7 +95,6 @@ class InputItems(SyncAPIResource):
                 query=maybe_transform(
                     {
                         "after": after,
-                        "before": before,
                         "include": include,
                         "limit": limit,
                         "order": order,
@@ -135,7 +131,6 @@ class AsyncInputItems(AsyncAPIResource):
         response_id: str,
         *,
         after: str | NotGiven = NOT_GIVEN,
-        before: str | NotGiven = NOT_GIVEN,
         include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
         limit: int | NotGiven = NOT_GIVEN,
         order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -152,8 +147,6 @@ class AsyncInputItems(AsyncAPIResource):
         Args:
           after: An item ID to list items after, used in pagination.
 
-          before: An item ID to list items before, used in pagination.
-
           include: Additional fields to include in the response. See the `include` parameter for
               Response creation above for more information.
 
@@ -186,7 +179,6 @@ class AsyncInputItems(AsyncAPIResource):
                 query=maybe_transform(
                     {
                         "after": after,
-                        "before": before,
                         "include": include,
                         "limit": limit,
                         "order": order,
src/openai/resources/responses/responses.py
@@ -77,6 +77,7 @@ class Responses(SyncAPIResource):
         self,
         *,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -127,6 +128,11 @@ class Responses(SyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -187,6 +193,7 @@ class Responses(SyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
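
A sketch of the new `conversation` parameter on `responses.create`, assuming it accepts a conversation ID string (the model name below is illustrative only); per the docstring above, it cannot be combined with `previous_response_id`:

    from openai import OpenAI

    client = OpenAI()
    conv = client.conversations.create()

    # Items already in `conv` are prepended to this request's input, and this
    # response's input/output items are written back to `conv` on completion.
    response = client.responses.create(
        model="gpt-4.1",  # illustrative model name
        conversation=conv.id,
        input="Summarize our discussion so far.",
    )
    print(response.output_text)
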
@@ -305,6 +312,7 @@ class Responses(SyncAPIResource):
         *,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -361,6 +369,11 @@ class Responses(SyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -421,6 +434,7 @@ class Responses(SyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -532,6 +546,7 @@ class Responses(SyncAPIResource):
         *,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -588,6 +603,11 @@ class Responses(SyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -648,6 +668,7 @@ class Responses(SyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -757,6 +778,7 @@ class Responses(SyncAPIResource):
         self,
         *,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -794,6 +816,7 @@ class Responses(SyncAPIResource):
             body=maybe_transform(
                 {
                     "background": background,
+                    "conversation": conversation,
                     "include": include,
                     "input": input,
                     "instructions": instructions,
@@ -866,7 +889,7 @@ class Responses(SyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -1009,6 +1032,7 @@ class Responses(SyncAPIResource):
         *,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1027,7 +1051,7 @@ class Responses(SyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1065,6 +1089,7 @@ class Responses(SyncAPIResource):
             body=maybe_transform(
                 {
                     "background": background,
+                    "conversation": conversation,
                     "include": include,
                     "input": input,
                     "instructions": instructions,
@@ -1440,6 +1465,7 @@ class AsyncResponses(AsyncAPIResource):
         self,
         *,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1490,6 +1516,11 @@ class AsyncResponses(AsyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -1550,6 +1581,7 @@ class AsyncResponses(AsyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -1668,6 +1700,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1724,6 +1757,11 @@ class AsyncResponses(AsyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -1784,6 +1822,7 @@ class AsyncResponses(AsyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -1895,6 +1934,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1951,6 +1991,11 @@ class AsyncResponses(AsyncAPIResource):
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).
 
+          conversation: The conversation that this response belongs to. Items from this conversation are
+              prepended to `input_items` for this response request. Input items and output
+              items from this response are automatically added to this conversation after this
+              response completes.
+
           include: Specify additional output data to include in the model response. Currently
               supported values are:
 
@@ -2011,6 +2056,7 @@ class AsyncResponses(AsyncAPIResource):
           previous_response_id: The unique ID of the previous response to the model. Use this to create
               multi-turn conversations. Learn more about
               [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+              Cannot be used in conjunction with `conversation`.
 
           prompt: Reference to a prompt template and its variables.
               [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -2120,6 +2166,7 @@ class AsyncResponses(AsyncAPIResource):
         self,
         *,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2157,6 +2204,7 @@ class AsyncResponses(AsyncAPIResource):
             body=await async_maybe_transform(
                 {
                     "background": background,
+                    "conversation": conversation,
                     "include": include,
                     "input": input,
                     "instructions": instructions,
@@ -2229,7 +2277,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2261,7 +2309,7 @@ class AsyncResponses(AsyncAPIResource):
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2376,6 +2424,7 @@ class AsyncResponses(AsyncAPIResource):
         *,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2394,7 +2443,7 @@ class AsyncResponses(AsyncAPIResource):
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2432,6 +2481,7 @@ class AsyncResponses(AsyncAPIResource):
             body=maybe_transform(
                 {
                     "background": background,
+                    "conversation": conversation,
                     "include": include,
                     "input": input,
                     "instructions": instructions,
src/openai/types/conversations/__init__.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .message import Message as Message
+from .lob_prob import LobProb as LobProb
+from .conversation import Conversation as Conversation
+from .text_content import TextContent as TextContent
+from .top_log_prob import TopLogProb as TopLogProb
+from .refusal_content import RefusalContent as RefusalContent
+from .item_list_params import ItemListParams as ItemListParams
+from .conversation_item import ConversationItem as ConversationItem
+from .url_citation_body import URLCitationBody as URLCitationBody
+from .file_citation_body import FileCitationBody as FileCitationBody
+from .input_file_content import InputFileContent as InputFileContent
+from .input_text_content import InputTextContent as InputTextContent
+from .item_create_params import ItemCreateParams as ItemCreateParams
+from .input_image_content import InputImageContent as InputImageContent
+from .output_text_content import OutputTextContent as OutputTextContent
+from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams
+from .summary_text_content import SummaryTextContent as SummaryTextContent
+from .conversation_item_list import ConversationItemList as ConversationItemList
+from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
+from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
+from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent
+from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody
+from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource
src/openai/types/conversations/computer_screenshot_content.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerScreenshotContent"]
+
+
+class ComputerScreenshotContent(BaseModel):
+    file_id: Optional[str] = None
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: Optional[str] = None
+    """The URL of the screenshot image."""
+
+    type: Literal["computer_screenshot"]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
src/openai/types/conversations/container_file_citation_body.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ContainerFileCitationBody"]
+
+
+class ContainerFileCitationBody(BaseModel):
+    container_id: str
+    """The ID of the container file."""
+
+    end_index: int
+    """The index of the last character of the container file citation in the message."""
+
+    file_id: str
+    """The ID of the file."""
+
+    filename: str
+    """The filename of the container file cited."""
+
+    start_index: int
+    """The index of the first character of the container file citation in the message."""
+
+    type: Literal["container_file_citation"]
+    """The type of the container file citation. Always `container_file_citation`."""
src/openai/types/conversations/conversation.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["Conversation"]
+
+
+class Conversation(BaseModel):
+    id: str
+    """The unique ID of the conversation."""
+
+    created_at: int
+    """
+    The time at which the conversation was created, measured in seconds since the
+    Unix epoch.
+    """
+
+    metadata: object
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters.
+    """
+
+    object: Literal["conversation"]
+    """The object type, which is always `conversation`."""
src/openai/types/conversations/conversation_create_params.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import TypedDict
+
+from ..shared_params.metadata import Metadata
+from ..responses.response_input_item_param import ResponseInputItemParam
+
+__all__ = ["ConversationCreateParams"]
+
+
+class ConversationCreateParams(TypedDict, total=False):
+    items: Optional[Iterable[ResponseInputItemParam]]
+    """
+    Initial items to include in the conversation context. You may add up to 20 items
+    at a time.
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    Useful for storing additional information about the object in a structured
+    format.
+    """
src/openai/types/conversations/conversation_deleted_resource.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ConversationDeletedResource"]
+
+
+class ConversationDeletedResource(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["conversation.deleted"]
src/openai/types/conversations/conversation_item.py
@@ -0,0 +1,209 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .message import Message
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from ..responses.response_reasoning_item import ResponseReasoningItem
+from ..responses.response_custom_tool_call import ResponseCustomToolCall
+from ..responses.response_computer_tool_call import ResponseComputerToolCall
+from ..responses.response_function_web_search import ResponseFunctionWebSearch
+from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall
+from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput
+from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem
+from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = [
+    "ConversationItem",
+    "ImageGenerationCall",
+    "LocalShellCall",
+    "LocalShellCallAction",
+    "LocalShellCallOutput",
+    "McpListTools",
+    "McpListToolsTool",
+    "McpApprovalRequest",
+    "McpApprovalResponse",
+    "McpCall",
+]
+
+
+class ImageGenerationCall(BaseModel):
+    id: str
+    """The unique ID of the image generation call."""
+
+    result: Optional[str] = None
+    """The generated image encoded in base64."""
+
+    status: Literal["in_progress", "completed", "generating", "failed"]
+    """The status of the image generation call."""
+
+    type: Literal["image_generation_call"]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(BaseModel):
+    command: List[str]
+    """The command to run."""
+
+    env: Dict[str, str]
+    """Environment variables to set for the command."""
+
+    type: Literal["exec"]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int] = None
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str] = None
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str] = None
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(BaseModel):
+    id: str
+    """The unique ID of the local shell call."""
+
+    action: LocalShellCallAction
+    """Execute a shell command on the server."""
+
+    call_id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the local shell call."""
+
+    type: Literal["local_shell_call"]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(BaseModel):
+    id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the local shell tool call."""
+
+    type: Literal["local_shell_call_output"]
+    """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(BaseModel):
+    input_schema: object
+    """The JSON schema describing the tool's input."""
+
+    name: str
+    """The name of the tool."""
+
+    annotations: Optional[object] = None
+    """Additional annotations about the tool."""
+
+    description: Optional[str] = None
+    """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+    id: str
+    """The unique ID of the list."""
+
+    server_label: str
+    """The label of the MCP server."""
+
+    tools: List[McpListToolsTool]
+    """The tools available on the server."""
+
+    type: Literal["mcp_list_tools"]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str] = None
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+    id: str
+    """The unique ID of the approval request."""
+
+    arguments: str
+    """A JSON string of arguments for the tool."""
+
+    name: str
+    """The name of the tool to run."""
+
+    server_label: str
+    """The label of the MCP server making the request."""
+
+    type: Literal["mcp_approval_request"]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(BaseModel):
+    id: str
+    """The unique ID of the approval response"""
+
+    approval_request_id: str
+    """The ID of the approval request being answered."""
+
+    approve: bool
+    """Whether the request was approved."""
+
+    type: Literal["mcp_approval_response"]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    reason: Optional[str] = None
+    """Optional reason for the decision."""
+
+
+class McpCall(BaseModel):
+    id: str
+    """The unique ID of the tool call."""
+
+    arguments: str
+    """A JSON string of the arguments passed to the tool."""
+
+    name: str
+    """The name of the tool that was run."""
+
+    server_label: str
+    """The label of the MCP server running the tool."""
+
+    type: Literal["mcp_call"]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str] = None
+    """The error from the tool call, if any."""
+
+    output: Optional[str] = None
+    """The output from the tool call."""
+
+
+ConversationItem: TypeAlias = Annotated[
+    Union[
+        Message,
+        ResponseFunctionToolCallItem,
+        ResponseFunctionToolCallOutputItem,
+        ResponseFileSearchToolCall,
+        ResponseFunctionWebSearch,
+        ImageGenerationCall,
+        ResponseComputerToolCall,
+        ResponseComputerToolCallOutputItem,
+        ResponseReasoningItem,
+        ResponseCodeInterpreterToolCall,
+        LocalShellCall,
+        LocalShellCallOutput,
+        McpListTools,
+        McpApprovalRequest,
+        McpApprovalResponse,
+        McpCall,
+        ResponseCustomToolCall,
+        ResponseCustomToolCallOutput,
+    ],
+    PropertyInfo(discriminator="type"),
+]
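
Because the union is discriminated on `type`, fetched items deserialize to the concrete models above, so plain isinstance checks suffice once an item is parsed; a sketch (the `describe` helper is hypothetical):

    from openai.types.conversations import ConversationItem, Message
    from openai.types.conversations.conversation_item import McpCall

    def describe(item: ConversationItem) -> str:
        # Pydantic resolves the union via the `type` discriminator.
        if isinstance(item, Message):
            return f"message from {item.role} ({len(item.content)} part(s))"
        if isinstance(item, McpCall):
            return f"MCP call to {item.name} on {item.server_label}"
        return f"item of type {item.type}"
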
src/openai/types/conversations/conversation_item_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemList"]
+
+
+class ConversationItemList(BaseModel):
+    data: List[ConversationItem]
+    """A list of conversation items."""
+
+    first_id: str
+    """The ID of the first item in the list."""
+
+    has_more: bool
+    """Whether there are more items available."""
+
+    last_id: str
+    """The ID of the last item in the list."""
+
+    object: Literal["list"]
+    """The type of object returned, must be `list`."""
src/openai/types/conversations/conversation_update_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ConversationUpdateParams"]
+
+
+class ConversationUpdateParams(TypedDict, total=False):
+    metadata: Required[Dict[str, str]]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard. Keys are
+    strings with a maximum length of 64 characters. Values are strings with a
+    maximum length of 512 characters.
+    """
src/openai/types/conversations/file_citation_body.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileCitationBody"]
+
+
+class FileCitationBody(BaseModel):
+    file_id: str
+    """The ID of the file."""
+
+    filename: str
+    """The filename of the file cited."""
+
+    index: int
+    """The index of the file in the list of files."""
+
+    type: Literal["file_citation"]
+    """The type of the file citation. Always `file_citation`."""
src/openai/types/conversations/input_file_content.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputFileContent"]
+
+
+class InputFileContent(BaseModel):
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    type: Literal["input_file"]
+    """The type of the input item. Always `input_file`."""
+
+    file_url: Optional[str] = None
+    """The URL of the file to be sent to the model."""
+
+    filename: Optional[str] = None
+    """The name of the file to be sent to the model."""
src/openai/types/conversations/input_image_content.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputImageContent"]
+
+
+class InputImageContent(BaseModel):
+    detail: Literal["low", "high", "auto"]
+    """The detail level of the image to be sent to the model.
+
+    One of `high`, `low`, or `auto`. Defaults to `auto`.
+    """
+
+    file_id: Optional[str] = None
+    """The ID of the file to be sent to the model."""
+
+    image_url: Optional[str] = None
+    """The URL of the image to be sent to the model.
+
+    A fully qualified URL or base64 encoded image in a data URL.
+    """
+
+    type: Literal["input_image"]
+    """The type of the input item. Always `input_image`."""
src/openai/types/conversations/input_text_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputTextContent"]
+
+
+class InputTextContent(BaseModel):
+    text: str
+    """The text input to the model."""
+
+    type: Literal["input_text"]
+    """The type of the input item. Always `input_text`."""
src/openai/types/conversations/item_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import Required, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+from ..responses.response_input_item_param import ResponseInputItemParam
+
+__all__ = ["ItemCreateParams"]
+
+
+class ItemCreateParams(TypedDict, total=False):
+    items: Required[Iterable[ResponseInputItemParam]]
+    """The items to add to the conversation. You may add up to 20 items at a time."""
+
+    include: List[ResponseIncludable]
+    """Additional fields to include in the response.
+
+    See the `include` parameter for
+    [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+    for more information.
+    """
src/openai/types/conversations/item_list_params.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+
+__all__ = ["ItemListParams"]
+
+
+class ItemListParams(TypedDict, total=False):
+    after: str
+    """An item ID to list items after, used in pagination."""
+
+    include: List[ResponseIncludable]
+    """Specify additional output data to include in the model response.
+
+    Currently supported values are:
+
+    - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+      in code interpreter tool call items.
+    - `computer_call_output.output.image_url`: Include image URLs from the computer
+      call output.
+    - `file_search_call.results`: Include the search results of the file search tool
+      call.
+    - `message.input_image.image_url`: Include image URLs from the input message.
+    - `message.output_text.logprobs`: Include logprobs with assistant messages.
+    - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+      tokens in reasoning item outputs. This enables reasoning items to be used in
+      multi-turn conversations when using the Responses API statelessly (like when
+      the `store` parameter is set to `false`, or when an organization is enrolled
+      in the zero data retention program).
+    """
+
+    limit: int
+    """A limit on the number of objects to be returned.
+
+    Limit can range between 1 and 100, and the default is 20.
+    """
+
+    order: Literal["asc", "desc"]
+    """The order to return the input items in. Default is `desc`.
+
+    - `asc`: Return the input items in ascending order.
+    - `desc`: Return the input items in descending order.
+    """
src/openai/types/conversations/item_retrieve_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+
+__all__ = ["ItemRetrieveParams"]
+
+
+class ItemRetrieveParams(TypedDict, total=False):
+    conversation_id: Required[str]
+
+    include: List[ResponseIncludable]
+    """Additional fields to include in the response.
+
+    See the `include` parameter for
+    [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+    for more information.
+    """
src/openai/types/conversations/lob_prob.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+from .top_log_prob import TopLogProb
+
+__all__ = ["LobProb"]
+
+
+class LobProb(BaseModel):
+    token: str
+
+    bytes: List[int]
+
+    logprob: float
+
+    top_logprobs: List[TopLogProb]
src/openai/types/conversations/message.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .text_content import TextContent
+from .refusal_content import RefusalContent
+from .input_file_content import InputFileContent
+from .input_text_content import InputTextContent
+from .input_image_content import InputImageContent
+from .output_text_content import OutputTextContent
+from .summary_text_content import SummaryTextContent
+from .computer_screenshot_content import ComputerScreenshotContent
+
+__all__ = ["Message", "Content"]
+
+Content: TypeAlias = Annotated[
+    Union[
+        InputTextContent,
+        OutputTextContent,
+        TextContent,
+        SummaryTextContent,
+        RefusalContent,
+        InputImageContent,
+        ComputerScreenshotContent,
+        InputFileContent,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class Message(BaseModel):
+    id: str
+    """The unique ID of the message."""
+
+    content: List[Content]
+    """The content of the message"""
+
+    role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"]
+    """The role of the message.
+
+    One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`,
+    `developer`, or `tool`.
+    """
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Literal["message"]
+    """The type of the message. Always set to `message`."""
src/openai/types/conversations/output_text_content.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .lob_prob import LobProb
+from ..._models import BaseModel
+from .url_citation_body import URLCitationBody
+from .file_citation_body import FileCitationBody
+from .container_file_citation_body import ContainerFileCitationBody
+
+__all__ = ["OutputTextContent", "Annotation"]
+
+Annotation: TypeAlias = Annotated[
+    Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type")
+]
+
+
+class OutputTextContent(BaseModel):
+    annotations: List[Annotation]
+    """The annotations of the text output."""
+
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+    logprobs: Optional[List[LobProb]] = None
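
Annotations are likewise discriminated on `type`; a sketch of collecting citations from an output text part (the helper is hypothetical):

    from openai.types.conversations import OutputTextContent, URLCitationBody

    def list_citations(content: OutputTextContent) -> list[str]:
        cited: list[str] = []
        for annotation in content.annotations:
            if isinstance(annotation, URLCitationBody):
                cited.append(f"{annotation.title} <{annotation.url}>")
            else:
                # File and container-file citations both carry a file_id.
                cited.append(f"{annotation.type}: {annotation.file_id}")
        return cited
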
src/openai/types/conversations/refusal_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["RefusalContent"]
+
+
+class RefusalContent(BaseModel):
+    refusal: str
+    """The refusal explanation from the model."""
+
+    type: Literal["refusal"]
+    """The type of the refusal. Always `refusal`."""
src/openai/types/conversations/summary_text_content.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SummaryTextContent"]
+
+
+class SummaryTextContent(BaseModel):
+    text: str
+
+    type: Literal["summary_text"]
src/openai/types/conversations/text_content.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TextContent"]
+
+
+class TextContent(BaseModel):
+    text: str
+
+    type: Literal["text"]
src/openai/types/conversations/top_log_prob.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["TopLogProb"]
+
+
+class TopLogProb(BaseModel):
+    token: str
+
+    bytes: List[int]
+
+    logprob: float
src/openai/types/conversations/url_citation_body.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["URLCitationBody"]
+
+
+class URLCitationBody(BaseModel):
+    end_index: int
+    """The index of the last character of the URL citation in the message."""
+
+    start_index: int
+    """The index of the first character of the URL citation in the message."""
+
+    title: str
+    """The title of the web resource."""
+
+    type: Literal["url_citation"]
+    """The type of the URL citation. Always `url_citation`."""
+
+    url: str
+    """The URL of the web resource."""
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -23,10 +23,10 @@ __all__ = [
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateMessage",
-    "InputMessagesTemplateTemplateMessageContent",
-    "InputMessagesTemplateTemplateMessageContentOutputText",
-    "InputMessagesTemplateTemplateMessageContentInputImage",
+    "InputMessagesTemplateTemplateEvalItem",
+    "InputMessagesTemplateTemplateEvalItemContent",
+    "InputMessagesTemplateTemplateEvalItemContentOutputText",
+    "InputMessagesTemplateTemplateEvalItemContentInputImage",
     "InputMessagesItemReference",
     "SamplingParams",
     "SamplingParamsResponseFormat",
@@ -87,7 +87,7 @@ Source: TypeAlias = Annotated[
 ]
 
 
-class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
+class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
     text: str
     """The text output from the model."""
 
@@ -95,7 +95,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
     """The type of the output text. Always `output_text`."""
 
 
-class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel):
+class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel):
     image_url: str
     """The URL of the image input."""
 
@@ -109,17 +109,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel):
     """
 
 
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
     str,
     ResponseInputText,
-    InputMessagesTemplateTemplateMessageContentOutputText,
-    InputMessagesTemplateTemplateMessageContentInputImage,
+    InputMessagesTemplateTemplateEvalItemContentOutputText,
+    InputMessagesTemplateTemplateEvalItemContentInputImage,
     List[object],
 ]
 
 
-class InputMessagesTemplateTemplateMessage(BaseModel):
-    content: InputMessagesTemplateTemplateMessageContent
+class InputMessagesTemplateTemplateEvalItem(BaseModel):
+    content: InputMessagesTemplateTemplateEvalItemContent
     """Inputs to the model - can contain template strings."""
 
     role: Literal["user", "assistant", "system", "developer"]
@@ -132,9 +132,7 @@ class InputMessagesTemplateTemplateMessage(BaseModel):
     """The type of the message input. Always `message`."""
 
 
-InputMessagesTemplateTemplate: TypeAlias = Annotated[
-    Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
-]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessage, InputMessagesTemplateTemplateEvalItem]
 
 
 class InputMessagesTemplate(BaseModel):
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -23,10 +23,10 @@ __all__ = [
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateMessage",
-    "InputMessagesTemplateTemplateMessageContent",
-    "InputMessagesTemplateTemplateMessageContentOutputText",
-    "InputMessagesTemplateTemplateMessageContentInputImage",
+    "InputMessagesTemplateTemplateEvalItem",
+    "InputMessagesTemplateTemplateEvalItemContent",
+    "InputMessagesTemplateTemplateEvalItemContentOutputText",
+    "InputMessagesTemplateTemplateEvalItemContentInputImage",
     "InputMessagesItemReference",
     "SamplingParams",
     "SamplingParamsResponseFormat",
@@ -85,7 +85,7 @@ class SourceStoredCompletions(TypedDict, total=False):
 Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
 
 
-class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
+class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False):
     text: Required[str]
     """The text output from the model."""
 
@@ -93,7 +93,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal
     """The type of the output text. Always `output_text`."""
 
 
-class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=False):
+class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False):
     image_url: Required[str]
     """The URL of the image input."""
 
@@ -107,17 +107,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=Fal
     """
 
 
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
     str,
     ResponseInputTextParam,
-    InputMessagesTemplateTemplateMessageContentOutputText,
-    InputMessagesTemplateTemplateMessageContentInputImage,
+    InputMessagesTemplateTemplateEvalItemContentOutputText,
+    InputMessagesTemplateTemplateEvalItemContentInputImage,
     Iterable[object],
 ]
 
 
-class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
-    content: Required[InputMessagesTemplateTemplateMessageContent]
+class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
+    content: Required[InputMessagesTemplateTemplateEvalItemContent]
     """Inputs to the model - can contain template strings."""
 
     role: Required[Literal["user", "assistant", "system", "developer"]]
@@ -130,7 +130,7 @@ class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
     """The type of the message input. Always `message`."""
 
 
-InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateEvalItem]
 
 
 class InputMessagesTemplate(TypedDict, total=False):
src/openai/types/responses/__init__.py
@@ -79,6 +79,7 @@ from .response_output_text_param import ResponseOutputTextParam as ResponseOutpu
 from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
 from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
+from .response_conversation_param import ResponseConversationParam as ResponseConversationParam
 from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
 from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
 from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
src/openai/types/responses/input_item_list_params.py
@@ -14,9 +14,6 @@ class InputItemListParams(TypedDict, total=False):
     after: str
     """An item ID to list items after, used in pagination."""
 
-    before: str
-    """An item ID to list items before, used in pagination."""
-
     include: List[ResponseIncludable]
     """Additional fields to include in the response.
 
src/openai/types/responses/response.py
@@ -22,7 +22,7 @@ from .response_text_config import ResponseTextConfig
 from .tool_choice_function import ToolChoiceFunction
 from ..shared.responses_model import ResponsesModel
 
-__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"]
 
 
 class IncompleteDetails(BaseModel):
@@ -35,6 +35,11 @@ ToolChoice: TypeAlias = Union[
 ]
 
 
+class Conversation(BaseModel):
+    id: str
+    """The unique ID of the conversation."""
+
+
 class Response(BaseModel):
     id: str
     """Unique identifier for this Response."""
@@ -141,6 +146,13 @@ class Response(BaseModel):
     [Learn more](https://platform.openai.com/docs/guides/background).
     """
 
+    conversation: Optional[Conversation] = None
+    """The conversation that this response belongs to.
+
+    Input items and output items from this response are automatically added to this
+    conversation.
+    """
+
     max_output_tokens: Optional[int] = None
     """
     An upper bound for the number of tokens that can be generated for a response,
@@ -161,6 +173,7 @@ class Response(BaseModel):
 
     Use this to create multi-turn conversations. Learn more about
     [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    Cannot be used in conjunction with `conversation`.
     """
 
     prompt: Optional[ResponsePrompt] = None
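
On the model side, a response created against a conversation echoes it back; a sketch (placeholder ID):

    from openai import OpenAI

    client = OpenAI()

    response = client.responses.create(
        model="gpt-4.1",
        input="Continue where we left off.",
        conversation="conv_123",
    )

    # None for stand-alone responses; otherwise only the ID is exposed.
    if response.conversation is not None:
        print(response.conversation.id)
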
src/openai/types/responses/response_conversation_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ResponseConversationParam"]
+
+
+class ResponseConversationParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the conversation."""
src/openai/types/responses/response_create_params.py
@@ -18,10 +18,12 @@ from .tool_choice_custom_param import ToolChoiceCustomParam
 from .tool_choice_allowed_param import ToolChoiceAllowedParam
 from .response_text_config_param import ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam
+from .response_conversation_param import ResponseConversationParam
 from ..shared_params.responses_model import ResponsesModel
 
 __all__ = [
     "ResponseCreateParamsBase",
+    "Conversation",
     "StreamOptions",
     "ToolChoice",
     "ResponseCreateParamsNonStreaming",
@@ -36,6 +38,14 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/background).
     """
 
+    conversation: Optional[Conversation]
+    """The conversation that this response belongs to.
+
+    Items from this conversation are prepended to `input_items` for this response
+    request. Input items and output items from this response are automatically added
+    to this conversation after this response completes.
+    """
+
     include: Optional[List[ResponseIncludable]]
     """Specify additional output data to include in the model response.
 
@@ -118,6 +128,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
 
     Use this to create multi-turn conversations. Learn more about
     [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    Cannot be used in conjunction with `conversation`.
     """
 
     prompt: Optional[ResponsePromptParam]
@@ -253,6 +264,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     """
 
 
+Conversation: TypeAlias = Union[str, ResponseConversationParam]
+
+
 class StreamOptions(TypedDict, total=False):
     include_obfuscation: bool
     """When true, stream obfuscation will be enabled.
src/openai/types/responses/tool.py
@@ -15,7 +15,7 @@ __all__ = [
     "Tool",
     "Mcp",
     "McpAllowedTools",
-    "McpAllowedToolsMcpAllowedToolsFilter",
+    "McpAllowedToolsMcpToolFilter",
     "McpRequireApproval",
     "McpRequireApprovalMcpToolApprovalFilter",
     "McpRequireApprovalMcpToolApprovalFilterAlways",
@@ -29,30 +29,54 @@ __all__ = [
 ]
 
 
-class McpAllowedToolsMcpAllowedToolsFilter(BaseModel):
+class McpAllowedToolsMcpToolFilter(BaseModel):
+    read_only: Optional[bool] = None
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: Optional[List[str]] = None
     """List of allowed tool names."""
 
 
-McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None]
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None]
 
 
 class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
+    read_only: Optional[bool] = None
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: Optional[List[str]] = None
-    """List of tools that require approval."""
+    """List of allowed tool names."""
 
 
 class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel):
+    read_only: Optional[bool] = None
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: Optional[List[str]] = None
-    """List of tools that do not require approval."""
+    """List of allowed tool names."""
 
 
 class McpRequireApprovalMcpToolApprovalFilter(BaseModel):
     always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None
-    """A list of tools that always require approval."""
+    """A filter object to specify which tools are allowed."""
 
     never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None
-    """A list of tools that never require approval."""
+    """A filter object to specify which tools are allowed."""
 
 
 McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
@@ -62,15 +86,49 @@ class Mcp(BaseModel):
     server_label: str
     """A label for this MCP server, used to identify it in tool calls."""
 
-    server_url: str
-    """The URL for the MCP server."""
-
     type: Literal["mcp"]
     """The type of the MCP tool. Always `mcp`."""
 
     allowed_tools: Optional[McpAllowedTools] = None
     """List of allowed tool names or a filter object."""
 
+    authorization: Optional[str] = None
+    """
+    An OAuth access token that can be used with a remote MCP server, either with a
+    custom MCP server URL or a service connector. Your application must handle the
+    OAuth authorization flow and provide the token here.
+    """
+
+    connector_id: Optional[
+        Literal[
+            "connector_dropbox",
+            "connector_gmail",
+            "connector_googlecalendar",
+            "connector_googledrive",
+            "connector_microsoftteams",
+            "connector_outlookcalendar",
+            "connector_outlookemail",
+            "connector_sharepoint",
+        ]
+    ] = None
+    """Identifier for service connectors, like those available in ChatGPT.
+
+    One of `server_url` or `connector_id` must be provided. Learn more about service
+    connectors
+    [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+
+    Currently supported `connector_id` values are:
+
+    - Dropbox: `connector_dropbox`
+    - Gmail: `connector_gmail`
+    - Google Calendar: `connector_googlecalendar`
+    - Google Drive: `connector_googledrive`
+    - Microsoft Teams: `connector_microsoftteams`
+    - Outlook Calendar: `connector_outlookcalendar`
+    - Outlook Email: `connector_outlookemail`
+    - SharePoint: `connector_sharepoint`
+    """
+
     headers: Optional[Dict[str, str]] = None
     """Optional HTTP headers to send to the MCP server.
 
@@ -83,6 +141,12 @@ class Mcp(BaseModel):
     server_description: Optional[str] = None
     """Optional description of the MCP server, used to provide more context."""
 
+    server_url: Optional[str] = None
+    """The URL for the MCP server.
+
+    One of `server_url` or `connector_id` must be provided.
+    """
+
 
 class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
     type: Literal["auto"]
src/openai/types/responses/tool_param.py
@@ -16,7 +16,7 @@ __all__ = [
     "ToolParam",
     "Mcp",
     "McpAllowedTools",
-    "McpAllowedToolsMcpAllowedToolsFilter",
+    "McpAllowedToolsMcpToolFilter",
     "McpRequireApproval",
     "McpRequireApprovalMcpToolApprovalFilter",
     "McpRequireApprovalMcpToolApprovalFilterAlways",
@@ -30,30 +30,54 @@ __all__ = [
 ]
 
 
-class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
+class McpAllowedToolsMcpToolFilter(TypedDict, total=False):
+    read_only: bool
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: List[str]
     """List of allowed tool names."""
 
 
-McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter]
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter]
 
 
 class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
+    read_only: bool
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: List[str]
-    """List of tools that require approval."""
+    """List of allowed tool names."""
 
 
 class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
+    read_only: bool
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
     tool_names: List[str]
-    """List of tools that do not require approval."""
+    """List of allowed tool names."""
 
 
 class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
     always: McpRequireApprovalMcpToolApprovalFilterAlways
-    """A list of tools that always require approval."""
+    """A filter object to specify which tools are allowed."""
 
     never: McpRequireApprovalMcpToolApprovalFilterNever
-    """A list of tools that never require approval."""
+    """A filter object to specify which tools are allowed."""
 
 
 McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
@@ -63,15 +87,47 @@ class Mcp(TypedDict, total=False):
     server_label: Required[str]
     """A label for this MCP server, used to identify it in tool calls."""
 
-    server_url: Required[str]
-    """The URL for the MCP server."""
-
     type: Required[Literal["mcp"]]
     """The type of the MCP tool. Always `mcp`."""
 
     allowed_tools: Optional[McpAllowedTools]
     """List of allowed tool names or a filter object."""
 
+    authorization: str
+    """
+    An OAuth access token that can be used with a remote MCP server, either with a
+    custom MCP server URL or a service connector. Your application must handle the
+    OAuth authorization flow and provide the token here.
+    """
+
+    connector_id: Literal[
+        "connector_dropbox",
+        "connector_gmail",
+        "connector_googlecalendar",
+        "connector_googledrive",
+        "connector_microsoftteams",
+        "connector_outlookcalendar",
+        "connector_outlookemail",
+        "connector_sharepoint",
+    ]
+    """Identifier for service connectors, like those available in ChatGPT.
+
+    One of `server_url` or `connector_id` must be provided. Learn more about service
+    connectors
+    [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+
+    Currently supported `connector_id` values are:
+
+    - Dropbox: `connector_dropbox`
+    - Gmail: `connector_gmail`
+    - Google Calendar: `connector_googlecalendar`
+    - Google Drive: `connector_googledrive`
+    - Microsoft Teams: `connector_microsoftteams`
+    - Outlook Calendar: `connector_outlookcalendar`
+    - Outlook Email: `connector_outlookemail`
+    - SharePoint: `connector_sharepoint`
+    """
+
     headers: Optional[Dict[str, str]]
     """Optional HTTP headers to send to the MCP server.
 
@@ -84,6 +140,12 @@ class Mcp(TypedDict, total=False):
     server_description: str
     """Optional description of the MCP server, used to provide more context."""
 
+    server_url: str
+    """The URL for the MCP server.
+
+    One of `server_url` or `connector_id` must be provided.
+    """
+
 
 class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
     type: Required[Literal["auto"]]
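
A sketch of the new connector-backed form of the MCP tool (the OAuth token is a placeholder obtained out of band; exactly one of `server_url` or `connector_id` should be supplied):

    from openai import OpenAI

    client = OpenAI()

    response = client.responses.create(
        model="gpt-4.1",
        input="What is on my calendar today?",
        tools=[
            {
                "type": "mcp",
                "server_label": "google_calendar",
                "connector_id": "connector_googlecalendar",
                "authorization": "<oauth-access-token>",
                "require_approval": "never",
            }
        ],
    )
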
src/openai/__init__.py
@@ -386,5 +386,6 @@ from ._module_client import (
     completions as completions,
     fine_tuning as fine_tuning,
     moderations as moderations,
+    conversations as conversations,
     vector_stores as vector_stores,
 )
src/openai/_client.py
@@ -51,6 +51,7 @@ if TYPE_CHECKING:
         completions,
         fine_tuning,
         moderations,
+        conversations,
         vector_stores,
     )
     from .resources.files import Files, AsyncFiles
@@ -69,6 +70,7 @@ if TYPE_CHECKING:
     from .resources.responses.responses import Responses, AsyncResponses
     from .resources.containers.containers import Containers, AsyncContainers
     from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning
+    from .resources.conversations.conversations import Conversations, AsyncConversations
     from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores
 
 __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
@@ -254,6 +256,12 @@ class OpenAI(SyncAPIClient):
 
         return Responses(self)
 
+    @cached_property
+    def conversations(self) -> Conversations:
+        from .resources.conversations import Conversations
+
+        return Conversations(self)
+
     @cached_property
     def evals(self) -> Evals:
         from .resources.evals import Evals
@@ -573,6 +581,12 @@ class AsyncOpenAI(AsyncAPIClient):
 
         return AsyncResponses(self)
 
+    @cached_property
+    def conversations(self) -> AsyncConversations:
+        from .resources.conversations import AsyncConversations
+
+        return AsyncConversations(self)
+
     @cached_property
     def evals(self) -> AsyncEvals:
         from .resources.evals import AsyncEvals
@@ -802,6 +816,12 @@ class OpenAIWithRawResponse:
 
         return ResponsesWithRawResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.ConversationsWithRawResponse:
+        from .resources.conversations import ConversationsWithRawResponse
+
+        return ConversationsWithRawResponse(self._client.conversations)
+
     @cached_property
     def evals(self) -> evals.EvalsWithRawResponse:
         from .resources.evals import EvalsWithRawResponse
@@ -905,6 +925,12 @@ class AsyncOpenAIWithRawResponse:
 
         return AsyncResponsesWithRawResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.AsyncConversationsWithRawResponse:
+        from .resources.conversations import AsyncConversationsWithRawResponse
+
+        return AsyncConversationsWithRawResponse(self._client.conversations)
+
     @cached_property
     def evals(self) -> evals.AsyncEvalsWithRawResponse:
         from .resources.evals import AsyncEvalsWithRawResponse
@@ -1008,6 +1034,12 @@ class OpenAIWithStreamedResponse:
 
         return ResponsesWithStreamingResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.ConversationsWithStreamingResponse:
+        from .resources.conversations import ConversationsWithStreamingResponse
+
+        return ConversationsWithStreamingResponse(self._client.conversations)
+
     @cached_property
     def evals(self) -> evals.EvalsWithStreamingResponse:
         from .resources.evals import EvalsWithStreamingResponse
@@ -1111,6 +1143,12 @@ class AsyncOpenAIWithStreamedResponse:
 
         return AsyncResponsesWithStreamingResponse(self._client.responses)
 
+    @cached_property
+    def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse:
+        from .resources.conversations import AsyncConversationsWithStreamingResponse
+
+        return AsyncConversationsWithStreamingResponse(self._client.conversations)
+
     @cached_property
     def evals(self) -> evals.AsyncEvalsWithStreamingResponse:
         from .resources.evals import AsyncEvalsWithStreamingResponse
src/openai/_module_client.py
@@ -22,6 +22,7 @@ if TYPE_CHECKING:
     from .resources.responses.responses import Responses
     from .resources.containers.containers import Containers
     from .resources.fine_tuning.fine_tuning import FineTuning
+    from .resources.conversations.conversations import Conversations
     from .resources.vector_stores.vector_stores import VectorStores
 
 from . import _load_client
@@ -130,6 +131,12 @@ class VectorStoresProxy(LazyProxy["VectorStores"]):
         return _load_client().vector_stores
 
 
+class ConversationsProxy(LazyProxy["Conversations"]):
+    @override
+    def __load__(self) -> Conversations:
+        return _load_client().conversations
+
+
 chat: Chat = ChatProxy().__as_proxied__()
 beta: Beta = BetaProxy().__as_proxied__()
 files: Files = FilesProxy().__as_proxied__()
@@ -147,3 +154,4 @@ completions: Completions = CompletionsProxy().__as_proxied__()
 moderations: Moderations = ModerationsProxy().__as_proxied__()
 fine_tuning: FineTuning = FineTuningProxy().__as_proxied__()
 vector_stores: VectorStores = VectorStoresProxy().__as_proxied__()
+conversations: Conversations = ConversationsProxy().__as_proxied__()
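The proxy registered above makes `openai.conversations` available as a lazily initialized module attribute, mirroring the other resources. A rough sketch of the resulting usage, assuming `OPENAI_API_KEY` is set in the environment:

```python
import openai

# `openai.conversations` is a ConversationsProxy; the underlying client
# is only constructed on first access, via __load__ above.
conversation = openai.conversations.create(
    metadata={"topic": "demo"},  # illustrative metadata
)
print(conversation.id)
```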
src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.100.3"  # x-release-please-version
+__version__ = "1.101.0"  # x-release-please-version
src/openai/pagination.py
@@ -5,7 +5,14 @@ from typing_extensions import Protocol, override, runtime_checkable
 
 from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
 
-__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"]
+__all__ = [
+    "SyncPage",
+    "AsyncPage",
+    "SyncCursorPage",
+    "AsyncCursorPage",
+    "SyncConversationCursorPage",
+    "AsyncConversationCursorPage",
+]
 
 _T = TypeVar("_T")
 
@@ -123,3 +130,61 @@ class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
             return None
 
         return PageInfo(params={"after": item.id})
+
+
+class SyncConversationCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+    data: List[_T]
+    has_more: Optional[bool] = None
+    last_id: Optional[str] = None
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def has_next_page(self) -> bool:
+        # `is False` already rules out None, so a single check suffices.
+        if self.has_more is False:
+            return False
+
+        return super().has_next_page()
+
+    @override
+    def next_page_info(self) -> Optional[PageInfo]:
+        last_id = self.last_id
+        if not last_id:
+            return None
+
+        return PageInfo(params={"after": last_id})
+
+
+class AsyncConversationCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+    data: List[_T]
+    has_more: Optional[bool] = None
+    last_id: Optional[str] = None
+
+    @override
+    def _get_page_items(self) -> List[_T]:
+        data = self.data
+        if not data:
+            return []
+        return data
+
+    @override
+    def has_next_page(self) -> bool:
+        # `is False` already rules out None, so a single check suffices.
+        if self.has_more is False:
+            return False
+
+        return super().has_next_page()
+
+    @override
+    def next_page_info(self) -> Optional[PageInfo]:
+        last_id = self.last_id
+        if not last_id:
+            return None
+
+        return PageInfo(params={"after": last_id})
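Unlike the plain cursor pages, which derive the cursor from the final item's `id`, these classes page on the response's `last_id` field and stop when `has_more` is false. A hedged sketch of how auto-pagination would consume them (placeholder conversation id):

```python
from openai import OpenAI

client = OpenAI()

# Iterating the page object fetches follow-up pages automatically,
# re-requesting with `after=<last_id>` until `has_more` is false.
for item in client.conversations.items.list(
    conversation_id="conv_123",  # placeholder id
    limit=20,
):
    print(item.id)
```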
tests/api_resources/conversations/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/conversations/test_items.py
@@ -0,0 +1,491 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.conversations import (
+    Conversation,
+    ConversationItem,
+    ConversationItemList,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestItems:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:
+        item = client.conversations.items.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        )
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        item = client.conversations.items.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                    "type": "message",
+                }
+            ],
+            include=["code_interpreter_call.outputs"],
+        )
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:
+        response = client.conversations.items.with_raw_response.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:
+        with client.conversations.items.with_streaming_response.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(ConversationItemList, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_create(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.create(
+                conversation_id="",
+                items=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                    }
+                ],
+            )
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        item = client.conversations.items.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+        item = client.conversations.items.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+            include=["code_interpreter_call.outputs"],
+        )
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        response = client.conversations.items.with_raw_response.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.conversations.items.with_streaming_response.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(ConversationItem, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.retrieve(
+                item_id="msg_abc",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            client.conversations.items.with_raw_response.retrieve(
+                item_id="",
+                conversation_id="conv_123",
+            )
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        item = client.conversations.items.list(
+            conversation_id="conv_123",
+        )
+        assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        item = client.conversations.items.list(
+            conversation_id="conv_123",
+            after="after",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.conversations.items.with_raw_response.list(
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.conversations.items.with_streaming_response.list(
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_list(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.list(
+                conversation_id="",
+            )
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        item = client.conversations.items.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+        assert_matches_type(Conversation, item, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.conversations.items.with_raw_response.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(Conversation, item, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.conversations.items.with_streaming_response.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = response.parse()
+            assert_matches_type(Conversation, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.items.with_raw_response.delete(
+                item_id="msg_abc",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            client.conversations.items.with_raw_response.delete(
+                item_id="",
+                conversation_id="conv_123",
+            )
+
+
+class TestAsyncItems:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        )
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                    "type": "message",
+                }
+            ],
+            include=["code_interpreter_call.outputs"],
+        )
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.items.with_raw_response.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ConversationItemList, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.items.with_streaming_response.create(
+            conversation_id="conv_123",
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                }
+            ],
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(ConversationItemList, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.create(
+                conversation_id="",
+                items=[
+                    {
+                        "content": "string",
+                        "role": "user",
+                    }
+                ],
+            )
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+            include=["code_interpreter_call.outputs"],
+        )
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.items.with_raw_response.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(ConversationItem, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.items.with_streaming_response.retrieve(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(ConversationItem, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.retrieve(
+                item_id="msg_abc",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.retrieve(
+                item_id="",
+                conversation_id="conv_123",
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.list(
+            conversation_id="conv_123",
+        )
+        assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.list(
+            conversation_id="conv_123",
+            after="after",
+            include=["code_interpreter_call.outputs"],
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.items.with_raw_response.list(
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.items.with_streaming_response.list(
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.list(
+                conversation_id="",
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        item = await async_client.conversations.items.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+        assert_matches_type(Conversation, item, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.items.with_raw_response.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        item = response.parse()
+        assert_matches_type(Conversation, item, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.items.with_streaming_response.delete(
+            item_id="msg_abc",
+            conversation_id="conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            item = await response.parse()
+            assert_matches_type(Conversation, item, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.delete(
+                item_id="msg_abc",
+                conversation_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+            await async_client.conversations.items.with_raw_response.delete(
+                item_id="",
+                conversation_id="conv_123",
+            )
tests/api_resources/responses/test_input_items.py
@@ -30,7 +30,6 @@ class TestInputItems:
         input_item = client.responses.input_items.list(
             response_id="response_id",
             after="after",
-            before="before",
             include=["code_interpreter_call.outputs"],
             limit=0,
             order="asc",
@@ -86,7 +85,6 @@ class TestAsyncInputItems:
         input_item = await async_client.responses.input_items.list(
             response_id="response_id",
             after="after",
-            before="before",
             include=["code_interpreter_call.outputs"],
             limit=0,
             order="asc",
tests/api_resources/test_conversations.py
@@ -0,0 +1,341 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.conversations import (
+    Conversation,
+    ConversationDeletedResource,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestConversations:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:
+        conversation = client.conversations.create()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        conversation = client.conversations.create(
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                    "type": "message",
+                }
+            ],
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:
+        response = client.conversations.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:
+        with client.conversations.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        conversation = client.conversations.retrieve(
+            "conv_123",
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        response = client.conversations.with_raw_response.retrieve(
+            "conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.conversations.with_streaming_response.retrieve(
+            "conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    def test_method_update(self, client: OpenAI) -> None:
+        conversation = client.conversations.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_update(self, client: OpenAI) -> None:
+        response = client.conversations.with_raw_response.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_update(self, client: OpenAI) -> None:
+        with client.conversations.with_streaming_response.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_update(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.update(
+                conversation_id="",
+                metadata={"foo": "string"},
+            )
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        conversation = client.conversations.delete(
+            "conv_123",
+        )
+        assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.conversations.with_raw_response.delete(
+            "conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.conversations.with_streaming_response.delete(
+            "conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = response.parse()
+            assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            client.conversations.with_raw_response.delete(
+                "",
+            )
+
+
+class TestAsyncConversations:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        conversation = await async_client.conversations.create()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        conversation = await async_client.conversations.create(
+            items=[
+                {
+                    "content": "string",
+                    "role": "user",
+                    "type": "message",
+                }
+            ],
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.with_raw_response.create()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.with_streaming_response.create() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        conversation = await async_client.conversations.retrieve(
+            "conv_123",
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.with_raw_response.retrieve(
+            "conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.with_streaming_response.retrieve(
+            "conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+        conversation = await async_client.conversations.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        )
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.with_raw_response.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(Conversation, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.with_streaming_response.update(
+            conversation_id="conv_123",
+            metadata={"foo": "string"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(Conversation, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.update(
+                conversation_id="",
+                metadata={"foo": "string"},
+            )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        conversation = await async_client.conversations.delete(
+            "conv_123",
+        )
+        assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.conversations.with_raw_response.delete(
+            "conv_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        conversation = response.parse()
+        assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.conversations.with_streaming_response.delete(
+            "conv_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            conversation = await response.parse()
+            assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+            await async_client.conversations.with_raw_response.delete(
+                "",
+            )
tests/api_resources/test_responses.py
@@ -29,6 +29,7 @@ class TestResponses:
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         response = client.responses.create(
             background=True,
+            conversation="string",
             include=["code_interpreter_call.outputs"],
             input="string",
             instructions="instructions",
@@ -108,6 +109,7 @@ class TestResponses:
         response_stream = client.responses.create(
             stream=True,
             background=True,
+            conversation="string",
             include=["code_interpreter_call.outputs"],
             input="string",
             instructions="instructions",
@@ -380,6 +382,7 @@ class TestAsyncResponses:
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.create(
             background=True,
+            conversation="string",
             include=["code_interpreter_call.outputs"],
             input="string",
             instructions="instructions",
@@ -459,6 +462,7 @@ class TestAsyncResponses:
         response_stream = await async_client.responses.create(
             stream=True,
             background=True,
+            conversation="string",
             include=["code_interpreter_call.outputs"],
             input="string",
             instructions="instructions",
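The new `conversation` argument exercised in these tests links a response to stored conversation state. A minimal sketch of a realistic call, where a `conv_...` id replaces the test's `"string"` placeholder:

```python
from openai import OpenAI

client = OpenAI()

conv = client.conversations.create()

# Sketch: the response's input and output items are persisted to the
# given conversation; "gpt-4.1" is an illustrative model name.
response = client.responses.create(
    model="gpt-4.1",
    input="Hello!",
    conversation=conv.id,
)
print(response.output_text)
```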
.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.100.3"
+  ".": "1.101.0"
 }
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml
-openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063
-config_hash: 4870312b04f48fd717ea4151053e7fb9
+configured_endpoints: 119
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ddbdf9343316047e8a773c54fb24e4a8d225955e202a1888fde6f9c8898ebf98.yml
+openapi_spec_hash: 9802f6dd381558466c897f6e387e06ca
+config_hash: fe0ea26680ac2075a6cd66416aefe7db
api.md
@@ -751,6 +751,7 @@ from openai.types.responses import (
     ResponseContent,
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
+    ResponseConversationParam,
     ResponseCreatedEvent,
     ResponseCustomToolCall,
     ResponseCustomToolCallInputDeltaEvent,
@@ -854,6 +855,54 @@ Methods:
 
 - <code title="get /responses/{response_id}/input_items">client.responses.input_items.<a href="./src/openai/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/openai/types/responses/input_item_list_params.py">params</a>) -> <a href="./src/openai/types/responses/response_item.py">SyncCursorPage[ResponseItem]</a></code>
 
+# Conversations
+
+Types:
+
+```python
+from openai.types.conversations import (
+    ComputerScreenshotContent,
+    ContainerFileCitationBody,
+    Conversation,
+    ConversationDeleted,
+    ConversationDeletedResource,
+    FileCitationBody,
+    InputFileContent,
+    InputImageContent,
+    InputTextContent,
+    LobProb,
+    Message,
+    OutputTextContent,
+    RefusalContent,
+    SummaryTextContent,
+    TextContent,
+    TopLogProb,
+    URLCitationBody,
+)
+```
+
+Methods:
+
+- <code title="post /conversations">client.conversations.<a href="./src/openai/resources/conversations/conversations.py">create</a>(\*\*<a href="src/openai/types/conversations/conversation_create_params.py">params</a>) -> <a href="./src/openai/types/conversations/conversation.py">Conversation</a></code>
+- <code title="get /conversations/{conversation_id}">client.conversations.<a href="./src/openai/resources/conversations/conversations.py">retrieve</a>(conversation_id) -> <a href="./src/openai/types/conversations/conversation.py">Conversation</a></code>
+- <code title="post /conversations/{conversation_id}">client.conversations.<a href="./src/openai/resources/conversations/conversations.py">update</a>(conversation_id, \*\*<a href="src/openai/types/conversations/conversation_update_params.py">params</a>) -> <a href="./src/openai/types/conversations/conversation.py">Conversation</a></code>
+- <code title="delete /conversations/{conversation_id}">client.conversations.<a href="./src/openai/resources/conversations/conversations.py">delete</a>(conversation_id) -> <a href="./src/openai/types/conversations/conversation_deleted_resource.py">ConversationDeletedResource</a></code>
+
+## Items
+
+Types:
+
+```python
+from openai.types.conversations import ConversationItem, ConversationItemList
+```
+
+Methods:
+
+- <code title="post /conversations/{conversation_id}/items">client.conversations.items.<a href="./src/openai/resources/conversations/items.py">create</a>(conversation_id, \*\*<a href="src/openai/types/conversations/item_create_params.py">params</a>) -> <a href="./src/openai/types/conversations/conversation_item_list.py">ConversationItemList</a></code>
+- <code title="get /conversations/{conversation_id}/items/{item_id}">client.conversations.items.<a href="./src/openai/resources/conversations/items.py">retrieve</a>(item_id, \*, conversation_id, \*\*<a href="src/openai/types/conversations/item_retrieve_params.py">params</a>) -> <a href="./src/openai/types/conversations/conversation_item.py">ConversationItem</a></code>
+- <code title="get /conversations/{conversation_id}/items">client.conversations.items.<a href="./src/openai/resources/conversations/items.py">list</a>(conversation_id, \*\*<a href="src/openai/types/conversations/item_list_params.py">params</a>) -> <a href="./src/openai/types/conversations/conversation_item.py">SyncConversationCursorPage[ConversationItem]</a></code>
+- <code title="delete /conversations/{conversation_id}/items/{item_id}">client.conversations.items.<a href="./src/openai/resources/conversations/items.py">delete</a>(item_id, \*, conversation_id) -> <a href="./src/openai/types/conversations/conversation.py">Conversation</a></code>
+
 # Evals
 
 Types:
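Taken together, the documented methods cover conversation CRUD plus item management. A speculative end-to-end sketch of that surface — ids are generated server-side, and the `deleted` flag on the delete result is assumed from the resource name:

```python
from openai import OpenAI

client = OpenAI()

conv = client.conversations.create(metadata={"purpose": "demo"})
client.conversations.update(conversation_id=conv.id, metadata={"purpose": "demo-v2"})

items = client.conversations.items.create(
    conversation_id=conv.id,
    items=[{"type": "message", "role": "user", "content": "Hi there"}],
)
first = items.data[0]
client.conversations.items.delete(item_id=first.id, conversation_id=conv.id)

deleted = client.conversations.delete(conv.id)
print(deleted.deleted)  # assumed boolean on ConversationDeletedResource
```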
CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.101.0 (2025-08-21)
+
+Full Changelog: [v1.100.3...v1.101.0](https://github.com/openai/openai-python/compare/v1.100.3...v1.101.0)
+
+### Features
+
+* **api:** Add connectors support for MCP tool ([a47f962](https://github.com/openai/openai-python/commit/a47f962daf579c142b8af5579be732772b688a29))
+* **api:** adding support for /v1/conversations to the API ([e30bcbc](https://github.com/openai/openai-python/commit/e30bcbc0cb7c827af779bee6971f976261abfb67))
+
+
+### Chores
+
+* update github action ([7333b28](https://github.com/openai/openai-python/commit/7333b282718a5f6977f30e1a2548207b3a089bd4))
+
 ## 1.100.3 (2025-08-20)
 
 Full Changelog: [v1.100.2...v1.100.3](https://github.com/openai/openai-python/compare/v1.100.2...v1.100.3)
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.100.3"
+version = "1.101.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"