Commit 9ada2c74

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-10-07 01:49:02
feat(api): dev day 2025 launches
DevDay 2025 launches including videos and chatkit beta
1 parent 53f7a74
Changed files (75)
examples
src
tests
examples/video.py
@@ -0,0 +1,22 @@
#!/usr/bin/env -S poetry run python

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    """Kick off a Sora video generation job and wait for it to finish."""
    video = await client.videos.create_and_poll(
        model="sora-2",
        prompt="A video of the words 'Thank you' in sparkling letters",
    )

    # Report the terminal state; anything other than "completed" is a failure.
    if video.status != "completed":
        print("Video creation failed. Status: ", video.status)
    else:
        print("Video successfully completed: ", video)


asyncio.run(main())
src/openai/resources/beta/chatkit/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chatkit import (
+    ChatKit,
+    AsyncChatKit,
+    ChatKitWithRawResponse,
+    AsyncChatKitWithRawResponse,
+    ChatKitWithStreamingResponse,
+    AsyncChatKitWithStreamingResponse,
+)
+from .threads import (
+    Threads,
+    AsyncThreads,
+    ThreadsWithRawResponse,
+    AsyncThreadsWithRawResponse,
+    ThreadsWithStreamingResponse,
+    AsyncThreadsWithStreamingResponse,
+)
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Sessions",
+    "AsyncSessions",
+    "SessionsWithRawResponse",
+    "AsyncSessionsWithRawResponse",
+    "SessionsWithStreamingResponse",
+    "AsyncSessionsWithStreamingResponse",
+    "Threads",
+    "AsyncThreads",
+    "ThreadsWithRawResponse",
+    "AsyncThreadsWithRawResponse",
+    "ThreadsWithStreamingResponse",
+    "AsyncThreadsWithStreamingResponse",
+    "ChatKit",
+    "AsyncChatKit",
+    "ChatKitWithRawResponse",
+    "AsyncChatKitWithRawResponse",
+    "ChatKitWithStreamingResponse",
+    "AsyncChatKitWithStreamingResponse",
+]
src/openai/resources/beta/chatkit/chatkit.py
@@ -0,0 +1,259 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, Mapping, cast
+
+import httpx
+
+from .... import _legacy_response
+from .threads import (
+    Threads,
+    AsyncThreads,
+    ThreadsWithRawResponse,
+    AsyncThreadsWithRawResponse,
+    ThreadsWithStreamingResponse,
+    AsyncThreadsWithStreamingResponse,
+)
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+from ...._types import Body, Query, Headers, NotGiven, FileTypes, not_given
+from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....types.beta import chatkit_upload_file_params
+from ...._base_client import make_request_options
+from ....types.beta.chatkit_upload_file_response import ChatKitUploadFileResponse
+
+__all__ = ["ChatKit", "AsyncChatKit"]
+
+
class ChatKit(SyncAPIResource):
    """Synchronous access to the ChatKit beta surface: sessions, threads, and file uploads."""

    @cached_property
    def sessions(self) -> Sessions:
        return Sessions(self._client)

    @cached_property
    def threads(self) -> Threads:
        return Threads(self._client)

    @cached_property
    def with_raw_response(self) -> ChatKitWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatKitWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ChatKitWithStreamingResponse(self)

    def upload_file(
        self,
        *,
        file: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitUploadFileResponse:
        """
        Upload a ChatKit file

        Args:
          file: Binary file contents to store with the ChatKit session. Supports PDFs and PNG,
              JPG, JPEG, GIF, or WEBP images.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Caller-supplied headers take precedence over the beta opt-in header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        payload = deepcopy_minimal({"file": file})
        extracted_files = extract_files(cast(Mapping[str, object], payload), paths=[["file"]])
        if extracted_files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers["Content-Type"] = "multipart/form-data"
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        response = self._post(
            "/chatkit/files",
            body=maybe_transform(payload, chatkit_upload_file_params.ChatKitUploadFileParams),
            files=extracted_files,
            options=options,
            # Union types cannot be passed in as arguments in the type system
            cast_to=cast(Any, ChatKitUploadFileResponse),
        )
        return cast(ChatKitUploadFileResponse, response)
+
+
class AsyncChatKit(AsyncAPIResource):
    """Asynchronous access to the ChatKit beta surface: sessions, threads, and file uploads."""

    @cached_property
    def sessions(self) -> AsyncSessions:
        return AsyncSessions(self._client)

    @cached_property
    def threads(self) -> AsyncThreads:
        return AsyncThreads(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncChatKitWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatKitWithStreamingResponse(self)

    async def upload_file(
        self,
        *,
        file: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitUploadFileResponse:
        """
        Upload a ChatKit file

        Args:
          file: Binary file contents to store with the ChatKit session. Supports PDFs and PNG,
              JPG, JPEG, GIF, or WEBP images.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Caller-supplied headers take precedence over the beta opt-in header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        payload = deepcopy_minimal({"file": file})
        extracted_files = extract_files(cast(Mapping[str, object], payload), paths=[["file"]])
        if extracted_files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers["Content-Type"] = "multipart/form-data"
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        response = await self._post(
            "/chatkit/files",
            body=await async_maybe_transform(payload, chatkit_upload_file_params.ChatKitUploadFileParams),
            files=extracted_files,
            options=options,
            # Union types cannot be passed in as arguments in the type system
            cast_to=cast(Any, ChatKitUploadFileResponse),
        )
        return cast(ChatKitUploadFileResponse, response)
+
+
class ChatKitWithRawResponse:
    """View over :class:`ChatKit` whose methods return the raw HTTP response."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit
        # Each method is wrapped so it yields the unparsed response object.
        self.upload_file = _legacy_response.to_raw_response_wrapper(chatkit.upload_file)

    @cached_property
    def sessions(self) -> SessionsWithRawResponse:
        return SessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        return ThreadsWithRawResponse(self._chatkit.threads)
+
+
class AsyncChatKitWithRawResponse:
    """View over :class:`AsyncChatKit` whose methods return the raw HTTP response."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit
        # Each method is wrapped so it yields the unparsed response object.
        self.upload_file = _legacy_response.async_to_raw_response_wrapper(chatkit.upload_file)

    @cached_property
    def sessions(self) -> AsyncSessionsWithRawResponse:
        return AsyncSessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        return AsyncThreadsWithRawResponse(self._chatkit.threads)
+
+
class ChatKitWithStreamingResponse:
    """View over :class:`ChatKit` whose methods stream the response body lazily."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit
        # Each method is wrapped so the response body is not read eagerly.
        self.upload_file = to_streamed_response_wrapper(chatkit.upload_file)

    @cached_property
    def sessions(self) -> SessionsWithStreamingResponse:
        return SessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        return ThreadsWithStreamingResponse(self._chatkit.threads)
+
+
class AsyncChatKitWithStreamingResponse:
    """View over :class:`AsyncChatKit` whose methods stream the response body lazily."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit
        # Each method is wrapped so the response body is not read eagerly.
        self.upload_file = async_to_streamed_response_wrapper(chatkit.upload_file)

    @cached_property
    def sessions(self) -> AsyncSessionsWithStreamingResponse:
        return AsyncSessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        return AsyncThreadsWithStreamingResponse(self._chatkit.threads)
src/openai/resources/beta/chatkit/sessions.py
@@ -0,0 +1,301 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.beta.chatkit import (
+    ChatSessionWorkflowParam,
+    ChatSessionRateLimitsParam,
+    ChatSessionExpiresAfterParam,
+    ChatSessionChatKitConfigurationParam,
+    session_create_params,
+)
+from ....types.beta.chatkit.chat_session import ChatSession
+from ....types.beta.chatkit.chat_session_workflow_param import ChatSessionWorkflowParam
+from ....types.beta.chatkit.chat_session_rate_limits_param import ChatSessionRateLimitsParam
+from ....types.beta.chatkit.chat_session_expires_after_param import ChatSessionExpiresAfterParam
+from ....types.beta.chatkit.chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam
+
+__all__ = ["Sessions", "AsyncSessions"]
+
+
class Sessions(SyncAPIResource):
    """Synchronous operations on ChatKit sessions (create, cancel)."""

    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Caller-supplied headers take precedence over the beta opt-in header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        params = {
            "user": user,
            "workflow": workflow,
            "chatkit_configuration": chatkit_configuration,
            "expires_after": expires_after,
            "rate_limits": rate_limits,
        }
        return self._post(
            "/chatkit/sessions",
            body=maybe_transform(params, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty ID would produce a malformed request path.
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._post(f"/chatkit/sessions/{session_id}/cancel", options=options, cast_to=ChatSession)
+
+
class AsyncSessions(AsyncAPIResource):
    """Asynchronous operations on ChatKit sessions (create, cancel)."""

    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Caller-supplied headers take precedence over the beta opt-in header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        params = {
            "user": user,
            "workflow": workflow,
            "chatkit_configuration": chatkit_configuration,
            "expires_after": expires_after,
            "rate_limits": rate_limits,
        }
        return await self._post(
            "/chatkit/sessions",
            body=await async_maybe_transform(params, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    async def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel a ChatKit session

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty ID would produce a malformed request path.
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post(f"/chatkit/sessions/{session_id}/cancel", options=options, cast_to=ChatSession)
+
+
class SessionsWithRawResponse:
    """View over :class:`Sessions` whose methods return the raw HTTP response."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        # Each method is wrapped so it yields the unparsed response object.
        self.create = _legacy_response.to_raw_response_wrapper(sessions.create)
        self.cancel = _legacy_response.to_raw_response_wrapper(sessions.cancel)
+
+
class AsyncSessionsWithRawResponse:
    """View over :class:`AsyncSessions` whose methods return the raw HTTP response."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        # Each method is wrapped so it yields the unparsed response object.
        self.create = _legacy_response.async_to_raw_response_wrapper(sessions.create)
        self.cancel = _legacy_response.async_to_raw_response_wrapper(sessions.cancel)
+
+
class SessionsWithStreamingResponse:
    """View over :class:`Sessions` whose methods stream the response body lazily."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        # Each method is wrapped so the response body is not read eagerly.
        self.create = to_streamed_response_wrapper(sessions.create)
        self.cancel = to_streamed_response_wrapper(sessions.cancel)
+
+
class AsyncSessionsWithStreamingResponse:
    """View over :class:`AsyncSessions` whose methods stream the response body lazily."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        # Each method is wrapped so the response body is not read eagerly.
        self.create = async_to_streamed_response_wrapper(sessions.create)
        self.cancel = async_to_streamed_response_wrapper(sessions.cancel)
src/openai/resources/beta/chatkit/threads.py
@@ -0,0 +1,521 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.beta.chatkit import thread_list_params, thread_list_items_params
+from ....types.beta.chatkit.chatkit_thread import ChatKitThread
+from ....types.beta.chatkit.thread_delete_response import ThreadDeleteResponse
+from ....types.beta.chatkit.chatkit_thread_item_list import Data
+
+__all__ = ["Threads", "AsyncThreads"]
+
+
class Threads(SyncAPIResource):
    """Synchronous operations on ChatKit threads (retrieve, list, delete, list items)."""

    @cached_property
    def with_raw_response(self) -> ThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ThreadsWithStreamingResponse(self)

    def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty ID would produce a malformed request path.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(f"/chatkit/threads/{thread_id}", options=options, cast_to=ChatKitThread)

    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ChatKitThread]:
        """
        List ChatKit threads

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        query = maybe_transform(
            {
                "after": after,
                "before": before,
                "limit": limit,
                "order": order,
                "user": user,
            },
            thread_list_params.ThreadListParams,
        )
        return self._get_api_list(
            "/chatkit/threads",
            page=SyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query,
            ),
            model=ChatKitThread,
        )

    def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty ID would produce a malformed request path.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._delete(f"/chatkit/threads/{thread_id}", options=options, cast_to=ThreadDeleteResponse)

    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Data]:
        """
        List ChatKit thread items

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard clause: an empty ID would produce a malformed request path.
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        query = maybe_transform(
            {
                "after": after,
                "before": before,
                "limit": limit,
                "order": order,
            },
            thread_list_items_params.ThreadListItemsParams,
        )
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=SyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=query,
            ),
            # Union types cannot be passed in as arguments in the type system
            model=cast(Any, Data),
        )
+
+
+class AsyncThreads(AsyncAPIResource):
+    """Async ChatKit Threads resource; mirrors the synchronous `Threads` API method-for-method."""
+
+    @cached_property
+    def with_raw_response(self) -> AsyncThreadsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncThreadsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncThreadsWithStreamingResponse(self)
+
+    async def retrieve(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ChatKitThread:
+        """
+        Retrieve a ChatKit thread
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        # Opt into the ChatKit beta; caller-supplied headers are spread last so they win on conflict.
+        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
+        return await self._get(
+            f"/chatkit/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ChatKitThread,
+        )
+
+    # Deliberately not `async`: returns an AsyncPaginator that is awaited/iterated by the caller.
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        before: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        user: str | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]:
+        """
+        List ChatKit threads
+
+        Args:
+          after: List items created after this thread item ID. Defaults to null for the first
+              page.
+
+          before: List items created before this thread item ID. Defaults to null for the newest
+              results.
+
+          limit: Maximum number of thread items to return. Defaults to 20.
+
+          order: Sort order for results by creation time. Defaults to `desc`.
+
+          user: Filter threads that belong to this user identifier. Defaults to null to return
+              all users.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Opt into the ChatKit beta; caller-supplied headers are spread last so they win on conflict.
+        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
+        return self._get_api_list(
+            "/chatkit/threads",
+            page=AsyncConversationCursorPage[ChatKitThread],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Omitted params are dropped by the transform rather than sent as nulls.
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                        "user": user,
+                    },
+                    thread_list_params.ThreadListParams,
+                ),
+            ),
+            model=ChatKitThread,
+        )
+
+    async def delete(
+        self,
+        thread_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ThreadDeleteResponse:
+        """
+        Delete a ChatKit thread
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # An empty ID would address the collection URL instead of a thread; fail fast.
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
+        return await self._delete(
+            f"/chatkit/threads/{thread_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ThreadDeleteResponse,
+        )
+
+    # Deliberately not `async`: returns an AsyncPaginator that is awaited/iterated by the caller.
+    def list_items(
+        self,
+        thread_id: str,
+        *,
+        after: str | Omit = omit,
+        before: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]:
+        """
+        List ChatKit thread items
+
+        Args:
+          after: List items created after this thread item ID. Defaults to null for the first
+              page.
+
+          before: List items created before this thread item ID. Defaults to null for the newest
+              results.
+
+          limit: Maximum number of thread items to return. Defaults to 20.
+
+          order: Sort order for results by creation time. Defaults to `desc`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
+        return self._get_api_list(
+            f"/chatkit/threads/{thread_id}/items",
+            page=AsyncConversationCursorPage[Data],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "before": before,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    thread_list_items_params.ThreadListItemsParams,
+                ),
+            ),
+            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+        )
+
+
+class ThreadsWithRawResponse:
+    # Rebinds each `Threads` method through the raw-response wrapper so calls return the
+    # unparsed HTTP response instead of the parsed model (see `with_raw_response`).
+    def __init__(self, threads: Threads) -> None:
+        self._threads = threads
+
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            threads.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            threads.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            threads.delete,
+        )
+        self.list_items = _legacy_response.to_raw_response_wrapper(
+            threads.list_items,
+        )
+
+
+class AsyncThreadsWithRawResponse:
+    # Async counterpart of `ThreadsWithRawResponse`: wraps each `AsyncThreads` method so
+    # calls return the unparsed HTTP response.
+    def __init__(self, threads: AsyncThreads) -> None:
+        self._threads = threads
+
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            threads.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            threads.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            threads.delete,
+        )
+        self.list_items = _legacy_response.async_to_raw_response_wrapper(
+            threads.list_items,
+        )
+
+
+class ThreadsWithStreamingResponse:
+    # Rebinds each `Threads` method through the streamed-response wrapper so the response
+    # body is not read eagerly (see `with_streaming_response`).
+    def __init__(self, threads: Threads) -> None:
+        self._threads = threads
+
+        self.retrieve = to_streamed_response_wrapper(
+            threads.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            threads.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            threads.delete,
+        )
+        self.list_items = to_streamed_response_wrapper(
+            threads.list_items,
+        )
+
+
+class AsyncThreadsWithStreamingResponse:
+    # Async counterpart of `ThreadsWithStreamingResponse`: wraps each `AsyncThreads`
+    # method so the response body is not read eagerly.
+    def __init__(self, threads: AsyncThreads) -> None:
+        self._threads = threads
+
+        self.retrieve = async_to_streamed_response_wrapper(
+            threads.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            threads.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            threads.delete,
+        )
+        self.list_items = async_to_streamed_response_wrapper(
+            threads.list_items,
+        )
src/openai/resources/beta/__init__.py
@@ -8,6 +8,14 @@ from .beta import (
     BetaWithStreamingResponse,
     AsyncBetaWithStreamingResponse,
 )
+from .chatkit import (
+    ChatKit,
+    AsyncChatKit,
+    ChatKitWithRawResponse,
+    AsyncChatKitWithRawResponse,
+    ChatKitWithStreamingResponse,
+    AsyncChatKitWithStreamingResponse,
+)
 from .threads import (
     Threads,
     AsyncThreads,
@@ -26,6 +34,12 @@ from .assistants import (
 )
 
 __all__ = [
+    "ChatKit",
+    "AsyncChatKit",
+    "ChatKitWithRawResponse",
+    "AsyncChatKitWithRawResponse",
+    "ChatKitWithStreamingResponse",
+    "AsyncChatKitWithStreamingResponse",
     "Assistants",
     "AsyncAssistants",
     "AssistantsWithRawResponse",
src/openai/resources/beta/beta.py
@@ -12,6 +12,14 @@ from .assistants import (
     AsyncAssistantsWithStreamingResponse,
 )
 from ..._resource import SyncAPIResource, AsyncAPIResource
+from .chatkit.chatkit import (
+    ChatKit,
+    AsyncChatKit,
+    ChatKitWithRawResponse,
+    AsyncChatKitWithRawResponse,
+    ChatKitWithStreamingResponse,
+    AsyncChatKitWithStreamingResponse,
+)
 from .threads.threads import (
     Threads,
     AsyncThreads,
@@ -31,14 +39,7 @@ __all__ = ["Beta", "AsyncBeta"]
 
 class Beta(SyncAPIResource):
     @cached_property
-    def chat(self) -> Chat:
-        return Chat(self._client)
-
-    @cached_property
-    def realtime(self) -> Realtime:
-        return Realtime(self._client)
 
-    @cached_property
     def assistants(self) -> Assistants:
         return Assistants(self._client)
 
@@ -68,14 +69,7 @@ class Beta(SyncAPIResource):
 
 class AsyncBeta(AsyncAPIResource):
     @cached_property
-    def chat(self) -> AsyncChat:
-        return AsyncChat(self._client)
 
-    @cached_property
-    def realtime(self) -> AsyncRealtime:
-        return AsyncRealtime(self._client)
-
-    @cached_property
     def assistants(self) -> AsyncAssistants:
         return AsyncAssistants(self._client)
 
@@ -107,6 +101,10 @@ class BetaWithRawResponse:
     def __init__(self, beta: Beta) -> None:
         self._beta = beta
 
+    @cached_property
+    def chatkit(self) -> ChatKitWithRawResponse:
+        return ChatKitWithRawResponse(self._beta.chatkit)
+
     @cached_property
     def assistants(self) -> AssistantsWithRawResponse:
         return AssistantsWithRawResponse(self._beta.assistants)
@@ -120,6 +118,10 @@ class AsyncBetaWithRawResponse:
     def __init__(self, beta: AsyncBeta) -> None:
         self._beta = beta
 
+    @cached_property
+    def chatkit(self) -> AsyncChatKitWithRawResponse:
+        return AsyncChatKitWithRawResponse(self._beta.chatkit)
+
     @cached_property
     def assistants(self) -> AsyncAssistantsWithRawResponse:
         return AsyncAssistantsWithRawResponse(self._beta.assistants)
@@ -133,6 +135,10 @@ class BetaWithStreamingResponse:
     def __init__(self, beta: Beta) -> None:
         self._beta = beta
 
+    @cached_property
+    def chatkit(self) -> ChatKitWithStreamingResponse:
+        return ChatKitWithStreamingResponse(self._beta.chatkit)
+
     @cached_property
     def assistants(self) -> AssistantsWithStreamingResponse:
         return AssistantsWithStreamingResponse(self._beta.assistants)
@@ -146,6 +152,10 @@ class AsyncBetaWithStreamingResponse:
     def __init__(self, beta: AsyncBeta) -> None:
         self._beta = beta
 
+    @cached_property
+    def chatkit(self) -> AsyncChatKitWithStreamingResponse:
+        return AsyncChatKitWithStreamingResponse(self._beta.chatkit)
+
     @cached_property
     def assistants(self) -> AsyncAssistantsWithStreamingResponse:
         return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
src/openai/resources/realtime/calls.py
@@ -123,6 +123,10 @@ class Calls(SyncAPIResource):
                 "gpt-4o-realtime-preview-2025-06-03",
                 "gpt-4o-mini-realtime-preview",
                 "gpt-4o-mini-realtime-preview-2024-12-17",
+                "gpt-realtime-mini",
+                "gpt-realtime-mini-2025-10-06",
+                "gpt-audio-mini",
+                "gpt-audio-mini-2025-10-06",
             ],
         ]
         | Omit = omit,
@@ -428,6 +432,10 @@ class AsyncCalls(AsyncAPIResource):
                 "gpt-4o-realtime-preview-2025-06-03",
                 "gpt-4o-mini-realtime-preview",
                 "gpt-4o-mini-realtime-preview-2024-12-17",
+                "gpt-realtime-mini",
+                "gpt-realtime-mini-2025-10-06",
+                "gpt-audio-mini",
+                "gpt-audio-mini-2025-10-06",
             ],
         ]
         | Omit = omit,
src/openai/resources/realtime/realtime.py
@@ -363,13 +363,13 @@
         extra_query = self.__extra_query
         await self.__client._refresh_api_key()
         auth_headers = self.__client.auth_headers
         if self.__call_id is not omit:
             extra_query = {**extra_query, "call_id": self.__call_id}
         if is_async_azure_client(self.__client):
             model = self.__model
             if not model:
                 raise OpenAIError("`model` is required for Azure Realtime API")
-            else: 
+            else:
                 url, auth_headers = await self.__client._configure_realtime(model, extra_query)
         else:
             url = self._prepare_url().copy_with(
@@ -551,13 +552,13 @@
         extra_query = self.__extra_query
         self.__client._refresh_api_key()
         auth_headers = self.__client.auth_headers
         if self.__call_id is not omit:
             extra_query = {**extra_query, "call_id": self.__call_id}
         if is_azure_client(self.__client):
             model = self.__model
             if not model:
                 raise OpenAIError("`model` is required for Azure Realtime API")
-            else: 
+            else:
                 url, auth_headers = self.__client._configure_realtime(model, extra_query)
         else:
             url = self._prepare_url().copy_with(
src/openai/resources/__init__.py
@@ -56,6 +56,14 @@ from .models import (
     ModelsWithStreamingResponse,
     AsyncModelsWithStreamingResponse,
 )
+from .videos import (
+    Videos,
+    AsyncVideos,
+    VideosWithRawResponse,
+    AsyncVideosWithRawResponse,
+    VideosWithStreamingResponse,
+    AsyncVideosWithStreamingResponse,
+)
 from .batches import (
     Batches,
     AsyncBatches,
@@ -212,4 +220,10 @@ __all__ = [
     "AsyncContainersWithRawResponse",
     "ContainersWithStreamingResponse",
     "AsyncContainersWithStreamingResponse",
+    "Videos",
+    "AsyncVideos",
+    "VideosWithRawResponse",
+    "AsyncVideosWithRawResponse",
+    "VideosWithStreamingResponse",
+    "AsyncVideosWithStreamingResponse",
 ]
src/openai/resources/images.py
@@ -170,7 +170,8 @@ class Images(SyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
@@ -286,7 +287,8 @@ class Images(SyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
@@ -398,7 +400,8 @@ class Images(SyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
@@ -1054,7 +1057,8 @@ class AsyncImages(AsyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
@@ -1170,7 +1174,8 @@ class AsyncImages(AsyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
@@ -1282,7 +1287,8 @@ class AsyncImages(AsyncAPIResource):
 
           input_fidelity: Control how much effort the model will exert to match the style and features,
               especially facial features, of input images. This parameter is only supported
-              for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+              `low`. Defaults to `low`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
               indicate where `image` should be edited. If there are multiple images provided,
src/openai/resources/videos.py
@@ -0,0 +1,847 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Mapping, cast
+from typing_extensions import Literal, assert_never
+
+import httpx
+
+from .. import _legacy_response
+from ..types import (
+    VideoSize,
+    VideoModel,
+    VideoSeconds,
+    video_list_params,
+    video_remix_params,
+    video_create_params,
+    video_download_content_params,
+)
+from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
+from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    StreamedBinaryAPIResponse,
+    AsyncStreamedBinaryAPIResponse,
+    to_streamed_response_wrapper,
+    async_to_streamed_response_wrapper,
+    to_custom_streamed_response_wrapper,
+    async_to_custom_streamed_response_wrapper,
+)
+from ..pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ..types.video import Video
+from .._base_client import AsyncPaginator, make_request_options
+from .._utils._utils import is_given
+from ..types.video_size import VideoSize
+from ..types.video_model import VideoModel
+from ..types.video_seconds import VideoSeconds
+from ..types.video_delete_response import VideoDeleteResponse
+
+__all__ = ["Videos", "AsyncVideos"]
+
+
+class Videos(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> VideosWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        # cached_property: the wrapper is built once and reused on later accesses.
+        return VideosWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> VideosWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        # cached_property: the wrapper is built once and reused on later accesses.
+        return VideosWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Create a video
+
+        Args:
+          prompt: Text prompt that describes the video to generate.
+
+          input_reference: Optional image reference that guides generation.
+
+          model: The video generation model to use. Defaults to `sora-2`.
+
+          seconds: Clip duration in seconds. Defaults to 4 seconds.
+
+          size: Output resolution formatted as width x height. Defaults to 720x1280.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        body = deepcopy_minimal(
+            {
+                "prompt": prompt,
+                "input_reference": input_reference,
+                "model": model,
+                "seconds": seconds,
+                "size": size,
+            }
+        )
+        # Split any upload passed as `input_reference` out of the JSON body so it can be
+        # sent as a multipart part; plain requests (no file) stay JSON-encoded.
+        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
+        if files:
+            # It should be noted that the actual Content-Type header that will be
+            # sent to the server will contain a `boundary` parameter, e.g.
+            # multipart/form-data; boundary=---abc--
+            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            "/videos",
+            body=maybe_transform(body, video_create_params.VideoCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    def create_and_poll(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        poll_interval_ms: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """Create a video and block until it reaches a terminal state.
+
+        All generation arguments are forwarded unchanged to `create()`; the call then
+        waits in `poll()` until the video is no longer queued or in progress. Note that
+        the returned video may have a `failed` status — check `video.status`.
+
+        Args:
+          poll_interval_ms: Milliseconds to sleep between status checks. When omitted,
+              the server-suggested interval (or a 1 second fallback) is used.
+        """
+        video = self.create(
+            model=model,
+            prompt=prompt,
+            input_reference=input_reference,
+            seconds=seconds,
+            size=size,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        return self.poll(
+            video.id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    def poll(
+        self,
+        video_id: str,
+        *,
+        poll_interval_ms: int | Omit = omit,
+    ) -> Video:
+        """Wait for a video to finish processing.
+
+        Note: this returns once the job is terminal even if generation failed — check
+        `video.status` (`"completed"` vs `"failed"`) on the returned object.
+        """
+        # Marks this traffic as coming from the poll helper; the custom-interval header
+        # is only sent when the caller explicitly chose a cadence.
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = self.with_raw_response.retrieve(
+                video_id,
+                extra_headers=headers,
+            )
+
+            video = response.parse()
+            if video.status == "in_progress" or video.status == "queued":
+                if not is_given(poll_interval_ms):
+                    # Prefer the server-suggested cadence; fall back to polling once a second.
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                self._sleep(poll_interval_ms / 1000)
+            elif video.status == "completed" or video.status == "failed":
+                return video
+            else:
+                # Exhaustiveness check: fails static type-checking if a new status is added;
+                # at runtime an unknown status is returned rather than polling forever.
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(video.status)
+                else:
+                    return video
+
+    def retrieve(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Retrieve a video
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # An empty ID would request "/videos/" (the collection) instead of a video; fail fast.
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        return self._get(
+            f"/videos/{video_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> SyncConversationCursorPage[Video]:
+        """
+        List videos
+
+        Args:
+          after: Identifier for the last item from the previous pagination request
+
+          limit: Number of items to retrieve
+
+          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+              descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Returns a cursor page; iterating the page object fetches subsequent
+        # pages automatically using the `after` cursor.
+        return self._get_api_list(
+            "/videos",
+            page=SyncConversationCursorPage[Video],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                # Omitted params are dropped by `maybe_transform`, so only
+                # explicitly-passed filters appear in the query string.
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    video_list_params.VideoListParams,
+                ),
+            ),
+            model=Video,
+        )
+
+    def delete(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> VideoDeleteResponse:
+        """
+        Delete a video
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Reject an empty ID early: interpolating "" would hit `/videos/`
+        # instead of a specific resource.
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        return self._delete(
+            f"/videos/{video_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VideoDeleteResponse,
+        )
+
+    def download_content(
+        self,
+        video_id: str,
+        *,
+        variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """Download video content
+
+        Args:
+          variant: Which downloadable asset to return. Defaults to the MP4 video.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        # The content endpoint returns raw bytes, not JSON, so request the
+        # binary media type (caller-supplied Accept still wins via the merge).
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+        return self._get(
+            f"/videos/{video_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams),
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+    def remix(
+        self,
+        video_id: str,
+        *,
+        prompt: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Create a video remix
+
+        Args:
+          prompt: Updated text prompt that directs the remix generation.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        # Remixing creates a *new* Video job derived from `video_id`; the
+        # returned object has its own id/status to poll.
+        return self._post(
+            f"/videos/{video_id}/remix",
+            body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+
+class AsyncVideos(AsyncAPIResource):
+    """Asynchronous client resource for the `/videos` API endpoints."""
+
+    @cached_property
+    def with_raw_response(self) -> AsyncVideosWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncVideosWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncVideosWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncVideosWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Create a video
+
+        Args:
+          prompt: Text prompt that describes the video to generate.
+
+          input_reference: Optional image reference that guides generation.
+
+          model: The video generation model to use. Defaults to `sora-2`.
+
+          seconds: Clip duration in seconds. Defaults to 4 seconds.
+
+          size: Output resolution formatted as width x height. Defaults to 720x1280.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Deep-copy only what's necessary so `extract_files` can safely pull
+        # file-like values out of the body without mutating caller data.
+        body = deepcopy_minimal(
+            {
+                "prompt": prompt,
+                "input_reference": input_reference,
+                "model": model,
+                "seconds": seconds,
+                "size": size,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
+        if files:
+            # It should be noted that the actual Content-Type header that will be
+            # sent to the server will contain a `boundary` parameter, e.g.
+            # multipart/form-data; boundary=---abc--
+            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            "/videos",
+            body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    async def create_and_poll(
+        self,
+        *,
+        prompt: str,
+        input_reference: FileTypes | Omit = omit,
+        model: VideoModel | Omit = omit,
+        seconds: VideoSeconds | Omit = omit,
+        size: VideoSize | Omit = omit,
+        poll_interval_ms: int | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """Create a video and wait for it to be processed."""
+        video = await self.create(
+            model=model,
+            prompt=prompt,
+            input_reference=input_reference,
+            seconds=seconds,
+            size=size,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+
+        # Per-request extras are deliberately not forwarded to the polling
+        # retrieve calls; only the interval override carries through.
+        return await self.poll(
+            video.id,
+            poll_interval_ms=poll_interval_ms,
+        )
+
+    async def poll(
+        self,
+        video_id: str,
+        *,
+        poll_interval_ms: int | Omit = omit,
+    ) -> Video:
+        """Wait for a video generation job to finish processing.
+
+        Note: this returns even if generation failed — callers should check
+        `video.status` (which will be `"failed"`) to handle that case.
+        """
+        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
+        if is_given(poll_interval_ms):
+            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+        while True:
+            response = await self.with_raw_response.retrieve(
+                video_id,
+                extra_headers=headers,
+            )
+
+            video = response.parse()
+            if video.status == "in_progress" or video.status == "queued":
+                # No caller-supplied interval: prefer the server-suggested
+                # delay from the response headers, else fall back to 1s.
+                if not is_given(poll_interval_ms):
+                    from_header = response.headers.get("openai-poll-after-ms")
+                    if from_header is not None:
+                        poll_interval_ms = int(from_header)
+                    else:
+                        poll_interval_ms = 1000
+
+                await self._sleep(poll_interval_ms / 1000)
+            elif video.status == "completed" or video.status == "failed":
+                return video
+            else:
+                # Static exhaustiveness check only; at runtime an unknown
+                # status (e.g. from a newer API version) is returned as-is.
+                if TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(video.status)
+                else:
+                    return video
+
+    async def retrieve(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Retrieve a video
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Reject an empty ID early: interpolating "" would request `/videos/`.
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        return await self._get(
+            f"/videos/{video_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+    def list(
+        self,
+        *,
+        after: str | Omit = omit,
+        limit: int | Omit = omit,
+        order: Literal["asc", "desc"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]:
+        """
+        List videos
+
+        Args:
+          after: Identifier for the last item from the previous pagination request
+
+          limit: Number of items to retrieve
+
+          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+              descending order.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        # Intentionally not `async def`: it returns a paginator that performs
+        # the request lazily when awaited or async-iterated.
+        return self._get_api_list(
+            "/videos",
+            page=AsyncConversationCursorPage[Video],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                        "order": order,
+                    },
+                    video_list_params.VideoListParams,
+                ),
+            ),
+            model=Video,
+        )
+
+    async def delete(
+        self,
+        video_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> VideoDeleteResponse:
+        """
+        Delete a video
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        return await self._delete(
+            f"/videos/{video_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=VideoDeleteResponse,
+        )
+
+    async def download_content(
+        self,
+        video_id: str,
+        *,
+        variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> _legacy_response.HttpxBinaryResponseContent:
+        """Download video content
+
+        Args:
+          variant: Which downloadable asset to return. Defaults to the MP4 video.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        # The content endpoint returns raw bytes, not JSON, so request the
+        # binary media type (caller-supplied Accept still wins via the merge).
+        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
+        return await self._get(
+            f"/videos/{video_id}/content",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {"variant": variant}, video_download_content_params.VideoDownloadContentParams
+                ),
+            ),
+            cast_to=_legacy_response.HttpxBinaryResponseContent,
+        )
+
+    async def remix(
+        self,
+        video_id: str,
+        *,
+        prompt: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> Video:
+        """
+        Create a video remix
+
+        Args:
+          prompt: Updated text prompt that directs the remix generation.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not video_id:
+            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
+        # Remixing creates a *new* Video job derived from `video_id`; the
+        # returned object has its own id/status to poll.
+        return await self._post(
+            f"/videos/{video_id}/remix",
+            body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=Video,
+        )
+
+
+class VideosWithRawResponse:
+    """Mirrors `Videos`, wrapping each method to return the raw HTTP response."""
+
+    def __init__(self, videos: Videos) -> None:
+        self._videos = videos
+
+        self.create = _legacy_response.to_raw_response_wrapper(
+            videos.create,
+        )
+        self.retrieve = _legacy_response.to_raw_response_wrapper(
+            videos.retrieve,
+        )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            videos.list,
+        )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            videos.delete,
+        )
+        self.download_content = _legacy_response.to_raw_response_wrapper(
+            videos.download_content,
+        )
+        self.remix = _legacy_response.to_raw_response_wrapper(
+            videos.remix,
+        )
+
+
+class AsyncVideosWithRawResponse:
+    """Mirrors `AsyncVideos`, wrapping each method to return the raw HTTP response."""
+
+    def __init__(self, videos: AsyncVideos) -> None:
+        self._videos = videos
+
+        self.create = _legacy_response.async_to_raw_response_wrapper(
+            videos.create,
+        )
+        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+            videos.retrieve,
+        )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            videos.list,
+        )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            videos.delete,
+        )
+        self.download_content = _legacy_response.async_to_raw_response_wrapper(
+            videos.download_content,
+        )
+        self.remix = _legacy_response.async_to_raw_response_wrapper(
+            videos.remix,
+        )
+
+
+class VideosWithStreamingResponse:
+    """Mirrors `Videos`, wrapping each method so the response body can be streamed."""
+
+    def __init__(self, videos: Videos) -> None:
+        self._videos = videos
+
+        self.create = to_streamed_response_wrapper(
+            videos.create,
+        )
+        self.retrieve = to_streamed_response_wrapper(
+            videos.retrieve,
+        )
+        self.list = to_streamed_response_wrapper(
+            videos.list,
+        )
+        self.delete = to_streamed_response_wrapper(
+            videos.delete,
+        )
+        # Binary endpoint: uses the custom wrapper so bytes stream without
+        # JSON parsing.
+        self.download_content = to_custom_streamed_response_wrapper(
+            videos.download_content,
+            StreamedBinaryAPIResponse,
+        )
+        self.remix = to_streamed_response_wrapper(
+            videos.remix,
+        )
+
+
+class AsyncVideosWithStreamingResponse:
+    """Mirrors `AsyncVideos`, wrapping each method so the response body can be streamed."""
+
+    def __init__(self, videos: AsyncVideos) -> None:
+        self._videos = videos
+
+        self.create = async_to_streamed_response_wrapper(
+            videos.create,
+        )
+        self.retrieve = async_to_streamed_response_wrapper(
+            videos.retrieve,
+        )
+        self.list = async_to_streamed_response_wrapper(
+            videos.list,
+        )
+        self.delete = async_to_streamed_response_wrapper(
+            videos.delete,
+        )
+        # Binary endpoint: uses the custom wrapper so bytes stream without
+        # JSON parsing.
+        self.download_content = async_to_custom_streamed_response_wrapper(
+            videos.download_content,
+            AsyncStreamedBinaryAPIResponse,
+        )
+        self.remix = async_to_streamed_response_wrapper(
+            videos.remix,
+        )
src/openai/types/beta/chatkit/__init__.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .chat_session import ChatSession as ChatSession
+from .chatkit_thread import ChatKitThread as ChatKitThread
+from .chatkit_attachment import ChatKitAttachment as ChatKitAttachment
+from .thread_list_params import ThreadListParams as ThreadListParams
+from .chat_session_status import ChatSessionStatus as ChatSessionStatus
+from .chatkit_widget_item import ChatKitWidgetItem as ChatKitWidgetItem
+from .chat_session_history import ChatSessionHistory as ChatSessionHistory
+from .session_create_params import SessionCreateParams as SessionCreateParams
+from .thread_delete_response import ThreadDeleteResponse as ThreadDeleteResponse
+from .chat_session_file_upload import ChatSessionFileUpload as ChatSessionFileUpload
+from .chat_session_rate_limits import ChatSessionRateLimits as ChatSessionRateLimits
+from .chatkit_thread_item_list import ChatKitThreadItemList as ChatKitThreadItemList
+from .thread_list_items_params import ThreadListItemsParams as ThreadListItemsParams
+from .chat_session_workflow_param import ChatSessionWorkflowParam as ChatSessionWorkflowParam
+from .chatkit_response_output_text import ChatKitResponseOutputText as ChatKitResponseOutputText
+from .chat_session_rate_limits_param import ChatSessionRateLimitsParam as ChatSessionRateLimitsParam
+from .chat_session_expires_after_param import ChatSessionExpiresAfterParam as ChatSessionExpiresAfterParam
+from .chatkit_thread_user_message_item import ChatKitThreadUserMessageItem as ChatKitThreadUserMessageItem
+from .chat_session_chatkit_configuration import ChatSessionChatKitConfiguration as ChatSessionChatKitConfiguration
+from .chat_session_automatic_thread_titling import (
+    ChatSessionAutomaticThreadTitling as ChatSessionAutomaticThreadTitling,
+)
+from .chatkit_thread_assistant_message_item import (
+    ChatKitThreadAssistantMessageItem as ChatKitThreadAssistantMessageItem,
+)
+from .chat_session_chatkit_configuration_param import (
+    ChatSessionChatKitConfigurationParam as ChatSessionChatKitConfigurationParam,
+)
src/openai/types/beta/chatkit/chat_session.py
@@ -0,0 +1,43 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from ..chatkit_workflow import ChatKitWorkflow
+from .chat_session_status import ChatSessionStatus
+from .chat_session_rate_limits import ChatSessionRateLimits
+from .chat_session_chatkit_configuration import ChatSessionChatKitConfiguration
+
+__all__ = ["ChatSession"]
+
+
+class ChatSession(BaseModel):
+    """An active ChatKit session together with its resolved configuration."""
+
+    id: str
+    """Identifier for the ChatKit session."""
+
+    chatkit_configuration: ChatSessionChatKitConfiguration
+    """Resolved ChatKit feature configuration for the session."""
+
+    client_secret: str
+    """Ephemeral client secret that authenticates session requests."""
+
+    expires_at: int
+    """Unix timestamp (in seconds) for when the session expires."""
+
+    max_requests_per_1_minute: int
+    """Convenience copy of the per-minute request limit."""
+
+    object: Literal["chatkit.session"]
+    """Type discriminator that is always `chatkit.session`."""
+
+    rate_limits: ChatSessionRateLimits
+    """Resolved rate limit values."""
+
+    status: ChatSessionStatus
+    """Current lifecycle state of the session."""
+
+    user: str
+    """User identifier associated with the session."""
+
+    workflow: ChatKitWorkflow
+    """Workflow metadata for the session."""
src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ...._models import BaseModel
+
+__all__ = ["ChatSessionAutomaticThreadTitling"]
+
+
+class ChatSessionAutomaticThreadTitling(BaseModel):
+    """Resolved automatic thread titling settings for a session."""
+
+    enabled: bool
+    """Whether automatic thread titling is enabled."""
src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ...._models import BaseModel
+from .chat_session_history import ChatSessionHistory
+from .chat_session_file_upload import ChatSessionFileUpload
+from .chat_session_automatic_thread_titling import ChatSessionAutomaticThreadTitling
+
+__all__ = ["ChatSessionChatKitConfiguration"]
+
+
+class ChatSessionChatKitConfiguration(BaseModel):
+    """Resolved ChatKit feature configuration returned on a session."""
+
+    automatic_thread_titling: ChatSessionAutomaticThreadTitling
+    """Automatic thread titling preferences."""
+
+    file_upload: ChatSessionFileUpload
+    """Upload settings for the session."""
+
+    history: ChatSessionHistory
+    """History retention configuration."""
src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py
@@ -0,0 +1,59 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ChatSessionChatKitConfigurationParam", "AutomaticThreadTitling", "FileUpload", "History"]
+
+
+class AutomaticThreadTitling(TypedDict, total=False):
+    """Request-side automatic thread titling configuration."""
+
+    enabled: bool
+    """Enable automatic thread title generation. Defaults to true."""
+
+
+class FileUpload(TypedDict, total=False):
+    """Request-side file upload configuration and limits."""
+
+    enabled: bool
+    """Enable uploads for this session. Defaults to false."""
+
+    max_file_size: int
+    """Maximum size in megabytes for each uploaded file.
+
+    Defaults to 512 MB, which is the maximum allowable size.
+    """
+
+    max_files: int
+    """Maximum number of files that can be uploaded to the session. Defaults to 10."""
+
+
+class History(TypedDict, total=False):
+    """Request-side chat history retention configuration."""
+
+    enabled: bool
+    """Enables chat users to access previous ChatKit threads. Defaults to true."""
+
+    recent_threads: int
+    """Number of recent ChatKit threads users have access to.
+
+    Defaults to unlimited when unset.
+    """
+
+
+class ChatSessionChatKitConfigurationParam(TypedDict, total=False):
+    """Request-side ChatKit feature configuration; every section is optional."""
+
+    automatic_thread_titling: AutomaticThreadTitling
+    """Configuration for automatic thread titling.
+
+    When omitted, automatic thread titling is enabled by default.
+    """
+
+    file_upload: FileUpload
+    """Configuration for upload enablement and limits.
+
+    When omitted, uploads are disabled by default (max_files 10, max_file_size 512
+    MB).
+    """
+
+    history: History
+    """Configuration for chat history retention.
+
+    When omitted, history is enabled by default with no limit on recent_threads
+    (null).
+    """
src/openai/types/beta/chatkit/chat_session_expires_after_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatSessionExpiresAfterParam"]
+
+
+class ChatSessionExpiresAfterParam(TypedDict, total=False):
+    """Expiration policy for a session; both keys are required when supplied."""
+
+    anchor: Required[Literal["created_at"]]
+    """Base timestamp used to calculate expiration. Currently fixed to `created_at`."""
+
+    seconds: Required[int]
+    """Number of seconds after the anchor when the session expires."""
src/openai/types/beta/chatkit/chat_session_file_upload.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["ChatSessionFileUpload"]
+
+
+class ChatSessionFileUpload(BaseModel):
+    """Resolved file upload settings for a session; limits are null when unset."""
+
+    enabled: bool
+    """Indicates if uploads are enabled for the session."""
+
+    max_file_size: Optional[int] = None
+    """Maximum upload size in megabytes."""
+
+    max_files: Optional[int] = None
+    """Maximum number of uploads allowed during the session."""
src/openai/types/beta/chatkit/chat_session_history.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["ChatSessionHistory"]
+
+
+class ChatSessionHistory(BaseModel):
+    enabled: bool
+    """Indicates if chat history is persisted for the session."""
+
+    recent_threads: Optional[int] = None
+    """Number of prior threads surfaced in history views.
+
+    Defaults to null when all history is retained.
+    """
src/openai/types/beta/chatkit/chat_session_rate_limits.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ...._models import BaseModel
+
+__all__ = ["ChatSessionRateLimits"]
+
+
+class ChatSessionRateLimits(BaseModel):
+    max_requests_per_1_minute: int
+    """Maximum allowed requests per one-minute window."""
src/openai/types/beta/chatkit/chat_session_rate_limits_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ChatSessionRateLimitsParam"]
+
+
+class ChatSessionRateLimitsParam(TypedDict, total=False):
+    max_requests_per_1_minute: int
+    """Maximum number of requests allowed per minute for the session. Defaults to 10."""
src/openai/types/beta/chatkit/chat_session_status.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatSessionStatus"]
+
+ChatSessionStatus: TypeAlias = Literal["active", "expired", "cancelled"]
src/openai/types/beta/chatkit/chat_session_workflow_param.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ChatSessionWorkflowParam", "Tracing"]
+
+
+class Tracing(TypedDict, total=False):
+    enabled: bool
+    """Whether tracing is enabled during the session. Defaults to true."""
+
+
+class ChatSessionWorkflowParam(TypedDict, total=False):
+    id: Required[str]
+    """Identifier for the workflow invoked by the session."""
+
+    state_variables: Dict[str, Union[str, bool, float]]
+    """State variables forwarded to the workflow.
+
+    Keys may be up to 64 characters, values must be primitive types, and the map
+    defaults to an empty object.
+    """
+
+    tracing: Tracing
+    """Optional tracing overrides for the workflow invocation.
+
+    When omitted, tracing is enabled by default.
+    """
+
+    version: str
+    """Specific workflow version to run. Defaults to the latest deployed version."""
src/openai/types/beta/chatkit/chatkit_attachment.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ChatKitAttachment"]
+
+
+class ChatKitAttachment(BaseModel):
+    id: str
+    """Identifier for the attachment."""
+
+    mime_type: str
+    """MIME type of the attachment."""
+
+    name: str
+    """Original display name for the attachment."""
+
+    preview_url: Optional[str] = None
+    """Preview URL for rendering the attachment inline."""
+
+    type: Literal["image", "file"]
+    """Attachment discriminator."""
src/openai/types/beta/chatkit/chatkit_response_output_text.py
@@ -0,0 +1,62 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = [
+    "ChatKitResponseOutputText",
+    "Annotation",
+    "AnnotationFile",
+    "AnnotationFileSource",
+    "AnnotationURL",
+    "AnnotationURLSource",
+]
+
+
+class AnnotationFileSource(BaseModel):
+    filename: str
+    """Filename referenced by the annotation."""
+
+    type: Literal["file"]
+    """Type discriminator that is always `file`."""
+
+
+class AnnotationFile(BaseModel):
+    source: AnnotationFileSource
+    """File attachment referenced by the annotation."""
+
+    type: Literal["file"]
+    """Type discriminator that is always `file` for this annotation."""
+
+
+class AnnotationURLSource(BaseModel):
+    type: Literal["url"]
+    """Type discriminator that is always `url`."""
+
+    url: str
+    """URL referenced by the annotation."""
+
+
+class AnnotationURL(BaseModel):
+    source: AnnotationURLSource
+    """URL referenced by the annotation."""
+
+    type: Literal["url"]
+    """Type discriminator that is always `url` for this annotation."""
+
+
+Annotation: TypeAlias = Annotated[Union[AnnotationFile, AnnotationURL], PropertyInfo(discriminator="type")]
+
+
+class ChatKitResponseOutputText(BaseModel):
+    annotations: List[Annotation]
+    """Ordered list of annotations attached to the response text."""
+
+    text: str
+    """Assistant generated text."""
+
+    type: Literal["output_text"]
+    """Type discriminator that is always `output_text`."""
src/openai/types/beta/chatkit/chatkit_thread.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+
+__all__ = ["ChatKitThread", "Status", "StatusActive", "StatusLocked", "StatusClosed"]
+
+
+class StatusActive(BaseModel):
+    type: Literal["active"]
+    """Status discriminator that is always `active`."""
+
+
+class StatusLocked(BaseModel):
+    reason: Optional[str] = None
+    """Reason that the thread was locked. Defaults to null when no reason is recorded."""
+
+    type: Literal["locked"]
+    """Status discriminator that is always `locked`."""
+
+
+class StatusClosed(BaseModel):
+    reason: Optional[str] = None
+    """Reason that the thread was closed. Defaults to null when no reason is recorded."""
+
+    type: Literal["closed"]
+    """Status discriminator that is always `closed`."""
+
+
+Status: TypeAlias = Annotated[Union[StatusActive, StatusLocked, StatusClosed], PropertyInfo(discriminator="type")]
+
+
+class ChatKitThread(BaseModel):
+    id: str
+    """Identifier of the thread."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the thread was created."""
+
+    object: Literal["chatkit.thread"]
+    """Type discriminator that is always `chatkit.thread`."""
+
+    status: Status
+    """Current status for the thread. Defaults to `active` for newly created threads."""
+
+    title: Optional[str] = None
+    """Optional human-readable title for the thread.
+
+    Defaults to null when no title has been generated.
+    """
+
+    user: str
+    """Free-form string that identifies your end user who owns the thread."""
src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .chatkit_response_output_text import ChatKitResponseOutputText
+
+__all__ = ["ChatKitThreadAssistantMessageItem"]
+
+
+class ChatKitThreadAssistantMessageItem(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    content: List[ChatKitResponseOutputText]
+    """Ordered assistant response segments."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.assistant_message"]
+    """Type discriminator that is always `chatkit.assistant_message`."""
src/openai/types/beta/chatkit/chatkit_thread_item_list.py
@@ -0,0 +1,144 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from .chatkit_widget_item import ChatKitWidgetItem
+from .chatkit_thread_user_message_item import ChatKitThreadUserMessageItem
+from .chatkit_thread_assistant_message_item import ChatKitThreadAssistantMessageItem
+
+__all__ = [
+    "ChatKitThreadItemList",
+    "Data",
+    "DataChatKitClientToolCall",
+    "DataChatKitTask",
+    "DataChatKitTaskGroup",
+    "DataChatKitTaskGroupTask",
+]
+
+
+class DataChatKitClientToolCall(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    arguments: str
+    """JSON-encoded arguments that were sent to the tool."""
+
+    call_id: str
+    """Identifier for the client tool call."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    name: str
+    """Tool name that was invoked."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    output: Optional[str] = None
+    """JSON-encoded output captured from the tool.
+
+    Defaults to null while execution is in progress.
+    """
+
+    status: Literal["in_progress", "completed"]
+    """Execution status for the tool call."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.client_tool_call"]
+    """Type discriminator that is always `chatkit.client_tool_call`."""
+
+
+class DataChatKitTask(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    heading: Optional[str] = None
+    """Optional heading for the task. Defaults to null when not provided."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    summary: Optional[str] = None
+    """Optional summary that describes the task. Defaults to null when omitted."""
+
+    task_type: Literal["custom", "thought"]
+    """Subtype for the task."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.task"]
+    """Type discriminator that is always `chatkit.task`."""
+
+
+class DataChatKitTaskGroupTask(BaseModel):
+    heading: Optional[str] = None
+    """Optional heading for the grouped task. Defaults to null when not provided."""
+
+    summary: Optional[str] = None
+    """Optional summary that describes the grouped task.
+
+    Defaults to null when omitted.
+    """
+
+    type: Literal["custom", "thought"]
+    """Subtype for the grouped task."""
+
+
+class DataChatKitTaskGroup(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    tasks: List[DataChatKitTaskGroupTask]
+    """Tasks included in the group."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.task_group"]
+    """Type discriminator that is always `chatkit.task_group`."""
+
+
+Data: TypeAlias = Annotated[
+    Union[
+        ChatKitThreadUserMessageItem,
+        ChatKitThreadAssistantMessageItem,
+        ChatKitWidgetItem,
+        DataChatKitClientToolCall,
+        DataChatKitTask,
+        DataChatKitTaskGroup,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ChatKitThreadItemList(BaseModel):
+    data: List[Data]
+    """A list of items"""
+
+    first_id: Optional[str] = None
+    """The ID of the first item in the list."""
+
+    has_more: bool
+    """Whether there are more items available."""
+
+    last_id: Optional[str] = None
+    """The ID of the last item in the list."""
+
+    object: Literal["list"]
+    """The type of object returned, must be `list`."""
src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py
@@ -0,0 +1,77 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ...._utils import PropertyInfo
+from ...._models import BaseModel
+from .chatkit_attachment import ChatKitAttachment
+
+__all__ = [
+    "ChatKitThreadUserMessageItem",
+    "Content",
+    "ContentInputText",
+    "ContentQuotedText",
+    "InferenceOptions",
+    "InferenceOptionsToolChoice",
+]
+
+
+class ContentInputText(BaseModel):
+    text: str
+    """Plain-text content supplied by the user."""
+
+    type: Literal["input_text"]
+    """Type discriminator that is always `input_text`."""
+
+
+class ContentQuotedText(BaseModel):
+    text: str
+    """Quoted text content."""
+
+    type: Literal["quoted_text"]
+    """Type discriminator that is always `quoted_text`."""
+
+
+Content: TypeAlias = Annotated[Union[ContentInputText, ContentQuotedText], PropertyInfo(discriminator="type")]
+
+
+class InferenceOptionsToolChoice(BaseModel):
+    id: str
+    """Identifier of the requested tool."""
+
+
+class InferenceOptions(BaseModel):
+    model: Optional[str] = None
+    """Model name that generated the response.
+
+    Defaults to null when using the session default.
+    """
+
+    tool_choice: Optional[InferenceOptionsToolChoice] = None
+    """Preferred tool to invoke. Defaults to null when ChatKit should auto-select."""
+
+
+class ChatKitThreadUserMessageItem(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    attachments: List[ChatKitAttachment]
+    """Attachments associated with the user message. Defaults to an empty list."""
+
+    content: List[Content]
+    """Ordered content elements supplied by the user."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    inference_options: Optional[InferenceOptions] = None
+    """Inference overrides applied to the message. Defaults to null when unset."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.user_message"]
src/openai/types/beta/chatkit/chatkit_widget_item.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ChatKitWidgetItem"]
+
+
+class ChatKitWidgetItem(BaseModel):
+    id: str
+    """Identifier of the thread item."""
+
+    created_at: int
+    """Unix timestamp (in seconds) for when the item was created."""
+
+    object: Literal["chatkit.thread_item"]
+    """Type discriminator that is always `chatkit.thread_item`."""
+
+    thread_id: str
+    """Identifier of the parent thread."""
+
+    type: Literal["chatkit.widget"]
+    """Type discriminator that is always `chatkit.widget`."""
+
+    widget: str
+    """Serialized widget payload rendered in the UI."""
src/openai/types/beta/chatkit/session_create_params.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .chat_session_workflow_param import ChatSessionWorkflowParam
+from .chat_session_rate_limits_param import ChatSessionRateLimitsParam
+from .chat_session_expires_after_param import ChatSessionExpiresAfterParam
+from .chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam
+
+__all__ = ["SessionCreateParams"]
+
+
+class SessionCreateParams(TypedDict, total=False):
+    user: Required[str]
+    """
+    A free-form string that identifies your end user; ensures this Session can
+    access other objects that have the same `user` scope.
+    """
+
+    workflow: Required[ChatSessionWorkflowParam]
+    """Workflow that powers the session."""
+
+    chatkit_configuration: ChatSessionChatKitConfigurationParam
+    """Optional overrides for ChatKit runtime configuration features"""
+
+    expires_after: ChatSessionExpiresAfterParam
+    """Optional override for session expiration timing in seconds from creation.
+
+    Defaults to 10 minutes.
+    """
+
+    rate_limits: ChatSessionRateLimitsParam
+    """Optional override for per-minute request limits. When omitted, defaults to 10."""
src/openai/types/beta/chatkit/thread_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ThreadDeleteResponse"]
+
+
+class ThreadDeleteResponse(BaseModel):
+    id: str
+    """Identifier of the deleted thread."""
+
+    deleted: bool
+    """Indicates that the thread has been deleted."""
+
+    object: Literal["chatkit.thread.deleted"]
+    """Type discriminator that is always `chatkit.thread.deleted`."""
src/openai/types/beta/chatkit/thread_list_items_params.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ThreadListItemsParams"]
+
+
+class ThreadListItemsParams(TypedDict, total=False):
+    after: str
+    """List items created after this thread item ID.
+
+    Defaults to null for the first page.
+    """
+
+    before: str
+    """List items created before this thread item ID.
+
+    Defaults to null for the newest results.
+    """
+
+    limit: int
+    """Maximum number of thread items to return. Defaults to 20."""
+
+    order: Literal["asc", "desc"]
+    """Sort order for results by creation time. Defaults to `desc`."""
src/openai/types/beta/chatkit/thread_list_params.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ThreadListParams"]
+
+
+class ThreadListParams(TypedDict, total=False):
+    after: str
+    """List threads created after this thread ID.
+
+    Defaults to null for the first page.
+    """
+
+    before: str
+    """List threads created before this thread ID.
+
+    Defaults to null for the newest results.
+    """
+
+    limit: int
+    """Maximum number of threads to return. Defaults to 20."""
+
+    order: Literal["asc", "desc"]
+    """Sort order for results by creation time. Defaults to `desc`."""
+
+    user: str
+    """Filter threads that belong to this user identifier.
+
+    Defaults to null to return all users.
+    """
src/openai/types/beta/__init__.py
@@ -4,9 +4,12 @@ from __future__ import annotations
 
 from .thread import Thread as Thread
 from .assistant import Assistant as Assistant
+from .file_part import FilePart as FilePart
+from .image_part import ImagePart as ImagePart
 from .function_tool import FunctionTool as FunctionTool
 from .assistant_tool import AssistantTool as AssistantTool
 from .thread_deleted import ThreadDeleted as ThreadDeleted
+from .chatkit_workflow import ChatKitWorkflow as ChatKitWorkflow
 from .file_search_tool import FileSearchTool as FileSearchTool
 from .assistant_deleted import AssistantDeleted as AssistantDeleted
 from .function_tool_param import FunctionToolParam as FunctionToolParam
@@ -20,9 +23,11 @@ from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
 from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
 from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
 from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
+from .chatkit_upload_file_params import ChatKitUploadFileParams as ChatKitUploadFileParams
 from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
 from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
 from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
+from .chatkit_upload_file_response import ChatKitUploadFileResponse as ChatKitUploadFileResponse
 from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
 from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
 from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
src/openai/types/beta/chatkit_upload_file_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["ChatKitUploadFileParams"]
+
+
+class ChatKitUploadFileParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """Binary file contents to store with the ChatKit session.
+
+    Supports PDFs and PNG, JPG, JPEG, GIF, or WEBP images.
+    """
src/openai/types/beta/chatkit_upload_file_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .file_part import FilePart
+from .image_part import ImagePart
+
+__all__ = ["ChatKitUploadFileResponse"]
+
+ChatKitUploadFileResponse: TypeAlias = Annotated[Union[FilePart, ImagePart], PropertyInfo(discriminator="type")]
src/openai/types/beta/chatkit_workflow.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChatKitWorkflow", "Tracing"]
+
+
+class Tracing(BaseModel):
+    enabled: bool
+    """Indicates whether tracing is enabled."""
+
+
+class ChatKitWorkflow(BaseModel):
+    id: str
+    """Identifier of the workflow backing the session."""
+
+    state_variables: Optional[Dict[str, Union[str, bool, float]]] = None
+    """State variable key-value pairs applied when invoking the workflow.
+
+    Defaults to null when no overrides were provided.
+    """
+
+    tracing: Tracing
+    """Tracing settings applied to the workflow."""
+
+    version: Optional[str] = None
+    """Specific workflow version used for the session.
+
+    Defaults to null when using the latest deployment.
+    """
src/openai/types/beta/file_part.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FilePart"]
+
+
+class FilePart(BaseModel):
+    id: str
+    """Unique identifier for the uploaded file."""
+
+    mime_type: Optional[str] = None
+    """MIME type reported for the uploaded file. Defaults to null when unknown."""
+
+    name: Optional[str] = None
+    """Original filename supplied by the uploader. Defaults to null when unnamed."""
+
+    type: Literal["file"]
+    """Type discriminator that is always `file`."""
+
+    upload_url: Optional[str] = None
+    """Signed URL for downloading the uploaded file.
+
+    Defaults to null when no download link is available.
+    """
src/openai/types/beta/image_part.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImagePart"]
+
+
+class ImagePart(BaseModel):
+    id: str
+    """Unique identifier for the uploaded image."""
+
+    mime_type: str
+    """MIME type of the uploaded image."""
+
+    name: Optional[str] = None
+    """Original filename for the uploaded image. Defaults to null when unnamed."""
+
+    preview_url: str
+    """Preview URL that can be rendered inline for the image."""
+
+    type: Literal["image"]
+    """Type discriminator that is always `image`."""
+
+    upload_url: Optional[str] = None
+    """Signed URL for downloading the uploaded image.
+
+    Defaults to null when no download link is available.
+    """
src/openai/types/realtime/call_accept_params.py
@@ -63,6 +63,10 @@ class CallAcceptParams(TypedDict, total=False):
             "gpt-4o-realtime-preview-2025-06-03",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
+            "gpt-realtime-mini",
+            "gpt-realtime-mini-2025-10-06",
+            "gpt-audio-mini",
+            "gpt-audio-mini-2025-10-06",
         ],
     ]
     """The Realtime model used for this session."""
src/openai/types/realtime/realtime_session_create_request.py
@@ -62,6 +62,10 @@ class RealtimeSessionCreateRequest(BaseModel):
             "gpt-4o-realtime-preview-2025-06-03",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
+            "gpt-realtime-mini",
+            "gpt-realtime-mini-2025-10-06",
+            "gpt-audio-mini",
+            "gpt-audio-mini-2025-10-06",
         ],
         None,
     ] = None
src/openai/types/realtime/realtime_session_create_request_param.py
@@ -63,6 +63,10 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False):
             "gpt-4o-realtime-preview-2025-06-03",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
+            "gpt-realtime-mini",
+            "gpt-realtime-mini-2025-10-06",
+            "gpt-audio-mini",
+            "gpt-audio-mini-2025-10-06",
         ],
     ]
     """The Realtime model used for this session."""
src/openai/types/realtime/realtime_session_create_response.py
@@ -415,6 +415,10 @@ class RealtimeSessionCreateResponse(BaseModel):
             "gpt-4o-realtime-preview-2025-06-03",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
+            "gpt-realtime-mini",
+            "gpt-realtime-mini-2025-10-06",
+            "gpt-audio-mini",
+            "gpt-audio-mini-2025-10-06",
         ],
         None,
     ] = None
src/openai/types/responses/tool.py
@@ -199,7 +199,8 @@ class ImageGeneration(BaseModel):
     """
     Control how much effort the model will exert to match the style and features,
     especially facial features, of input images. This parameter is only supported
-    for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+    for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+    `low`. Defaults to `low`.
     """
 
     input_image_mask: Optional[ImageGenerationInputImageMask] = None
@@ -208,7 +209,7 @@ class ImageGeneration(BaseModel):
     Contains `image_url` (string, optional) and `file_id` (string, optional).
     """
 
-    model: Optional[Literal["gpt-image-1"]] = None
+    model: Optional[Literal["gpt-image-1", "gpt-image-1-mini"]] = None
     """The image generation model to use. Default: `gpt-image-1`."""
 
     moderation: Optional[Literal["auto", "low"]] = None
src/openai/types/responses/tool_param.py
@@ -199,7 +199,8 @@ class ImageGeneration(TypedDict, total=False):
     """
     Control how much effort the model will exert to match the style and features,
     especially facial features, of input images. This parameter is only supported
-    for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+    for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+    `low`. Defaults to `low`.
     """
 
     input_image_mask: ImageGenerationInputImageMask
@@ -208,7 +209,7 @@ class ImageGeneration(TypedDict, total=False):
     Contains `image_url` (string, optional) and `file_id` (string, optional).
     """
 
-    model: Literal["gpt-image-1"]
+    model: Literal["gpt-image-1", "gpt-image-1-mini"]
     """The image generation model to use. Default: `gpt-image-1`."""
 
     moderation: Literal["auto", "low"]
src/openai/types/shared/all_models.py
@@ -22,5 +22,7 @@ AllModels: TypeAlias = Union[
         "computer-use-preview",
         "computer-use-preview-2025-03-11",
         "gpt-5-codex",
+        "gpt-5-pro",
+        "gpt-5-pro-2025-10-06",
     ],
 ]
src/openai/types/shared/responses_model.py
@@ -22,5 +22,7 @@ ResponsesModel: TypeAlias = Union[
         "computer-use-preview",
         "computer-use-preview-2025-03-11",
         "gpt-5-codex",
+        "gpt-5-pro",
+        "gpt-5-pro-2025-10-06",
     ],
 ]
src/openai/types/shared_params/responses_model.py
@@ -24,5 +24,7 @@ ResponsesModel: TypeAlias = Union[
         "computer-use-preview",
         "computer-use-preview-2025-03-11",
         "gpt-5-codex",
+        "gpt-5-pro",
+        "gpt-5-pro-2025-10-06",
     ],
 ]
src/openai/types/__init__.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 from .batch import Batch as Batch
 from .image import Image as Image
 from .model import Model as Model
+from .video import Video as Video
 from .shared import (
     Metadata as Metadata,
     AllModels as AllModels,
@@ -29,16 +30,19 @@ from .embedding import Embedding as Embedding
 from .chat_model import ChatModel as ChatModel
 from .completion import Completion as Completion
 from .moderation import Moderation as Moderation
+from .video_size import VideoSize as VideoSize
 from .audio_model import AudioModel as AudioModel
 from .batch_error import BatchError as BatchError
 from .batch_usage import BatchUsage as BatchUsage
 from .file_object import FileObject as FileObject
 from .image_model import ImageModel as ImageModel
+from .video_model import VideoModel as VideoModel
 from .file_content import FileContent as FileContent
 from .file_deleted import FileDeleted as FileDeleted
 from .file_purpose import FilePurpose as FilePurpose
 from .vector_store import VectorStore as VectorStore
 from .model_deleted import ModelDeleted as ModelDeleted
+from .video_seconds import VideoSeconds as VideoSeconds
 from .embedding_model import EmbeddingModel as EmbeddingModel
 from .images_response import ImagesResponse as ImagesResponse
 from .completion_usage import CompletionUsage as CompletionUsage
@@ -48,11 +52,15 @@ from .moderation_model import ModerationModel as ModerationModel
 from .batch_list_params import BatchListParams as BatchListParams
 from .completion_choice import CompletionChoice as CompletionChoice
 from .image_edit_params import ImageEditParams as ImageEditParams
+from .video_list_params import VideoListParams as VideoListParams
 from .eval_create_params import EvalCreateParams as EvalCreateParams
 from .eval_list_response import EvalListResponse as EvalListResponse
 from .eval_update_params import EvalUpdateParams as EvalUpdateParams
 from .file_create_params import FileCreateParams as FileCreateParams
+from .video_create_error import VideoCreateError as VideoCreateError
+from .video_remix_params import VideoRemixParams as VideoRemixParams
 from .batch_create_params import BatchCreateParams as BatchCreateParams
+from .video_create_params import VideoCreateParams as VideoCreateParams
 from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
 from .eval_create_response import EvalCreateResponse as EvalCreateResponse
 from .eval_delete_response import EvalDeleteResponse as EvalDeleteResponse
@@ -62,6 +70,7 @@ from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
 from .audio_response_format import AudioResponseFormat as AudioResponseFormat
 from .container_list_params import ContainerListParams as ContainerListParams
 from .image_generate_params import ImageGenerateParams as ImageGenerateParams
+from .video_delete_response import VideoDeleteResponse as VideoDeleteResponse
 from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse
 from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
 from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent
@@ -89,6 +98,7 @@ from .websocket_connection_options import WebsocketConnectionOptions as Websocke
 from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
 from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
 from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
+from .video_download_content_params import VideoDownloadContentParams as VideoDownloadContentParams
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
 from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent
 from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
src/openai/types/image_edit_params.py
@@ -30,11 +30,11 @@ class ImageEditParamsBase(TypedDict, total=False):
     """
 
     background: Optional[Literal["transparent", "opaque", "auto"]]
-    """Allows to set transparency for the background of the generated image(s).
-
-    This parameter is only supported for `gpt-image-1`. Must be one of
-    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
-    model will automatically determine the best background for the image.
+    """
+    Allows you to set transparency for the background of the generated image(s).
+    This parameter is only supported for `gpt-image-1`. Must be one of
+    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+    model will automatically determine the best background for the image.
 
     If `transparent`, the output format needs to support transparency, so it should
     be set to either `png` (default value) or `webp`.
@@ -44,7 +44,8 @@ class ImageEditParamsBase(TypedDict, total=False):
     """
     Control how much effort the model will exert to match the style and features,
     especially facial features, of input images. This parameter is only supported
-    for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+    for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+    `low`. Defaults to `low`.
     """
 
     mask: FileTypes
src/openai/types/image_generate_params.py
@@ -19,11 +19,11 @@ class ImageGenerateParamsBase(TypedDict, total=False):
     """
 
     background: Optional[Literal["transparent", "opaque", "auto"]]
-    """Allows to set transparency for the background of the generated image(s).
-
-    This parameter is only supported for `gpt-image-1`. Must be one of
-    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
-    model will automatically determine the best background for the image.
+    """
+    Allows you to set transparency for the background of the generated image(s).
+    This parameter is only supported for `gpt-image-1`. Must be one of
+    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+    model will automatically determine the best background for the image.
 
     If `transparent`, the output format needs to support transparency, so it should
     be set to either `png` (default value) or `webp`.
src/openai/types/image_model.py
@@ -4,4 +4,4 @@ from typing_extensions import Literal, TypeAlias
 
 __all__ = ["ImageModel"]
 
-ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1"]
+ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1", "gpt-image-1-mini"]
src/openai/types/video.py
@@ -0,0 +1,50 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+from .video_size import VideoSize
+from .video_model import VideoModel
+from .video_seconds import VideoSeconds
+from .video_create_error import VideoCreateError
+
+__all__ = ["Video"]
+
+
+class Video(BaseModel):
+    id: str
+    """Unique identifier for the video job."""
+
+    completed_at: Optional[int] = None
+    """Unix timestamp (seconds) for when the job completed, if finished."""
+
+    created_at: int
+    """Unix timestamp (seconds) for when the job was created."""
+
+    error: Optional[VideoCreateError] = None
+    """Error payload that explains why generation failed, if applicable."""
+
+    expires_at: Optional[int] = None
+    """Unix timestamp (seconds) for when the downloadable assets expire, if set."""
+
+    model: VideoModel
+    """The video generation model that produced the job."""
+
+    object: Literal["video"]
+    """The object type, which is always `video`."""
+
+    progress: int
+    """Approximate completion percentage for the generation task."""
+
+    remixed_from_video_id: Optional[str] = None
+    """Identifier of the source video if this video is a remix."""
+
+    seconds: VideoSeconds
+    """Duration of the generated clip in seconds."""
+
+    size: VideoSize
+    """The resolution of the generated video."""
+
+    status: Literal["queued", "in_progress", "completed", "failed"]
+    """Current lifecycle status of the video job."""
src/openai/types/video_create_error.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["VideoCreateError"]
+
+
+class VideoCreateError(BaseModel):
+    code: str
+
+    message: str
src/openai/types/video_create_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import FileTypes
+from .video_size import VideoSize
+from .video_model import VideoModel
+from .video_seconds import VideoSeconds
+
+__all__ = ["VideoCreateParams"]
+
+
+class VideoCreateParams(TypedDict, total=False):
+    prompt: Required[str]
+    """Text prompt that describes the video to generate."""
+
+    input_reference: FileTypes
+    """Optional image reference that guides generation."""
+
+    model: VideoModel
+    """The video generation model to use. Defaults to `sora-2`."""
+
+    seconds: VideoSeconds
+    """Clip duration in seconds. Defaults to 4 seconds."""
+
+    size: VideoSize
+    """Output resolution formatted as width x height. Defaults to 720x1280."""
src/openai/types/video_delete_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VideoDeleteResponse"]
+
+
+class VideoDeleteResponse(BaseModel):
+    id: str
+    """Identifier of the deleted video."""
+
+    deleted: bool
+    """Indicates that the video resource was deleted."""
+
+    object: Literal["video.deleted"]
+    """The object type that signals the deletion response."""
src/openai/types/video_download_content_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VideoDownloadContentParams"]
+
+
+class VideoDownloadContentParams(TypedDict, total=False):
+    variant: Literal["video", "thumbnail", "spritesheet"]
+    """Which downloadable asset to return. Defaults to the MP4 video."""
src/openai/types/video_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["VideoListParams"]
+
+
+class VideoListParams(TypedDict, total=False):
+    after: str
+    """Identifier for the last item from the previous pagination request."""
+
+    limit: int
+    """Number of items to retrieve."""
+
+    order: Literal["asc", "desc"]
+    """Sort order of results by timestamp.
+
+    Use `asc` for ascending order or `desc` for descending order.
+    """
src/openai/types/video_model.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["VideoModel"]
+
+VideoModel: TypeAlias = Literal["sora-2", "sora-2-pro"]
src/openai/types/video_remix_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["VideoRemixParams"]
+
+
+class VideoRemixParams(TypedDict, total=False):
+    prompt: Required[str]
+    """Updated text prompt that directs the remix generation."""
src/openai/types/video_seconds.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["VideoSeconds"]
+
+VideoSeconds: TypeAlias = Literal["4", "8", "12"]
src/openai/types/video_size.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["VideoSize"]
+
+VideoSize: TypeAlias = Literal["720x1280", "1280x720", "1024x1792", "1792x1024"]
src/openai/__init__.py
@@ -379,6 +379,7 @@ from ._module_client import (
     files as files,
     images as images,
     models as models,
+    videos as videos,
     batches as batches,
     uploads as uploads,
     realtime as realtime,
src/openai/_client.py
@@ -44,6 +44,7 @@ if TYPE_CHECKING:
         files,
         images,
         models,
+        videos,
         batches,
         uploads,
         realtime,
@@ -59,6 +60,7 @@ if TYPE_CHECKING:
     from .resources.files import Files, AsyncFiles
     from .resources.images import Images, AsyncImages
     from .resources.models import Models, AsyncModels
+    from .resources.videos import Videos, AsyncVideos
     from .resources.batches import Batches, AsyncBatches
     from .resources.webhooks import Webhooks, AsyncWebhooks
     from .resources.beta.beta import Beta, AsyncBeta
@@ -288,6 +290,12 @@ class OpenAI(SyncAPIClient):
 
         return Containers(self)
 
+    @cached_property
+    def videos(self) -> Videos:
+        from .resources.videos import Videos
+
+        return Videos(self)
+
     @cached_property
     def with_raw_response(self) -> OpenAIWithRawResponse:
         return OpenAIWithRawResponse(self)
@@ -633,6 +641,12 @@ class AsyncOpenAI(AsyncAPIClient):
 
         return AsyncContainers(self)
 
+    @cached_property
+    def videos(self) -> AsyncVideos:
+        from .resources.videos import AsyncVideos
+
+        return AsyncVideos(self)
+
     @cached_property
     def with_raw_response(self) -> AsyncOpenAIWithRawResponse:
         return AsyncOpenAIWithRawResponse(self)
@@ -883,6 +897,12 @@ class OpenAIWithRawResponse:
 
         return ContainersWithRawResponse(self._client.containers)
 
+    @cached_property
+    def videos(self) -> videos.VideosWithRawResponse:
+        from .resources.videos import VideosWithRawResponse
+
+        return VideosWithRawResponse(self._client.videos)
+
 
 class AsyncOpenAIWithRawResponse:
     _client: AsyncOpenAI
@@ -998,6 +1018,12 @@ class AsyncOpenAIWithRawResponse:
 
         return AsyncContainersWithRawResponse(self._client.containers)
 
+    @cached_property
+    def videos(self) -> videos.AsyncVideosWithRawResponse:
+        from .resources.videos import AsyncVideosWithRawResponse
+
+        return AsyncVideosWithRawResponse(self._client.videos)
+
 
 class OpenAIWithStreamedResponse:
     _client: OpenAI
@@ -1113,6 +1139,12 @@ class OpenAIWithStreamedResponse:
 
         return ContainersWithStreamingResponse(self._client.containers)
 
+    @cached_property
+    def videos(self) -> videos.VideosWithStreamingResponse:
+        from .resources.videos import VideosWithStreamingResponse
+
+        return VideosWithStreamingResponse(self._client.videos)
+
 
 class AsyncOpenAIWithStreamedResponse:
     _client: AsyncOpenAI
@@ -1228,6 +1260,12 @@ class AsyncOpenAIWithStreamedResponse:
 
         return AsyncContainersWithStreamingResponse(self._client.containers)
 
+    @cached_property
+    def videos(self) -> videos.AsyncVideosWithStreamingResponse:
+        from .resources.videos import AsyncVideosWithStreamingResponse
+
+        return AsyncVideosWithStreamingResponse(self._client.videos)
+
 
 Client = OpenAI
 
src/openai/_module_client.py
@@ -9,6 +9,7 @@ if TYPE_CHECKING:
     from .resources.files import Files
     from .resources.images import Images
     from .resources.models import Models
+    from .resources.videos import Videos
     from .resources.batches import Batches
     from .resources.webhooks import Webhooks
     from .resources.beta.beta import Beta
@@ -72,6 +73,12 @@ class ModelsProxy(LazyProxy["Models"]):
         return _load_client().models
 
 
+class VideosProxy(LazyProxy["Videos"]):
+    @override
+    def __load__(self) -> Videos:
+        return _load_client().videos
+
+
 class BatchesProxy(LazyProxy["Batches"]):
     @override
     def __load__(self) -> Batches:
@@ -151,6 +158,7 @@ audio: Audio = AudioProxy().__as_proxied__()
 evals: Evals = EvalsProxy().__as_proxied__()
 images: Images = ImagesProxy().__as_proxied__()
 models: Models = ModelsProxy().__as_proxied__()
+videos: Videos = VideosProxy().__as_proxied__()
 batches: Batches = BatchesProxy().__as_proxied__()
 uploads: Uploads = UploadsProxy().__as_proxied__()
 webhooks: Webhooks = WebhooksProxy().__as_proxied__()
tests/api_resources/beta/chatkit/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/beta/chatkit/test_sessions.py
@@ -0,0 +1,230 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.beta.chatkit import (
+    ChatSession,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSessions:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:
+        session = client.beta.chatkit.sessions.create(
+            user="x",
+            workflow={"id": "id"},
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        session = client.beta.chatkit.sessions.create(
+            user="x",
+            workflow={
+                "id": "id",
+                "state_variables": {"foo": "string"},
+                "tracing": {"enabled": True},
+                "version": "version",
+            },
+            chatkit_configuration={
+                "automatic_thread_titling": {"enabled": True},
+                "file_upload": {
+                    "enabled": True,
+                    "max_file_size": 1,
+                    "max_files": 1,
+                },
+                "history": {
+                    "enabled": True,
+                    "recent_threads": 1,
+                },
+            },
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 1,
+            },
+            rate_limits={"max_requests_per_1_minute": 1},
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.sessions.with_raw_response.create(
+            user="x",
+            workflow={"id": "id"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        session = response.parse()
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:
+        with client.beta.chatkit.sessions.with_streaming_response.create(
+            user="x",
+            workflow={"id": "id"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            session = response.parse()
+            assert_matches_type(ChatSession, session, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_cancel(self, client: OpenAI) -> None:
+        session = client.beta.chatkit.sessions.cancel(
+            "cksess_123",
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    def test_raw_response_cancel(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.sessions.with_raw_response.cancel(
+            "cksess_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        session = response.parse()
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    def test_streaming_response_cancel(self, client: OpenAI) -> None:
+        with client.beta.chatkit.sessions.with_streaming_response.cancel(
+            "cksess_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            session = response.parse()
+            assert_matches_type(ChatSession, session, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_cancel(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"):
+            client.beta.chatkit.sessions.with_raw_response.cancel(
+                "",
+            )
+
+
+class TestAsyncSessions:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        session = await async_client.beta.chatkit.sessions.create(
+            user="x",
+            workflow={"id": "id"},
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        session = await async_client.beta.chatkit.sessions.create(
+            user="x",
+            workflow={
+                "id": "id",
+                "state_variables": {"foo": "string"},
+                "tracing": {"enabled": True},
+                "version": "version",
+            },
+            chatkit_configuration={
+                "automatic_thread_titling": {"enabled": True},
+                "file_upload": {
+                    "enabled": True,
+                    "max_file_size": 1,
+                    "max_files": 1,
+                },
+                "history": {
+                    "enabled": True,
+                    "recent_threads": 1,
+                },
+            },
+            expires_after={
+                "anchor": "created_at",
+                "seconds": 1,
+            },
+            rate_limits={"max_requests_per_1_minute": 1},
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.sessions.with_raw_response.create(
+            user="x",
+            workflow={"id": "id"},
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        session = response.parse()
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.sessions.with_streaming_response.create(
+            user="x",
+            workflow={"id": "id"},
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            session = await response.parse()
+            assert_matches_type(ChatSession, session, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
+        session = await async_client.beta.chatkit.sessions.cancel(
+            "cksess_123",
+        )
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.sessions.with_raw_response.cancel(
+            "cksess_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        session = response.parse()
+        assert_matches_type(ChatSession, session, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.sessions.with_streaming_response.cancel(
+            "cksess_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            session = await response.parse()
+            assert_matches_type(ChatSession, session, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"):
+            await async_client.beta.chatkit.sessions.with_raw_response.cancel(
+                "",
+            )
tests/api_resources/beta/chatkit/test_threads.py
@@ -0,0 +1,348 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.beta.chatkit import ChatKitThread, ThreadDeleteResponse
+from openai.types.beta.chatkit.chatkit_thread_item_list import Data
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestThreads:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.retrieve(
+            "cthr_123",
+        )
+        assert_matches_type(ChatKitThread, thread, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.threads.with_raw_response.retrieve(
+            "cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(ChatKitThread, thread, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.beta.chatkit.threads.with_streaming_response.retrieve(
+            "cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = response.parse()
+            assert_matches_type(ChatKitThread, thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            client.beta.chatkit.threads.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.list()
+        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.list(
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+            user="x",
+        )
+        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.threads.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.beta.chatkit.threads.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = response.parse()
+            assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.delete(
+            "cthr_123",
+        )
+        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.threads.with_raw_response.delete(
+            "cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.beta.chatkit.threads.with_streaming_response.delete(
+            "cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = response.parse()
+            assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            client.beta.chatkit.threads.with_raw_response.delete(
+                "",
+            )
+
+    @parametrize
+    def test_method_list_items(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.list_items(
+            thread_id="cthr_123",
+        )
+        assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    def test_method_list_items_with_all_params(self, client: OpenAI) -> None:
+        thread = client.beta.chatkit.threads.list_items(
+            thread_id="cthr_123",
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    def test_raw_response_list_items(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.threads.with_raw_response.list_items(
+            thread_id="cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list_items(self, client: OpenAI) -> None:
+        with client.beta.chatkit.threads.with_streaming_response.list_items(
+            thread_id="cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = response.parse()
+            assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_list_items(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            client.beta.chatkit.threads.with_raw_response.list_items(
+                thread_id="",
+            )
+
+
+class TestAsyncThreads:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.retrieve(
+            "cthr_123",
+        )
+        assert_matches_type(ChatKitThread, thread, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.threads.with_raw_response.retrieve(
+            "cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(ChatKitThread, thread, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.threads.with_streaming_response.retrieve(
+            "cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = await response.parse()
+            assert_matches_type(ChatKitThread, thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            await async_client.beta.chatkit.threads.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.list()
+        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.list(
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+            user="x",
+        )
+        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.threads.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.threads.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = await response.parse()
+            assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.delete(
+            "cthr_123",
+        )
+        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.threads.with_raw_response.delete(
+            "cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.threads.with_streaming_response.delete(
+            "cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = await response.parse()
+            assert_matches_type(ThreadDeleteResponse, thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            await async_client.beta.chatkit.threads.with_raw_response.delete(
+                "",
+            )
+
+    @parametrize
+    async def test_method_list_items(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.list_items(
+            thread_id="cthr_123",
+        )
+        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    async def test_method_list_items_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        thread = await async_client.beta.chatkit.threads.list_items(
+            thread_id="cthr_123",
+            after="after",
+            before="before",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list_items(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.threads.with_raw_response.list_items(
+            thread_id="cthr_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        thread = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list_items(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.threads.with_streaming_response.list_items(
+            thread_id="cthr_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            thread = await response.parse()
+            assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_list_items(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            await async_client.beta.chatkit.threads.with_raw_response.list_items(
+                thread_id="",
+            )
tests/api_resources/beta/test_chatkit.py
@@ -0,0 +1,86 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.beta import ChatKitUploadFileResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestChatKit:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_upload_file(self, client: OpenAI) -> None:
+        chatkit = client.beta.chatkit.upload_file(
+            file=b"raw file contents",
+        )
+        assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+    @parametrize
+    def test_raw_response_upload_file(self, client: OpenAI) -> None:
+        response = client.beta.chatkit.with_raw_response.upload_file(
+            file=b"raw file contents",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        chatkit = response.parse()
+        assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+    @parametrize
+    def test_streaming_response_upload_file(self, client: OpenAI) -> None:
+        with client.beta.chatkit.with_streaming_response.upload_file(
+            file=b"raw file contents",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            chatkit = response.parse()
+            assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncChatKit:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_upload_file(self, async_client: AsyncOpenAI) -> None:
+        chatkit = await async_client.beta.chatkit.upload_file(
+            file=b"raw file contents",
+        )
+        assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+    @parametrize
+    async def test_raw_response_upload_file(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.chatkit.with_raw_response.upload_file(
+            file=b"raw file contents",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        chatkit = response.parse()
+        assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_upload_file(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.chatkit.with_streaming_response.upload_file(
+            file=b"raw file contents",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            chatkit = await response.parse()
+            assert_matches_type(ChatKitUploadFileResponse, chatkit, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
tests/api_resources/test_videos.py
@@ -0,0 +1,551 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import httpx
+import pytest
+from respx import MockRouter
+
+import openai._legacy_response as _legacy_response
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types import (
+    Video,
+    VideoDeleteResponse,
+)
+from openai._utils import assert_signatures_in_sync
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+
+# pyright: reportDeprecated=false
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestVideos:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @parametrize
+    def test_method_create(self, client: OpenAI) -> None:
+        video = client.videos.create(
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_method_create_with_all_params(self, client: OpenAI) -> None:
+        video = client.videos.create(
+            prompt="x",
+            input_reference=b"raw file contents",
+            model="sora-2",
+            seconds="4",
+            size="720x1280",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_raw_response_create(self, client: OpenAI) -> None:
+        response = client.videos.with_raw_response.create(
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_streaming_response_create(self, client: OpenAI) -> None:
+        with client.videos.with_streaming_response.create(
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_retrieve(self, client: OpenAI) -> None:
+        video = client.videos.retrieve(
+            "video_123",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_raw_response_retrieve(self, client: OpenAI) -> None:
+        response = client.videos.with_raw_response.retrieve(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+        with client.videos.with_streaming_response.retrieve(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_retrieve(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            client.videos.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        video = client.videos.list()
+        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        video = client.videos.list(
+            after="after",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.videos.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.videos.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        video = client.videos.delete(
+            "video_123",
+        )
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.videos.with_raw_response.delete(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.videos.with_streaming_response.delete(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            client.videos.with_raw_response.delete(
+                "",
+            )
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    def test_method_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = client.videos.download_content(
+            video_id="video_123",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    def test_method_download_content_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = client.videos.download_content(
+            video_id="video_123",
+            variant="video",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    def test_raw_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+
+        response = client.videos.with_raw_response.download_content(
+            video_id="video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    def test_streaming_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        with client.videos.with_streaming_response.download_content(
+            video_id="video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(bytes, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    def test_path_params_download_content(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            client.videos.with_raw_response.download_content(
+                video_id="",
+            )
+
+    @parametrize
+    def test_method_remix(self, client: OpenAI) -> None:
+        video = client.videos.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_raw_response_remix(self, client: OpenAI) -> None:
+        response = client.videos.with_raw_response.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    def test_streaming_response_remix(self, client: OpenAI) -> None:
+        with client.videos.with_streaming_response.remix(
+            video_id="video_123",
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_remix(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            client.videos.with_raw_response.remix(
+                video_id="",
+                prompt="x",
+            )
+
+
+class TestAsyncVideos:
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
+
+    @parametrize
+    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.create(
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.create(
+            prompt="x",
+            input_reference=b"raw file contents",
+            model="sora-2",
+            seconds="4",
+            size="720x1280",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.create(
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.create(
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.retrieve(
+            "video_123",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.retrieve(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.retrieve(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.retrieve(
+                "",
+            )
+
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.list()
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.list(
+            after="after",
+            limit=0,
+            order="asc",
+        )
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.delete(
+            "video_123",
+        )
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.delete(
+            "video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.delete(
+            "video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(VideoDeleteResponse, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.delete(
+                "",
+            )
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_method_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = await async_client.videos.download_content(
+            video_id="video_123",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_method_download_content_with_all_params(
+        self, async_client: AsyncOpenAI, respx_mock: MockRouter
+    ) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        video = await async_client.videos.download_content(
+            video_id="video_123",
+            variant="video",
+        )
+        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
+        assert video.json() == {"foo": "bar"}
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_raw_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+
+        response = await async_client.videos.with_raw_response.download_content(
+            video_id="video_123",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_streaming_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
+        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+        async with async_client.videos.with_streaming_response.download_content(
+            video_id="video_123",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(bytes, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    @pytest.mark.respx(base_url=base_url)
+    async def test_path_params_download_content(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.download_content(
+                video_id="",
+            )
+
+    @parametrize
+    async def test_method_remix(self, async_client: AsyncOpenAI) -> None:
+        video = await async_client.videos.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_raw_response_remix(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.videos.with_raw_response.remix(
+            video_id="video_123",
+            prompt="x",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        video = response.parse()
+        assert_matches_type(Video, video, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_remix(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.videos.with_streaming_response.remix(
+            video_id="video_123",
+            prompt="x",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            video = await response.parse()
+            assert_matches_type(Video, video, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_remix(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
+            await async_client.videos.with_raw_response.remix(
+                video_id="",
+                prompt="x",
+            )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+    assert_signatures_in_sync(
+        checking_client.videos.create,
+        checking_client.videos.create_and_poll,
+        exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"},
+    )
.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 123
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fadefdc7c7e30df47c09df323669b242ff90ee08e51f304175ace5274e0aab49.yml
-openapi_spec_hash: 6d20f639d9ff8a097a34962da6218231
-config_hash: 902654e60f5d659f2bfcfd903e17c46d
+configured_endpoints: 136
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d64cf80d2ebddf175c5578f68226a3d5bbd3f7fd8d62ccac2205f3fc05a355ee.yml
+openapi_spec_hash: d51e0d60d0c536f210b597a211bc5af0
+config_hash: e7c42016df9c6bd7bd6ff15101b9bc9b
api.md
@@ -1139,3 +1139,29 @@ Methods:
 Methods:
 
 - <code title="get /containers/{container_id}/files/{file_id}/content">client.containers.files.content.<a href="./src/openai/resources/containers/files/content.py">retrieve</a>(file_id, \*, container_id) -> HttpxBinaryResponseContent</code>
+
+# Videos
+
+Types:
+
+```python
+from openai.types import (
+    Video,
+    VideoCreateError,
+    VideoModel,
+    VideoSeconds,
+    VideoSize,
+    VideoDeleteResponse,
+)
+```
+
+Methods:
+
+- <code title="post /videos">client.videos.<a href="./src/openai/resources/videos.py">create</a>(\*\*<a href="src/openai/types/video_create_params.py">params</a>) -> <a href="./src/openai/types/video.py">Video</a></code>
+- <code title="get /videos/{video_id}">client.videos.<a href="./src/openai/resources/videos.py">retrieve</a>(video_id) -> <a href="./src/openai/types/video.py">Video</a></code>
+- <code title="get /videos">client.videos.<a href="./src/openai/resources/videos.py">list</a>(\*\*<a href="src/openai/types/video_list_params.py">params</a>) -> <a href="./src/openai/types/video.py">SyncConversationCursorPage[Video]</a></code>
+- <code title="delete /videos/{video_id}">client.videos.<a href="./src/openai/resources/videos.py">delete</a>(video_id) -> <a href="./src/openai/types/video_delete_response.py">VideoDeleteResponse</a></code>
+- <code title="get /videos/{video_id}/content">client.videos.<a href="./src/openai/resources/videos.py">download_content</a>(video_id, \*\*<a href="src/openai/types/video_download_content_params.py">params</a>) -> HttpxBinaryResponseContent</code>
+- <code title="post /videos/{video_id}/remix">client.videos.<a href="./src/openai/resources/videos.py">remix</a>(video_id, \*\*<a href="src/openai/types/video_remix_params.py">params</a>) -> <a href="./src/openai/types/video.py">Video</a></code>
+- <code>client.videos.<a href="./src/openai/resources/videos.py">create_and_poll</a>(\*args) -> Video</code>
+
helpers.md
@@ -514,4 +514,5 @@ client.beta.vector_stores.files.upload_and_poll(...)
 client.beta.vector_stores.files.create_and_poll(...)
 client.beta.vector_stores.file_batches.create_and_poll(...)
 client.beta.vector_stores.file_batches.upload_and_poll(...)
+client.videos.create_and_poll(...)
 ```