Commit 71058dd6
Changed files (85)
src/openai/
    lib/streaming/responses/
    resources/
        containers/
        responses/
    types/
        containers/
        responses/
tests/
    api_resources/
src/openai/lib/streaming/responses/_responses.py
@@ -251,6 +251,7 @@ class ResponseStreamState(Generic[TextFormatT]):
delta=event.delta,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.output_text.delta",
snapshot=content.text,
)
@@ -268,6 +269,7 @@ class ResponseStreamState(Generic[TextFormatT]):
content_index=event.content_index,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.output_text.done",
text=event.text,
parsed=parse_text(event.text, text_format=self._text_format),
@@ -283,6 +285,7 @@ class ResponseStreamState(Generic[TextFormatT]):
delta=event.delta,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.function_call_arguments.delta",
snapshot=output.arguments,
)
@@ -295,6 +298,7 @@ class ResponseStreamState(Generic[TextFormatT]):
events.append(
build(
ResponseCompletedEvent,
+ sequence_number=event.sequence_number,
type="response.completed",
response=response,
)
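
These hunks thread the server-assigned `sequence_number` from the raw API events into the SDK's snapshot events (`response.output_text.delta`, `response.output_text.done`, `response.function_call_arguments.delta`, `response.completed`). A minimal sketch of reading it off a streamed response; the model name and prompt are placeholders, not part of this commit:

    from openai import OpenAI

    client = OpenAI()

    with client.responses.stream(
        model="gpt-4.1",  # placeholder model
        input="Write a haiku about containers.",
    ) as stream:
        for event in stream:
            # After this change, snapshot events carry the same
            # sequence_number the server attached to the raw event.
            if event.type == "response.output_text.delta":
                print(event.sequence_number, event.delta)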
src/openai/resources/containers/files/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+
+__all__ = [
+ "Content",
+ "AsyncContent",
+ "ContentWithRawResponse",
+ "AsyncContentWithRawResponse",
+ "ContentWithStreamingResponse",
+ "AsyncContentWithStreamingResponse",
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+]
src/openai/resources/containers/files/content.py
@@ -0,0 +1,166 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+
+__all__ = ["Content", "AsyncContent"]
+
+
+class Content(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContentWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Retrieve Container File Content
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._get(
+ f"/containers/{container_id}/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncContent(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContentWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Retrieve Container File Content
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._get(
+ f"/containers/{container_id}/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class ContentWithRawResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithRawResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class ContentWithStreamingResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = to_streamed_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithStreamingResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ content.retrieve,
+ )
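
Because `retrieve` is cast to `NoneType`, the parsed return value carries no body; a hedged sketch of reaching the bytes through the raw-response wrapper instead (the IDs and output path are placeholders):

    from openai import OpenAI

    client = OpenAI()

    raw = client.containers.files.content.with_raw_response.retrieve(
        "file_abc123",               # placeholder file ID
        container_id="cntr_abc123",  # placeholder container ID
    )
    data = raw.content  # raw bytes of the container file
    with open("output.bin", "wb") as f:
        f.write(data)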
src/openai/resources/containers/files/files.py
@@ -0,0 +1,532 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.containers import file_list_params, file_create_params
+from ....types.containers.file_list_response import FileListResponse
+from ....types.containers.file_create_response import FileCreateResponse
+from ....types.containers.file_retrieve_response import FileRetrieveResponse
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+ @cached_property
+ def content(self) -> Content:
+ return Content(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> FilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return FilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return FilesWithStreamingResponse(self)
+
+ def create(
+ self,
+ container_id: str,
+ *,
+ file: FileTypes | NotGiven = NOT_GIVEN,
+ file_id: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Create a Container File
+
+ You can send either a multipart/form-data request with the raw file content, or
+ a JSON request with a file ID.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+ file_id: Name of the file to create.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._post(
+ f"/containers/{container_id}/files",
+ body=maybe_transform(
+ {
+ "file": file,
+ "file_id": file_id,
+ },
+ file_create_params.FileCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileRetrieveResponse:
+ """
+ Retrieve Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return self._get(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileRetrieveResponse,
+ )
+
+ def list(
+ self,
+ container_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[FileListResponse]:
+ """List Container files
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get_api_list(
+ f"/containers/{container_id}/files",
+ page=SyncCursorPage[FileListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=FileListResponse,
+ )
+
+ def delete(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncFiles(AsyncAPIResource):
+ @cached_property
+ def content(self) -> AsyncContent:
+ return AsyncContent(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncFilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncFilesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ container_id: str,
+ *,
+ file: FileTypes | NotGiven = NOT_GIVEN,
+ file_id: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Create a Container File
+
+ You can send either a multipart/form-data request with the raw file content, or
+ a JSON request with a file ID.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+ file_id: Name of the file to create.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return await self._post(
+ f"/containers/{container_id}/files",
+ body=await async_maybe_transform(
+ {
+ "file": file,
+ "file_id": file_id,
+ },
+ file_create_params.FileCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileRetrieveResponse:
+ """
+ Retrieve Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return await self._get(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileRetrieveResponse,
+ )
+
+ def list(
+ self,
+ container_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[FileListResponse, AsyncCursorPage[FileListResponse]]:
+ """List Container files
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get_api_list(
+ f"/containers/{container_id}/files",
+ page=AsyncCursorPage[FileListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=FileListResponse,
+ )
+
+ async def delete(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class FilesWithRawResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithRawResponse:
+ return ContentWithRawResponse(self._files.content)
+
+
+class AsyncFilesWithRawResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithRawResponse:
+ return AsyncContentWithRawResponse(self._files.content)
+
+
+class FilesWithStreamingResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithStreamingResponse:
+ return ContentWithStreamingResponse(self._files.content)
+
+
+class AsyncFilesWithStreamingResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = async_to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithStreamingResponse:
+ return AsyncContentWithStreamingResponse(self._files.content)
src/openai/resources/containers/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .containers import (
+ Containers,
+ AsyncContainers,
+ ContainersWithRawResponse,
+ AsyncContainersWithRawResponse,
+ ContainersWithStreamingResponse,
+ AsyncContainersWithStreamingResponse,
+)
+
+__all__ = [
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+ "Containers",
+ "AsyncContainers",
+ "ContainersWithRawResponse",
+ "AsyncContainersWithRawResponse",
+ "ContainersWithStreamingResponse",
+ "AsyncContainersWithStreamingResponse",
+]
src/openai/resources/containers/containers.py
@@ -0,0 +1,511 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ...types import container_list_params, container_create_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .files.files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.container_list_response import ContainerListResponse
+from ...types.container_create_response import ContainerCreateResponse
+from ...types.container_retrieve_response import ContainerRetrieveResponse
+
+__all__ = ["Containers", "AsyncContainers"]
+
+
+class Containers(SyncAPIResource):
+ @cached_property
+ def files(self) -> Files:
+ return Files(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ContainersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContainersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContainersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContainersWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerCreateResponse:
+ """
+ Create Container
+
+ Args:
+ name: Name of the container to create.
+
+ expires_after: Container expiration time in seconds relative to the 'anchor' time.
+
+ file_ids: IDs of files to copy to the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/containers",
+ body=maybe_transform(
+ {
+ "name": name,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ },
+ container_create_params.ContainerCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerRetrieveResponse:
+ """
+ Retrieve Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[ContainerListResponse]:
+ """List Containers
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/containers",
+ page=SyncCursorPage[ContainerListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ container_list_params.ContainerListParams,
+ ),
+ ),
+ model=ContainerListResponse,
+ )
+
+ def delete(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncContainers(AsyncAPIResource):
+ @cached_property
+ def files(self) -> AsyncFiles:
+ return AsyncFiles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncContainersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContainersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContainersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContainersWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerCreateResponse:
+ """
+ Create Container
+
+ Args:
+ name: Name of the container to create.
+
+ expires_after: Container expiration time in seconds relative to the 'anchor' time.
+
+ file_ids: IDs of files to copy to the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/containers",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ },
+ container_create_params.ContainerCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerRetrieveResponse:
+ """
+ Retrieve Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return await self._get(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[ContainerListResponse, AsyncCursorPage[ContainerListResponse]]:
+ """List Containers
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/containers",
+ page=AsyncCursorPage[ContainerListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ container_list_params.ContainerListParams,
+ ),
+ ),
+ model=ContainerListResponse,
+ )
+
+ async def delete(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class ContainersWithRawResponse:
+ def __init__(self, containers: Containers) -> None:
+ self._containers = containers
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ containers.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithRawResponse:
+ return FilesWithRawResponse(self._containers.files)
+
+
+class AsyncContainersWithRawResponse:
+ def __init__(self, containers: AsyncContainers) -> None:
+ self._containers = containers
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ containers.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithRawResponse:
+ return AsyncFilesWithRawResponse(self._containers.files)
+
+
+class ContainersWithStreamingResponse:
+ def __init__(self, containers: Containers) -> None:
+ self._containers = containers
+
+ self.create = to_streamed_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ containers.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithStreamingResponse:
+ return FilesWithStreamingResponse(self._containers.files)
+
+
+class AsyncContainersWithStreamingResponse:
+ def __init__(self, containers: AsyncContainers) -> None:
+ self._containers = containers
+
+ self.create = async_to_streamed_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ containers.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithStreamingResponse:
+ return AsyncFilesWithStreamingResponse(self._containers.files)
src/openai/resources/responses/responses.py
@@ -948,6 +948,43 @@ class Responses(SyncAPIResource):
cast_to=NoneType,
)
+ def cancel(
+ self,
+ response_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """Cancels a model response with the given ID.
+
+ Only responses created with the
+ `background` parameter set to `true` can be cancelled.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not response_id:
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ f"/responses/{response_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
class AsyncResponses(AsyncAPIResource):
@cached_property
@@ -1851,6 +1888,43 @@ class AsyncResponses(AsyncAPIResource):
cast_to=NoneType,
)
+ async def cancel(
+ self,
+ response_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """Cancels a model response with the given ID.
+
+ Only responses created with the
+ `background` parameter set to `true` can be cancelled.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not response_id:
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ f"/responses/{response_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
class ResponsesWithRawResponse:
def __init__(self, responses: Responses) -> None:
@@ -1865,6 +1939,9 @@ class ResponsesWithRawResponse:
self.delete = _legacy_response.to_raw_response_wrapper(
responses.delete,
)
+ self.cancel = _legacy_response.to_raw_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> InputItemsWithRawResponse:
@@ -1884,6 +1961,9 @@ class AsyncResponsesWithRawResponse:
self.delete = _legacy_response.async_to_raw_response_wrapper(
responses.delete,
)
+ self.cancel = _legacy_response.async_to_raw_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsWithRawResponse:
@@ -1903,6 +1983,9 @@ class ResponsesWithStreamingResponse:
self.delete = to_streamed_response_wrapper(
responses.delete,
)
+ self.cancel = to_streamed_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> InputItemsWithStreamingResponse:
@@ -1922,6 +2005,9 @@ class AsyncResponsesWithStreamingResponse:
self.delete = async_to_streamed_response_wrapper(
responses.delete,
)
+ self.cancel = async_to_streamed_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsWithStreamingResponse:
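
Per the docstring, `cancel` only applies to responses created with `background=True`. A minimal sketch; the model and input are placeholders:

    from openai import OpenAI

    client = OpenAI()

    resp = client.responses.create(
        model="gpt-4.1",  # placeholder model
        input="Summarize this repository.",
        background=True,
    )

    # Returns None on success; the endpoint is cast to NoneType above.
    client.responses.cancel(resp.id)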
src/openai/resources/__init__.py
@@ -72,6 +72,14 @@ from .uploads import (
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
+from .containers import (
+ Containers,
+ AsyncContainers,
+ ContainersWithRawResponse,
+ AsyncContainersWithRawResponse,
+ ContainersWithStreamingResponse,
+ AsyncContainersWithStreamingResponse,
+)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
@@ -198,4 +206,10 @@ __all__ = [
"AsyncEvalsWithRawResponse",
"EvalsWithStreamingResponse",
"AsyncEvalsWithStreamingResponse",
+ "Containers",
+ "AsyncContainers",
+ "ContainersWithRawResponse",
+ "AsyncContainersWithRawResponse",
+ "ContainersWithStreamingResponse",
+ "AsyncContainersWithStreamingResponse",
]
src/openai/types/containers/files/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
src/openai/types/containers/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .file_list_params import FileListParams as FileListParams
+from .file_create_params import FileCreateParams as FileCreateParams
+from .file_list_response import FileListResponse as FileListResponse
+from .file_create_response import FileCreateResponse as FileCreateResponse
+from .file_retrieve_response import FileRetrieveResponse as FileRetrieveResponse
src/openai/types/containers/file_create_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+ file: FileTypes
+ """The File object (not file name) to be uploaded."""
+
+ file_id: str
+ """Name of the file to create."""
src/openai/types/containers/file_create_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileCreateResponse"]
+
+
+class FileCreateResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
src/openai/types/containers/file_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
src/openai/types/containers/file_list_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileListResponse"]
+
+
+class FileListResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
src/openai/types/containers/file_retrieve_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileRetrieveResponse"]
+
+
+class FileRetrieveResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
src/openai/types/responses/response_audio_delta_event.py
@@ -11,5 +11,8 @@ class ResponseAudioDeltaEvent(BaseModel):
delta: str
"""A chunk of Base64 encoded response audio bytes."""
+ sequence_number: int
+ """A sequence number for this chunk of the stream response."""
+
type: Literal["response.audio.delta"]
"""The type of the event. Always `response.audio.delta`."""
src/openai/types/responses/response_audio_done_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseAudioDoneEvent"]
class ResponseAudioDoneEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of the delta."""
+
type: Literal["response.audio.done"]
"""The type of the event. Always `response.audio.done`."""
src/openai/types/responses/response_audio_transcript_delta_event.py
@@ -11,5 +11,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel):
delta: str
"""The partial transcript of the audio response."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.audio.transcript.delta"]
"""The type of the event. Always `response.audio.transcript.delta`."""
src/openai/types/responses/response_audio_transcript_done_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseAudioTranscriptDoneEvent"]
class ResponseAudioTranscriptDoneEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.audio.transcript.done"]
"""The type of the event. Always `response.audio.transcript.done`."""
src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -14,5 +14,8 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.code.delta"]
"""The type of the event. Always `response.code_interpreter_call.code.delta`."""
src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -14,5 +14,8 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.code.done"]
"""The type of the event. Always `response.code_interpreter_call.code.done`."""
src/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.completed"]
"""The type of the event. Always `response.code_interpreter_call.completed`."""
src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.in_progress"]
"""The type of the event. Always `response.code_interpreter_call.in_progress`."""
src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.interpreting"]
"""The type of the event. Always `response.code_interpreter_call.interpreting`."""
src/openai/types/responses/response_completed_event.py
@@ -12,5 +12,8 @@ class ResponseCompletedEvent(BaseModel):
response: Response
"""Properties of the completed response."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.completed"]
"""The type of the event. Always `response.completed`."""
src/openai/types/responses/response_content_part_added_event.py
@@ -26,5 +26,8 @@ class ResponseContentPartAddedEvent(BaseModel):
part: Part
"""The content part that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.content_part.added"]
"""The type of the event. Always `response.content_part.added`."""
src/openai/types/responses/response_content_part_done_event.py
@@ -26,5 +26,8 @@ class ResponseContentPartDoneEvent(BaseModel):
part: Part
"""The content part that is done."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.content_part.done"]
"""The type of the event. Always `response.content_part.done`."""
src/openai/types/responses/response_created_event.py
@@ -12,5 +12,8 @@ class ResponseCreatedEvent(BaseModel):
response: Response
"""The response that was created."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.created"]
"""The type of the event. Always `response.created`."""
src/openai/types/responses/response_error_event.py
@@ -18,5 +18,8 @@ class ResponseErrorEvent(BaseModel):
param: Optional[str] = None
"""The error parameter."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["error"]
"""The type of the event. Always `error`."""
src/openai/types/responses/response_failed_event.py
@@ -12,5 +12,8 @@ class ResponseFailedEvent(BaseModel):
response: Response
"""The response that failed."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.failed"]
"""The type of the event. Always `response.failed`."""
src/openai/types/responses/response_file_search_call_completed_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is initiated."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.completed"]
"""The type of the event. Always `response.file_search_call.completed`."""
src/openai/types/responses/response_file_search_call_in_progress_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is initiated."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.in_progress"]
"""The type of the event. Always `response.file_search_call.in_progress`."""
src/openai/types/responses/response_file_search_call_searching_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallSearchingEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is searching."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.searching"]
"""The type of the event. Always `response.file_search_call.searching`."""
src/openai/types/responses/response_function_call_arguments_delta_event.py
@@ -19,5 +19,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
The index of the output item that the function-call arguments delta is added to.
"""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.function_call_arguments.delta"]
"""The type of the event. Always `response.function_call_arguments.delta`."""
src/openai/types/responses/response_function_call_arguments_done_event.py
@@ -17,4 +17,7 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
output_index: int
"""The index of the output item."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.function_call_arguments.done"]
src/openai/types/responses/response_image_gen_call_completed_event.py
@@ -14,5 +14,8 @@ class ResponseImageGenCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.image_generation_call.completed"]
"""The type of the event. Always 'response.image_generation_call.completed'."""
src/openai/types/responses/response_image_gen_call_generating_event.py
@@ -1,6 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
@@ -15,8 +14,8 @@ class ResponseImageGenCallGeneratingEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of the image generation item being processed."""
+
type: Literal["response.image_generation_call.generating"]
"""The type of the event. Always 'response.image_generation_call.generating'."""
-
- sequence_number: Optional[int] = None
- """The sequence number of the image generation item being processed."""
src/openai/types/responses/response_in_progress_event.py
@@ -12,5 +12,8 @@ class ResponseInProgressEvent(BaseModel):
response: Response
"""The response that is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.in_progress"]
"""The type of the event. Always `response.in_progress`."""
src/openai/types/responses/response_incomplete_event.py
@@ -12,5 +12,8 @@ class ResponseIncompleteEvent(BaseModel):
response: Response
"""The response that was incomplete."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.incomplete"]
"""The type of the event. Always `response.incomplete`."""
src/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -17,5 +17,8 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.arguments_delta"]
"""The type of the event. Always 'response.mcp_call.arguments_delta'."""
src/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -17,5 +17,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.arguments_done"]
"""The type of the event. Always 'response.mcp_call.arguments_done'."""
src/openai/types/responses/response_mcp_call_completed_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseMcpCallCompletedEvent"]
class ResponseMcpCallCompletedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.completed"]
"""The type of the event. Always 'response.mcp_call.completed'."""
src/openai/types/responses/response_mcp_call_failed_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseMcpCallFailedEvent"]
class ResponseMcpCallFailedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.failed"]
"""The type of the event. Always 'response.mcp_call.failed'."""
src/openai/types/responses/response_mcp_call_in_progress_event.py
@@ -14,5 +14,8 @@ class ResponseMcpCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.in_progress"]
"""The type of the event. Always 'response.mcp_call.in_progress'."""
src/openai/types/responses/response_mcp_list_tools_completed_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseMcpListToolsCompletedEvent"]
class ResponseMcpListToolsCompletedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.completed"]
"""The type of the event. Always 'response.mcp_list_tools.completed'."""
src/openai/types/responses/response_mcp_list_tools_failed_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseMcpListToolsFailedEvent"]
class ResponseMcpListToolsFailedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.failed"]
"""The type of the event. Always 'response.mcp_list_tools.failed'."""
src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
@@ -8,5 +8,8 @@ __all__ = ["ResponseMcpListToolsInProgressEvent"]
class ResponseMcpListToolsInProgressEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.in_progress"]
"""The type of the event. Always 'response.mcp_list_tools.in_progress'."""
src/openai/types/responses/response_output_item_added_event.py
@@ -15,5 +15,8 @@ class ResponseOutputItemAddedEvent(BaseModel):
output_index: int
"""The index of the output item that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_item.added"]
"""The type of the event. Always `response.output_item.added`."""
src/openai/types/responses/response_output_item_done_event.py
@@ -15,5 +15,8 @@ class ResponseOutputItemDoneEvent(BaseModel):
output_index: int
"""The index of the output item that was marked done."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_item.done"]
"""The type of the event. Always `response.output_item.done`."""
src/openai/types/responses/response_output_text_annotation_added_event.py
@@ -23,5 +23,8 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_text_annotation.added"]
"""The type of the event. Always 'response.output_text_annotation.added'."""
src/openai/types/responses/response_queued_event.py
@@ -12,5 +12,8 @@ class ResponseQueuedEvent(BaseModel):
response: Response
"""The full response object that is queued."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.queued"]
"""The type of the event. Always 'response.queued'."""
src/openai/types/responses/response_reasoning_delta_event.py
@@ -20,5 +20,8 @@ class ResponseReasoningDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.reasoning.delta"]
"""The type of the event. Always 'response.reasoning.delta'."""
src/openai/types/responses/response_reasoning_done_event.py
@@ -17,6 +17,9 @@ class ResponseReasoningDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
text: str
"""The finalized reasoning text."""
src/openai/types/responses/response_reasoning_summary_delta_event.py
@@ -20,6 +20,9 @@ class ResponseReasoningSummaryDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the output item."""
src/openai/types/responses/response_reasoning_summary_done_event.py
@@ -14,6 +14,9 @@ class ResponseReasoningSummaryDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the output item."""
src/openai/types/responses/response_reasoning_summary_part_added_event.py
@@ -25,6 +25,9 @@ class ResponseReasoningSummaryPartAddedEvent(BaseModel):
part: Part
"""The summary part that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
src/openai/types/responses/response_reasoning_summary_part_done_event.py
@@ -25,6 +25,9 @@ class ResponseReasoningSummaryPartDoneEvent(BaseModel):
part: Part
"""The completed summary part."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
src/openai/types/responses/response_reasoning_summary_text_delta_event.py
@@ -17,6 +17,9 @@ class ResponseReasoningSummaryTextDeltaEvent(BaseModel):
output_index: int
"""The index of the output item this summary text delta is associated with."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
src/openai/types/responses/response_reasoning_summary_text_done_event.py
@@ -14,6 +14,9 @@ class ResponseReasoningSummaryTextDoneEvent(BaseModel):
output_index: int
"""The index of the output item this summary text is associated with."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
src/openai/types/responses/response_refusal_delta_event.py
@@ -20,5 +20,8 @@ class ResponseRefusalDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the refusal text is added to."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.refusal.delta"]
"""The type of the event. Always `response.refusal.delta`."""
src/openai/types/responses/response_refusal_done_event.py
@@ -20,5 +20,8 @@ class ResponseRefusalDoneEvent(BaseModel):
refusal: str
"""The refusal text that is finalized."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.refusal.done"]
"""The type of the event. Always `response.refusal.done`."""
src/openai/types/responses/response_text_annotation_delta_event.py
@@ -75,5 +75,8 @@ class ResponseTextAnnotationDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the text annotation was added to."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_text.annotation.added"]
"""The type of the event. Always `response.output_text.annotation.added`."""
src/openai/types/responses/response_text_delta_event.py
@@ -20,5 +20,8 @@ class ResponseTextDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the text delta was added to."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.output_text.delta"]
"""The type of the event. Always `response.output_text.delta`."""
src/openai/types/responses/response_text_done_event.py
@@ -17,6 +17,9 @@ class ResponseTextDoneEvent(BaseModel):
output_index: int
"""The index of the output item that the text content is finalized."""
+ sequence_number: int
+ """The sequence number for this event."""
+
text: str
"""The text content that is finalized."""
src/openai/types/__init__.py
@@ -56,19 +56,24 @@ from .eval_update_response import EvalUpdateResponse as EvalUpdateResponse
from .upload_create_params import UploadCreateParams as UploadCreateParams
from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
from .audio_response_format import AudioResponseFormat as AudioResponseFormat
+from .container_list_params import ContainerListParams as ContainerListParams
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse
from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
+from .container_create_params import ContainerCreateParams as ContainerCreateParams
+from .container_list_response import ContainerListResponse as ContainerListResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
+from .container_create_response import ContainerCreateResponse as ContainerCreateResponse
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
+from .container_retrieve_response import ContainerRetrieveResponse as ContainerRetrieveResponse
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
src/openai/types/container_create_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ContainerCreateParams", "ExpiresAfter"]
+
+
+class ContainerCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """Name of the container to create."""
+
+ expires_after: ExpiresAfter
+ """Container expiration time in seconds relative to the 'anchor' time."""
+
+ file_ids: List[str]
+ """IDs of files to copy to the container."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Time anchor for the expiration time.
+
+ Currently only 'last_active_at' is supported.
+ """
+
+ minutes: Required[int]
+ """The number of minutes after the anchor before the container expires."""
src/openai/types/container_create_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerCreateResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerCreateResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
src/openai/types/container_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ContainerListParams"]
+
+
+class ContainerListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
src/openai/types/container_list_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerListResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerListResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
src/openai/types/container_retrieve_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerRetrieveResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerRetrieveResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
src/openai/__init__.py
@@ -363,6 +363,7 @@ from ._module_client import (
batches as batches,
uploads as uploads,
responses as responses,
+ containers as containers,
embeddings as embeddings,
completions as completions,
fine_tuning as fine_tuning,
src/openai/_client.py
@@ -46,6 +46,7 @@ if TYPE_CHECKING:
batches,
uploads,
responses,
+ containers,
embeddings,
completions,
fine_tuning,
@@ -65,6 +66,7 @@ if TYPE_CHECKING:
from .resources.moderations import Moderations, AsyncModerations
from .resources.uploads.uploads import Uploads, AsyncUploads
from .resources.responses.responses import Responses, AsyncResponses
+ from .resources.containers.containers import Containers, AsyncContainers
from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning
from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores
@@ -244,6 +246,12 @@ class OpenAI(SyncAPIClient):
return Evals(self)
+ @cached_property
+ def containers(self) -> Containers:
+ from .resources.containers import Containers
+
+ return Containers(self)
+
@cached_property
def with_raw_response(self) -> OpenAIWithRawResponse:
return OpenAIWithRawResponse(self)
@@ -539,6 +547,12 @@ class AsyncOpenAI(AsyncAPIClient):
return AsyncEvals(self)
+ @cached_property
+ def containers(self) -> AsyncContainers:
+ from .resources.containers import AsyncContainers
+
+ return AsyncContainers(self)
+
@cached_property
def with_raw_response(self) -> AsyncOpenAIWithRawResponse:
return AsyncOpenAIWithRawResponse(self)
@@ -757,6 +771,12 @@ class OpenAIWithRawResponse:
return EvalsWithRawResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.ContainersWithRawResponse:
+ from .resources.containers import ContainersWithRawResponse
+
+ return ContainersWithRawResponse(self._client.containers)
+
class AsyncOpenAIWithRawResponse:
_client: AsyncOpenAI
@@ -854,6 +874,12 @@ class AsyncOpenAIWithRawResponse:
return AsyncEvalsWithRawResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.AsyncContainersWithRawResponse:
+ from .resources.containers import AsyncContainersWithRawResponse
+
+ return AsyncContainersWithRawResponse(self._client.containers)
+
class OpenAIWithStreamedResponse:
_client: OpenAI
@@ -951,6 +977,12 @@ class OpenAIWithStreamedResponse:
return EvalsWithStreamingResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.ContainersWithStreamingResponse:
+ from .resources.containers import ContainersWithStreamingResponse
+
+ return ContainersWithStreamingResponse(self._client.containers)
+
class AsyncOpenAIWithStreamedResponse:
_client: AsyncOpenAI
@@ -1048,6 +1080,12 @@ class AsyncOpenAIWithStreamedResponse:
return AsyncEvalsWithStreamingResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.AsyncContainersWithStreamingResponse:
+ from .resources.containers import AsyncContainersWithStreamingResponse
+
+ return AsyncContainersWithStreamingResponse(self._client.containers)
+
Client = OpenAI
src/openai/_module_client.py
@@ -19,6 +19,7 @@ if TYPE_CHECKING:
from .resources.moderations import Moderations
from .resources.uploads.uploads import Uploads
from .resources.responses.responses import Responses
+ from .resources.containers.containers import Containers
from .resources.fine_tuning.fine_tuning import FineTuning
from .resources.vector_stores.vector_stores import VectorStores
@@ -92,6 +93,12 @@ class EmbeddingsProxy(LazyProxy["Embeddings"]):
return _load_client().embeddings
+class ContainersProxy(LazyProxy["Containers"]):
+ @override
+ def __load__(self) -> Containers:
+ return _load_client().containers
+
+
class CompletionsProxy(LazyProxy["Completions"]):
@override
def __load__(self) -> Completions:
@@ -127,6 +134,7 @@ batches: Batches = BatchesProxy().__as_proxied__()
uploads: Uploads = UploadsProxy().__as_proxied__()
responses: Responses = ResponsesProxy().__as_proxied__()
embeddings: Embeddings = EmbeddingsProxy().__as_proxied__()
+containers: Containers = ContainersProxy().__as_proxied__()
completions: Completions = CompletionsProxy().__as_proxied__()
moderations: Moderations = ModerationsProxy().__as_proxied__()
fine_tuning: FineTuning = FineTuningProxy().__as_proxied__()
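With the proxy registered, the lazy module-level client style extends to containers as well; a one-line sketch, assuming OPENAI_API_KEY is set in the environment:

import openai

# First attribute access instantiates the default client via ContainersProxy.
for container in openai.containers.list():
    print(container.name)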
src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.80.0" # x-release-please-version
+__version__ = "1.81.0" # x-release-please-version
tests/api_resources/containers/files/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/containers/files/test_content.py
@@ -0,0 +1,116 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestContent:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ content = client.containers.files.content.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert content is None
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ content = response.parse()
+ assert content is None
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.files.content.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ content = response.parse()
+ assert content is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.content.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+
+class TestAsyncContent:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ content = await async_client.containers.files.content.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert content is None
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ content = response.parse()
+ assert content is None
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.content.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ content = await response.parse()
+ assert content is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
tests/api_resources/containers/__init__.py
@@ -0,0 +1,1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
tests/api_resources/containers/test_files.py
@@ -0,0 +1,409 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.containers import (
+ FileListResponse,
+ FileCreateResponse,
+ FileRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFiles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ file = client.containers.files.create(
+ container_id="container_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ file = client.containers.files.create(
+ container_id="container_id",
+ file=b"raw file contents",
+ file_id="file_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.create(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.create(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.create(
+ container_id="",
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ file = client.containers.files.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ file = client.containers.files.list(
+ container_id="container_id",
+ )
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ file = client.containers.files.list(
+ container_id="container_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.list(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.list(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.list(
+ container_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ file = client.containers.files.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert file is None
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert file is None
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert file is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.with_raw_response.delete(
+ file_id="",
+ container_id="container_id",
+ )
+
+
+class TestAsyncFiles:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.create(
+ container_id="container_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.create(
+ container_id="container_id",
+ file=b"raw file contents",
+ file_id="file_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.create(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.create(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.create(
+ container_id="",
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.list(
+ container_id="container_id",
+ )
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.list(
+ container_id="container_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.list(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.list(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.list(
+ container_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert file is None
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert file is None
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert file is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.with_raw_response.delete(
+ file_id="",
+ container_id="container_id",
+ )
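A compact sketch of the container-files surface these tests cover (the IDs are placeholders, and the `.id` attribute on the create response is assumed from the SDK's sibling file models, which this commit does not show):

from openai import OpenAI

client = OpenAI()

created = client.containers.files.create(
    container_id="cntr_123",  # placeholder container ID
    file=b"hello, container",
)

for f in client.containers.files.list(container_id="cntr_123", limit=10):
    print(f.id)

client.containers.files.delete(file_id=created.id, container_id="cntr_123")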
tests/api_resources/test_containers.py
@@ -0,0 +1,333 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types import (
+ ContainerListResponse,
+ ContainerCreateResponse,
+ ContainerRetrieveResponse,
+)
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestContainers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ container = client.containers.create(
+ name="name",
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ container = client.containers.create(
+ name="name",
+ expires_after={
+ "anchor": "last_active_at",
+ "minutes": 0,
+ },
+ file_ids=["string"],
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ container = client.containers.retrieve(
+ "container_id",
+ )
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.retrieve(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.retrieve(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ container = client.containers.list()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ container = client.containers.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ container = client.containers.delete(
+ "container_id",
+ )
+ assert container is None
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.delete(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert container is None
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.delete(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert container is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncContainers:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.create(
+ name="name",
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.create(
+ name="name",
+ expires_after={
+ "anchor": "last_active_at",
+ "minutes": 0,
+ },
+ file_ids=["string"],
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.retrieve(
+ "container_id",
+ )
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.retrieve(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.retrieve(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.list()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.delete(
+ "container_id",
+ )
+ assert container is None
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.delete(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert container is None
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.delete(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert container is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.with_raw_response.delete(
+ "",
+ )
tests/api_resources/test_responses.py
@@ -247,6 +247,44 @@ class TestResponses:
"",
)
+ @parametrize
+ def test_method_cancel(self, client: OpenAI) -> None:
+ response = client.responses.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+ assert response is None
+
+ @parametrize
+ def test_raw_response_cancel(self, client: OpenAI) -> None:
+ http_response = client.responses.with_raw_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert response is None
+
+ @parametrize
+ def test_streaming_response_cancel(self, client: OpenAI) -> None:
+ with client.responses.with_streaming_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = http_response.parse()
+ assert response is None
+
+ assert cast(Any, http_response.is_closed) is True
+
+ @parametrize
+ def test_path_params_cancel(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ client.responses.with_raw_response.cancel(
+ "",
+ )
+
class TestAsyncResponses:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -480,3 +518,41 @@ class TestAsyncResponses:
await async_client.responses.with_raw_response.delete(
"",
)
+
+ @parametrize
+ async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.responses.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+ assert response is None
+
+ @parametrize
+ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ http_response = await async_client.responses.with_raw_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert response is None
+
+ @parametrize
+ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.responses.with_streaming_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = await http_response.parse()
+ assert response is None
+
+ assert cast(Any, http_response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ await async_client.responses.with_raw_response.cancel(
+ "",
+ )
.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.80.0"
+ ".": "1.81.0"
}
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 101
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a5651cb97f86d1e2531af6aef8c5230f1ea350560fbae790ca2e481b30a6c217.yml
-openapi_spec_hash: 66a5104fd3bb43383cf919225df7a6fd
-config_hash: bb657c3fed232a56930035de3aaed936
+configured_endpoints: 111
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6af14840a810139bf407013167ce1c8fb21b6ef8eb0cc3db58b51af7d52c4b5a.yml
+openapi_spec_hash: 3241bde6b273cfec0035e522bd07985d
+config_hash: 7367b68a4e7db36885c1a886f57b17f6
api.md
@@ -785,6 +785,7 @@ Methods:
- <code title="post /responses">client.responses.<a href="./src/openai/resources/responses/responses.py">create</a>(\*\*<a href="src/openai/types/responses/response_create_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
- <code title="get /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">retrieve</a>(response_id, \*\*<a href="src/openai/types/responses/response_retrieve_params.py">params</a>) -> <a href="./src/openai/types/responses/response.py">Response</a></code>
- <code title="delete /responses/{response_id}">client.responses.<a href="./src/openai/resources/responses/responses.py">delete</a>(response_id) -> None</code>
+- <code title="post /responses/{response_id}/cancel">client.responses.<a href="./src/openai/resources/responses/responses.py">cancel</a>(response_id) -> None</code>

## InputItems
@@ -859,3 +860,39 @@ Methods:
- <code title="get /evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}">client.evals.runs.output_items.<a href="./src/openai/resources/evals/runs/output_items.py">retrieve</a>(output_item_id, \*, eval_id, run_id) -> <a href="./src/openai/types/evals/runs/output_item_retrieve_response.py">OutputItemRetrieveResponse</a></code>
- <code title="get /evals/{eval_id}/runs/{run_id}/output_items">client.evals.runs.output_items.<a href="./src/openai/resources/evals/runs/output_items.py">list</a>(run_id, \*, eval_id, \*\*<a href="src/openai/types/evals/runs/output_item_list_params.py">params</a>) -> <a href="./src/openai/types/evals/runs/output_item_list_response.py">SyncCursorPage[OutputItemListResponse]</a></code>
+
+# Containers
+
+Types:
+
+```python
+from openai.types import ContainerCreateResponse, ContainerRetrieveResponse, ContainerListResponse
+```
+
+Methods:
+
+- <code title="post /containers">client.containers.<a href="./src/openai/resources/containers/containers.py">create</a>(\*\*<a href="src/openai/types/container_create_params.py">params</a>) -> <a href="./src/openai/types/container_create_response.py">ContainerCreateResponse</a></code>
+- <code title="get /containers/{container_id}">client.containers.<a href="./src/openai/resources/containers/containers.py">retrieve</a>(container_id) -> <a href="./src/openai/types/container_retrieve_response.py">ContainerRetrieveResponse</a></code>
+- <code title="get /containers">client.containers.<a href="./src/openai/resources/containers/containers.py">list</a>(\*\*<a href="src/openai/types/container_list_params.py">params</a>) -> <a href="./src/openai/types/container_list_response.py">SyncCursorPage[ContainerListResponse]</a></code>
+- <code title="delete /containers/{container_id}">client.containers.<a href="./src/openai/resources/containers/containers.py">delete</a>(container_id) -> None</code>
+
+## Files
+
+Types:
+
+```python
+from openai.types.containers import FileCreateResponse, FileRetrieveResponse, FileListResponse
+```
+
+Methods:
+
+- <code title="post /containers/{container_id}/files">client.containers.files.<a href="./src/openai/resources/containers/files/files.py">create</a>(container_id, \*\*<a href="src/openai/types/containers/file_create_params.py">params</a>) -> <a href="./src/openai/types/containers/file_create_response.py">FileCreateResponse</a></code>
+- <code title="get /containers/{container_id}/files/{file_id}">client.containers.files.<a href="./src/openai/resources/containers/files/files.py">retrieve</a>(file_id, \*, container_id) -> <a href="./src/openai/types/containers/file_retrieve_response.py">FileRetrieveResponse</a></code>
+- <code title="get /containers/{container_id}/files">client.containers.files.<a href="./src/openai/resources/containers/files/files.py">list</a>(container_id, \*\*<a href="src/openai/types/containers/file_list_params.py">params</a>) -> <a href="./src/openai/types/containers/file_list_response.py">SyncCursorPage[FileListResponse]</a></code>
+- <code title="delete /containers/{container_id}/files/{file_id}">client.containers.files.<a href="./src/openai/resources/containers/files/files.py">delete</a>(file_id, \*, container_id) -> None</code>
+
+### Content
+
+Methods:
+
+- <code title="get /containers/{container_id}/files/{file_id}/content">client.containers.files.content.<a href="./src/openai/resources/containers/files/content.py">retrieve</a>(file_id, \*, container_id) -> None</code>
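Stitching the new api.md surface together as a hedged end-to-end sketch: the `name` and `file` parameter names come from the linked params modules rather than from this diff, so treat them as assumptions. Note that `content.retrieve()` is typed `-> None` here, so the raw-response wrapper is one way to get at the bytes:

```python
from openai import OpenAI

client = OpenAI()

# POST /containers — `name` is assumed from container_create_params.
container = client.containers.create(name="scratchpad")

# POST /containers/{container_id}/files — `file` is assumed from
# file_create_params.
file = client.containers.files.create(
    container.id,
    file=open("notes.txt", "rb"),
)

# GET /containers/{container_id}/files/{file_id}/content — since the parsed
# return type is None, read the raw body through with_raw_response instead.
raw = client.containers.files.content.with_raw_response.retrieve(
    file.id,
    container_id=container.id,
)
data = raw.content  # raw response bytes
```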
CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog

+## 1.81.0 (2025-05-21)
+
+Full Changelog: [v1.80.0...v1.81.0](https://github.com/openai/openai-python/compare/v1.80.0...v1.81.0)
+
+### Features
+
+* **api:** add container endpoint ([054a210](https://github.com/openai/openai-python/commit/054a210289d7e0db22d2d2a61bbe4d4d9cc0cb47))
+
## 1.80.0 (2025-05-21)

Full Changelog: [v1.79.0...v1.80.0](https://github.com/openai/openai-python/compare/v1.79.0...v1.80.0)
pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.80.0"
+version = "1.81.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"