Commit 2edeeb1f

Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
2024-04-18 22:49:14
feat(api): batch list endpoint (#1338)
1 parent 7a7e4bc
Changed files (6)
src/openai/resources/batches.py
@@ -8,7 +8,7 @@ from typing_extensions import Literal
 import httpx
 
 from .. import _legacy_response
-from ..types import Batch, batch_create_params
+from ..types import Batch, batch_list_params, batch_create_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from .._utils import (
     maybe_transform,
@@ -17,7 +17,9 @@ from .._utils import (
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..pagination import SyncCursorPage, AsyncCursorPage
 from .._base_client import (
+    AsyncPaginator,
     make_request_options,
 )
 
@@ -125,6 +127,58 @@ class Batches(SyncAPIResource):
             cast_to=Batch,
         )
 
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SyncCursorPage[Batch]:
+        """List your organization's batches.
+
+        Args:
+          after: A cursor for use in pagination.
+
+        `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/batches",
+            page=SyncCursorPage[Batch],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    batch_list_params.BatchListParams,
+                ),
+            ),
+            model=Batch,
+        )
+
     def cancel(
         self,
         batch_id: str,
@@ -260,6 +314,58 @@ class AsyncBatches(AsyncAPIResource):
             cast_to=Batch,
         )
 
+    def list(
+        self,
+        *,
+        after: str | NotGiven = NOT_GIVEN,
+        limit: int | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
+        """List your organization's batches.
+
+        Args:
+          after: A cursor for use in pagination.
+
+        `after` is an object ID that defines your place
+              in the list. For instance, if you make a list request and receive 100 objects,
+              ending with obj_foo, your subsequent call can include after=obj_foo in order to
+              fetch the next page of the list.
+
+          limit: A limit on the number of objects to be returned. Limit can range between 1 and
+              100, and the default is 20.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get_api_list(
+            "/batches",
+            page=AsyncCursorPage[Batch],
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "after": after,
+                        "limit": limit,
+                    },
+                    batch_list_params.BatchListParams,
+                ),
+            ),
+            model=Batch,
+        )
+
     async def cancel(
         self,
         batch_id: str,
@@ -304,6 +410,9 @@ class BatchesWithRawResponse:
         self.retrieve = _legacy_response.to_raw_response_wrapper(
             batches.retrieve,
         )
+        self.list = _legacy_response.to_raw_response_wrapper(
+            batches.list,
+        )
         self.cancel = _legacy_response.to_raw_response_wrapper(
             batches.cancel,
         )
@@ -319,6 +428,9 @@ class AsyncBatchesWithRawResponse:
         self.retrieve = _legacy_response.async_to_raw_response_wrapper(
             batches.retrieve,
         )
+        self.list = _legacy_response.async_to_raw_response_wrapper(
+            batches.list,
+        )
         self.cancel = _legacy_response.async_to_raw_response_wrapper(
             batches.cancel,
         )
@@ -334,6 +446,9 @@ class BatchesWithStreamingResponse:
         self.retrieve = to_streamed_response_wrapper(
             batches.retrieve,
         )
+        self.list = to_streamed_response_wrapper(
+            batches.list,
+        )
         self.cancel = to_streamed_response_wrapper(
             batches.cancel,
         )
@@ -349,6 +464,9 @@ class AsyncBatchesWithStreamingResponse:
         self.retrieve = async_to_streamed_response_wrapper(
             batches.retrieve,
         )
+        self.list = async_to_streamed_response_wrapper(
+            batches.list,
+        )
         self.cancel = async_to_streamed_response_wrapper(
             batches.cancel,
         )
src/openai/types/__init__.py
@@ -22,6 +22,7 @@ from .model_deleted import ModelDeleted as ModelDeleted
 from .images_response import ImagesResponse as ImagesResponse
 from .completion_usage import CompletionUsage as CompletionUsage
 from .file_list_params import FileListParams as FileListParams
+from .batch_list_params import BatchListParams as BatchListParams
 from .completion_choice import CompletionChoice as CompletionChoice
 from .image_edit_params import ImageEditParams as ImageEditParams
 from .file_create_params import FileCreateParams as FileCreateParams
src/openai/types/batch_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BatchListParams"]
+
+
class BatchListParams(TypedDict, total=False):
    # total=False: both keys are optional; omitted keys are simply not sent.
    after: str
    """A cursor for use in pagination.

    `after` is an object ID that defines your place in the list. For instance,
    if you make a list request and receive 100 objects, ending with obj_foo,
    your subsequent call can include after=obj_foo in order to fetch the next
    page of the list.
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """
tests/api_resources/test_batches.py
@@ -10,6 +10,7 @@ import pytest
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai.types import Batch
+from openai.pagination import SyncCursorPage, AsyncCursorPage
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -102,6 +103,39 @@ class TestBatches:
                 "",
             )
 
+    @parametrize
+    def test_method_list(self, client: OpenAI) -> None:
+        batch = client.batches.list()
+        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    def test_method_list_with_all_params(self, client: OpenAI) -> None:
+        batch = client.batches.list(
+            after="string",
+            limit=0,
+        )
+        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    def test_raw_response_list(self, client: OpenAI) -> None:
+        response = client.batches.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        batch = response.parse()
+        assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    def test_streaming_response_list(self, client: OpenAI) -> None:
+        with client.batches.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            batch = response.parse()
+            assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
     @parametrize
     def test_method_cancel(self, client: OpenAI) -> None:
         batch = client.batches.cancel(
@@ -229,6 +263,39 @@ class TestAsyncBatches:
                 "",
             )
 
+    @parametrize
+    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+        batch = await async_client.batches.list()
+        assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+        batch = await async_client.batches.list(
+            after="string",
+            limit=0,
+        )
+        assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.batches.with_raw_response.list()
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        batch = response.parse()
+        assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.batches.with_streaming_response.list() as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            batch = await response.parse()
+            assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
     @parametrize
     async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.cancel(
.stats.yml
@@ -1,1 +1,1 @@
-configured_endpoints: 62
+configured_endpoints: 63
api.md
@@ -405,4 +405,5 @@ Methods:
 
 - <code title="post /batches">client.batches.<a href="./src/openai/resources/batches.py">create</a>(\*\*<a href="src/openai/types/batch_create_params.py">params</a>) -> <a href="./src/openai/types/batch.py">Batch</a></code>
 - <code title="get /batches/{batch_id}">client.batches.<a href="./src/openai/resources/batches.py">retrieve</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>
+- <code title="get /batches">client.batches.<a href="./src/openai/resources/batches.py">list</a>(\*\*<a href="src/openai/types/batch_list_params.py">params</a>) -> <a href="./src/openai/types/batch.py">SyncCursorPage[Batch]</a></code>
 - <code title="post /batches/{batch_id}/cancel">client.batches.<a href="./src/openai/resources/batches.py">cancel</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>