# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Optional
from typing_extensions import Literal

import httpx

from .. import _legacy_response
from ..types import batch_list_params, batch_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..pagination import SyncCursorPage, AsyncCursorPage
from ..types.batch import Batch
from .._base_client import AsyncPaginator, make_request_options
from ..types.shared_params.metadata import Metadata

__all__ = ["Batches", "AsyncBatches"]


class Batches(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> BatchesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return BatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> BatchesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return BatchesWithStreamingResponse(self)
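
    # Usage sketch (illustrative, not part of the generated code): assuming a configured
    # `OpenAI` client bound as `client`, either property prefixes a call on this resource:
    #
    #     raw = client.batches.with_raw_response.retrieve("batch_abc123")  # hypothetical ID
    #     print(raw.headers.get("x-request-id"))
    #     batch = raw.parse()  # deserialize into a `Batch` only when needed
    #
    #     with client.batches.with_streaming_response.retrieve("batch_abc123") as response:
    #         print(response.headers.get("x-request-id"))  # body is not read eagerly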

    def create(
        self,
        *,
        completion_window: Literal["24h"],
        endpoint: Literal[
            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
        ],
        input_file_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Creates and executes a batch from an uploaded file of requests

        Args:
          completion_window: The time frame within which the batch should be processed. Currently only `24h`
              is supported.

          endpoint: The endpoint to be used for all requests in the batch. Currently
              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
              and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
              restricted to a maximum of 50,000 embedding inputs across all requests in the
              batch.

          input_file_id: The ID of an uploaded file that contains requests for the new batch.

              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.

              Your input file must be formatted as a
              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
              and must be uploaded with the purpose `batch`. The file can contain up to 50,000
              requests, and can be up to 200 MB in size.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          output_expires_after: The expiration policy for the output and/or error files that are generated for a
              batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/batches",
            body=maybe_transform(
                {
                    "completion_window": completion_window,
                    "endpoint": endpoint,
                    "input_file_id": input_file_id,
                    "metadata": metadata,
                    "output_expires_after": output_expires_after,
                },
                batch_create_params.BatchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )
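
    # Usage sketch (illustrative, not part of the generated code): assumes an `OpenAI`
    # client bound as `client` and a JSONL request file uploaded with purpose="batch";
    # variable names are hypothetical.
    #
    #     batch_input_file = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")
    #     batch = client.batches.create(
    #         input_file_id=batch_input_file.id,
    #         endpoint="/v1/chat/completions",
    #         completion_window="24h",
    #         metadata={"description": "nightly eval run"},
    #     )
    #     print(batch.id, batch.status)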

    def retrieve(
        self,
        batch_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Retrieves a batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        return self._get(
            f"/batches/{batch_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[Batch]:
        """List your organization's batches.

        Args:
          after: A cursor for use in pagination.

              `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/batches",
            page=SyncCursorPage[Batch],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    batch_list_params.BatchListParams,
                ),
            ),
            model=Batch,
        )
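
    # Usage sketch (illustrative): the returned `SyncCursorPage[Batch]` can be iterated
    # directly; subsequent pages are fetched on demand via the `after` cursor.
    #
    #     for batch in client.batches.list(limit=20):
    #         print(batch.id, batch.status)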

    def cancel(
        self,
        batch_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """Cancels an in-progress batch.

        The batch will be in status `cancelling` for up to
        10 minutes, before changing to `cancelled`, where it will have partial results
        (if any) available in the output file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        return self._post(
            f"/batches/{batch_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )
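
    # Usage sketch (illustrative): cancellation is not immediate; per the docstring above,
    # the batch reports `cancelling` before settling on `cancelled`.
    #
    #     batch = client.batches.cancel("batch_abc123")  # hypothetical ID
    #     print(batch.status)  # typically "cancelling" right after the call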


class AsyncBatches(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncBatchesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncBatchesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncBatchesWithStreamingResponse(self)

    async def create(
        self,
        *,
        completion_window: Literal["24h"],
        endpoint: Literal[
            "/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/moderations"
        ],
        input_file_id: str,
        metadata: Optional[Metadata] | Omit = omit,
        output_expires_after: batch_create_params.OutputExpiresAfter | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Creates and executes a batch from an uploaded file of requests

        Args:
          completion_window: The time frame within which the batch should be processed. Currently only `24h`
              is supported.

          endpoint: The endpoint to be used for all requests in the batch. Currently
              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
              and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
              restricted to a maximum of 50,000 embedding inputs across all requests in the
              batch.

          input_file_id: The ID of an uploaded file that contains requests for the new batch.

              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.

              Your input file must be formatted as a
              [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
              and must be uploaded with the purpose `batch`. The file can contain up to 50,000
              requests, and can be up to 200 MB in size.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          output_expires_after: The expiration policy for the output and/or error files that are generated for a
              batch.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/batches",
            body=await async_maybe_transform(
                {
                    "completion_window": completion_window,
                    "endpoint": endpoint,
                    "input_file_id": input_file_id,
                    "metadata": metadata,
                    "output_expires_after": output_expires_after,
                },
                batch_create_params.BatchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )
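
    # Usage sketch (illustrative): the async resource mirrors the sync API but each call
    # must be awaited, assuming an `AsyncOpenAI` client bound as `client`; the
    # `batch_input_file` variable is hypothetical.
    #
    #     batch = await client.batches.create(
    #         input_file_id=batch_input_file.id,
    #         endpoint="/v1/chat/completions",
    #         completion_window="24h",
    #     )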

    async def retrieve(
        self,
        batch_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """
        Retrieves a batch.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        return await self._get(
            f"/batches/{batch_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
        """List your organization's batches.

        Args:
          after: A cursor for use in pagination.

              `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/batches",
            page=AsyncCursorPage[Batch],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    batch_list_params.BatchListParams,
                ),
            ),
            model=Batch,
        )
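
    # Usage sketch (illustrative): `AsyncCursorPage[Batch]` supports `async for`, with
    # additional pages fetched lazily as iteration proceeds.
    #
    #     async for batch in client.batches.list(limit=20):
    #         print(batch.id, batch.status)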

    async def cancel(
        self,
        batch_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Batch:
        """Cancels an in-progress batch.

        The batch will be in status `cancelling` for up to
        10 minutes, before changing to `cancelled`, where it will have partial results
        (if any) available in the output file.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not batch_id:
            raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
        return await self._post(
            f"/batches/{batch_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Batch,
        )


class BatchesWithRawResponse:
    def __init__(self, batches: Batches) -> None:
        self._batches = batches

        self.create = _legacy_response.to_raw_response_wrapper(
            batches.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            batches.retrieve,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            batches.list,
        )
        self.cancel = _legacy_response.to_raw_response_wrapper(
            batches.cancel,
        )


class AsyncBatchesWithRawResponse:
    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches

        self.create = _legacy_response.async_to_raw_response_wrapper(
            batches.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            batches.retrieve,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            batches.list,
        )
        self.cancel = _legacy_response.async_to_raw_response_wrapper(
            batches.cancel,
        )


class BatchesWithStreamingResponse:
    def __init__(self, batches: Batches) -> None:
        self._batches = batches

        self.create = to_streamed_response_wrapper(
            batches.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            batches.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            batches.list,
        )
        self.cancel = to_streamed_response_wrapper(
            batches.cancel,
        )


class AsyncBatchesWithStreamingResponse:
    def __init__(self, batches: AsyncBatches) -> None:
        self._batches = batches

        self.create = async_to_streamed_response_wrapper(
            batches.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            batches.retrieve,
        )
        self.list = async_to_streamed_response_wrapper(
            batches.list,
        )
        self.cancel = async_to_streamed_response_wrapper(
            batches.cancel,
        )