Commit 04487885

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-10-10 09:09:07
feat(api): comparison filter in/not in
1 parent 85a91ad
src/openai/resources/beta/threads/runs/runs.py
@@ -173,6 +173,9 @@ class Runs(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -327,6 +330,9 @@ class Runs(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -477,6 +483,9 @@ class Runs(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1603,6 +1612,9 @@ class AsyncRuns(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1757,6 +1769,9 @@ class AsyncRuns(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -1907,6 +1922,9 @@ class AsyncRuns(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
src/openai/resources/beta/assistants.py
@@ -102,6 +102,9 @@ class Assistants(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -309,6 +312,9 @@ class Assistants(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -555,6 +561,9 @@ class AsyncAssistants(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -762,6 +771,9 @@ class AsyncAssistants(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
src/openai/resources/chat/completions/completions.py
@@ -407,6 +407,9 @@ class Completions(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -704,6 +707,9 @@ class Completions(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -992,6 +998,9 @@ class Completions(SyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1845,6 +1854,9 @@ class AsyncCompletions(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -2142,6 +2154,9 @@ class AsyncCompletions(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -2430,6 +2445,9 @@ class AsyncCompletions(AsyncAPIResource):
               effort can result in faster responses and fewer tokens used on reasoning in a
               response.
 
+              Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+              effort.
+
           response_format: An object specifying the format that the model must output.
 
               Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
src/openai/resources/files.py
@@ -236,7 +236,7 @@ class Files(SyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> FileDeleted:
         """
-        Delete a file.
+        Delete a file and remove it from all vector stores.
 
         Args:
           extra_headers: Send extra headers
@@ -553,7 +553,7 @@ class AsyncFiles(AsyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> FileDeleted:
         """
-        Delete a file.
+        Delete a file and remove it from all vector stores.
 
         Args:
           extra_headers: Send extra headers
src/openai/types/beta/threads/run_create_params.py
@@ -114,6 +114,9 @@ class RunCreateParamsBase(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/beta/assistant_create_params.py
@@ -65,6 +65,9 @@ class AssistantCreateParams(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/beta/assistant_update_params.py
@@ -100,6 +100,9 @@ class AssistantUpdateParams(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: Optional[AssistantResponseFormatOptionParam]
src/openai/types/chat/completion_create_params.py
@@ -192,6 +192,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: ResponseFormat
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -175,6 +175,9 @@ class SamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: Optional[SamplingParamsResponseFormat] = None
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -171,6 +171,9 @@ class SamplingParams(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     response_format: SamplingParamsResponseFormat
src/openai/types/evals/run_cancel_response.py
@@ -106,6 +106,9 @@ class DataSourceResponsesSourceResponses(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     temperature: Optional[float] = None
@@ -241,6 +244,9 @@ class DataSourceResponsesSamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int] = None
src/openai/types/evals/run_create_params.py
@@ -119,6 +119,9 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     temperature: Optional[float]
@@ -259,6 +262,9 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: int
src/openai/types/evals/run_create_response.py
@@ -106,6 +106,9 @@ class DataSourceResponsesSourceResponses(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     temperature: Optional[float] = None
@@ -241,6 +244,9 @@ class DataSourceResponsesSamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int] = None
src/openai/types/evals/run_list_response.py
@@ -106,6 +106,9 @@ class DataSourceResponsesSourceResponses(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     temperature: Optional[float] = None
@@ -241,6 +244,9 @@ class DataSourceResponsesSamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int] = None
src/openai/types/evals/run_retrieve_response.py
@@ -106,6 +106,9 @@ class DataSourceResponsesSourceResponses(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     temperature: Optional[float] = None
@@ -241,6 +244,9 @@ class DataSourceResponsesSamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int] = None
src/openai/types/graders/score_model_grader.py
@@ -70,6 +70,9 @@ class SamplingParams(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int] = None
src/openai/types/graders/score_model_grader_param.py
@@ -76,6 +76,9 @@ class SamplingParams(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     seed: Optional[int]
src/openai/types/shared/comparison_filter.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Union
+from typing import List, Union
 from typing_extensions import Literal
 
 from ..._models import BaseModel
@@ -13,7 +13,9 @@ class ComparisonFilter(BaseModel):
     """The key to compare against the value."""
 
     type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
-    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+    """
+    Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+    `nin`.
 
     - `eq`: equals
     - `ne`: not equal
@@ -21,9 +23,11 @@ class ComparisonFilter(BaseModel):
     - `gte`: greater than or equal
     - `lt`: less than
     - `lte`: less than or equal
+    - `in`: in
+    - `nin`: not in
     """
 
-    value: Union[str, float, bool]
+    value: Union[str, float, bool, List[Union[str, float]]]
     """
     The value to compare against the attribute key; supports string, number,
     boolean, or array of strings/numbers types.
src/openai/types/shared/reasoning.py
@@ -17,6 +17,9 @@ class Reasoning(BaseModel):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None
src/openai/types/shared_params/comparison_filter.py
@@ -5,6 +5,8 @@ from __future__ import annotations
 from typing import Union
 from typing_extensions import Literal, Required, TypedDict
 
+from ..._types import SequenceNotStr
+
 __all__ = ["ComparisonFilter"]
 
 
@@ -13,7 +15,9 @@ class ComparisonFilter(TypedDict, total=False):
     """The key to compare against the value."""
 
     type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
-    """Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+    """
+    Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+    `nin`.
 
     - `eq`: equals
     - `ne`: not equal
@@ -21,9 +25,11 @@ class ComparisonFilter(TypedDict, total=False):
     - `gte`: greater than or equal
     - `lt`: less than
     - `lte`: less than or equal
+    - `in`: in
+    - `nin`: not in
     """
 
-    value: Required[Union[str, float, bool]]
+    value: Required[Union[str, float, bool, SequenceNotStr[Union[str, float]]]]
     """
     The value to compare against the attribute key; supports string, number,
     boolean, or array of strings/numbers types.
src/openai/types/shared_params/reasoning.py
@@ -18,6 +18,9 @@ class Reasoning(TypedDict, total=False):
     supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
     effort can result in faster responses and fewer tokens used on reasoning in a
     response.
+
+    Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+    effort.
     """
 
     generate_summary: Optional[Literal["auto", "concise", "detailed"]]
src/openai/types/vector_stores/vector_store_file.py
@@ -11,7 +11,7 @@ __all__ = ["VectorStoreFile", "LastError"]
 
 class LastError(BaseModel):
     code: Literal["server_error", "unsupported_file", "invalid_file"]
-    """One of `server_error` or `rate_limit_exceeded`."""
+    """One of `server_error`, `unsupported_file`, or `invalid_file`."""
 
     message: str
     """A human-readable description of the error."""
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 136
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d64cf80d2ebddf175c5578f68226a3d5bbd3f7fd8d62ccac2205f3fc05a355ee.yml
-openapi_spec_hash: d51e0d60d0c536f210b597a211bc5af0
-config_hash: e7c42016df9c6bd7bd6ff15101b9bc9b
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-e66e85fb7f72477256dca1acb6b23396989d381c5c1b318de564195436bcb93f.yml
+openapi_spec_hash: 0a4bbb5aa0ae532a072bd6b3854e70b1
+config_hash: 89bf7bb3a1f9439ffc6ea0e7dc57ba9b