Commit 0a5ad3e8
Changed files (11)
src/openai/resources/images.py
@@ -168,10 +168,7 @@ class Images(SyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
@@ -285,10 +282,7 @@ class Images(SyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
@@ -398,10 +392,7 @@ class Images(SyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
@@ -1055,10 +1046,7 @@ class AsyncImages(AsyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
@@ -1172,10 +1160,7 @@ class AsyncImages(AsyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
@@ -1285,10 +1270,7 @@ class AsyncImages(AsyncAPIResource):
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
+ input_fidelity: Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
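Note (not part of the commit): a minimal sketch of passing the `input_fidelity` parameter documented above to `Images.edit`. The model name comes from the docstring; the image file and prompt are placeholders.

```python
# Illustrative only: input_fidelity is documented as supported for gpt-image-1,
# accepting "high" or "low" (default "low").
from openai import OpenAI

client = OpenAI()

with open("person.png", "rb") as image_file:
    result = client.images.edit(
        model="gpt-image-1",       # input_fidelity is unsupported for gpt-image-1-mini
        image=image_file,
        prompt="Add a red scarf",
        input_fidelity="high",     # ask the model to preserve facial features closely
    )
```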
src/openai/types/responses/response_code_interpreter_tool_call.py
@@ -14,12 +14,12 @@ class OutputLogs(BaseModel):
"""The logs output from the code interpreter."""
type: Literal["logs"]
- """The type of the output. Always 'logs'."""
+ """The type of the output. Always `logs`."""
class OutputImage(BaseModel):
type: Literal["image"]
- """The type of the output. Always 'image'."""
+ """The type of the output. Always `image`."""
url: str
"""The URL of the image output from the code interpreter."""
src/openai/types/responses/response_code_interpreter_tool_call_param.py
@@ -13,12 +13,12 @@ class OutputLogs(TypedDict, total=False):
"""The logs output from the code interpreter."""
type: Required[Literal["logs"]]
- """The type of the output. Always 'logs'."""
+ """The type of the output. Always `logs`."""
class OutputImage(TypedDict, total=False):
type: Required[Literal["image"]]
- """The type of the output. Always 'image'."""
+ """The type of the output. Always `image`."""
url: Required[str]
"""The URL of the image output from the code interpreter."""
src/openai/types/responses/response_computer_tool_call.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union
+from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
@@ -31,10 +31,7 @@ class ActionClick(BaseModel):
"""
type: Literal["click"]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
+ """Specifies the event type. For a click action, this property is always `click`."""
x: int
"""The x-coordinate where the click occurred."""
@@ -181,10 +178,10 @@ class PendingSafetyCheck(BaseModel):
id: str
"""The ID of the pending safety check."""
- code: str
+ code: Optional[str] = None
"""The type of the pending safety check."""
- message: str
+ message: Optional[str] = None
"""Details about the pending safety check."""
src/openai/types/responses/response_computer_tool_call_output_item.py
@@ -13,10 +13,10 @@ class AcknowledgedSafetyCheck(BaseModel):
id: str
"""The ID of the pending safety check."""
- code: str
+ code: Optional[str] = None
"""The type of the pending safety check."""
- message: str
+ message: Optional[str] = None
"""Details about the pending safety check."""
src/openai/types/responses/response_computer_tool_call_param.py
@@ -2,7 +2,7 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
@@ -32,10 +32,7 @@ class ActionClick(TypedDict, total=False):
"""
type: Required[Literal["click"]]
- """Specifies the event type.
-
- For a click action, this property is always set to `click`.
- """
+ """Specifies the event type. For a click action, this property is always `click`."""
x: Required[int]
"""The x-coordinate where the click occurred."""
@@ -179,10 +176,10 @@ class PendingSafetyCheck(TypedDict, total=False):
id: Required[str]
"""The ID of the pending safety check."""
- code: Required[str]
+ code: Optional[str]
"""The type of the pending safety check."""
- message: Required[str]
+ message: Optional[str]
"""Details about the pending safety check."""
src/openai/types/responses/response_includable.py
@@ -5,10 +5,12 @@ from typing_extensions import Literal, TypeAlias
__all__ = ["ResponseIncludable"]
ResponseIncludable: TypeAlias = Literal[
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
"file_search_call.results",
+ "web_search_call.results",
+ "web_search_call.action.sources",
"message.input_image.image_url",
- "message.output_text.logprobs",
+ "computer_call_output.output.image_url",
+ "code_interpreter_call.outputs",
"reasoning.encrypted_content",
+ "message.output_text.logprobs",
]
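Note (not part of the commit): the `ResponseIncludable` literal above gains `web_search_call.results` and `web_search_call.action.sources`, and the test updates below switch the sample value to `file_search_call.results`. A sketch of requesting these extras via `include`; the model name and input are placeholders.

```python
# Illustrative only: ask for extra data on the response using values from
# the ResponseIncludable literal.
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1",
    input="What's in the attached report?",
    include=["file_search_call.results", "web_search_call.action.sources"],
)
```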
tests/api_resources/conversations/test_items.py
@@ -47,7 +47,7 @@ class TestItems:
"type": "message",
}
],
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@@ -116,7 +116,7 @@ class TestItems:
item = client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@@ -172,7 +172,7 @@ class TestItems:
item = client.conversations.items.list(
conversation_id="conv_123",
after="after",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
limit=0,
order="asc",
)
@@ -288,7 +288,7 @@ class TestAsyncItems:
"type": "message",
}
],
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@@ -357,7 +357,7 @@ class TestAsyncItems:
item = await async_client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@@ -413,7 +413,7 @@ class TestAsyncItems:
item = await async_client.conversations.items.list(
conversation_id="conv_123",
after="after",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
limit=0,
order="asc",
)
tests/api_resources/responses/test_input_items.py
@@ -30,7 +30,7 @@ class TestInputItems:
input_item = client.responses.input_items.list(
response_id="response_id",
after="after",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
limit=0,
order="asc",
)
@@ -85,7 +85,7 @@ class TestAsyncInputItems:
input_item = await async_client.responses.input_items.list(
response_id="response_id",
after="after",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
limit=0,
order="asc",
)
tests/api_resources/test_responses.py
@@ -30,7 +30,7 @@ class TestResponses:
response = client.responses.create(
background=True,
conversation="string",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
input="string",
instructions="instructions",
max_output_tokens=0,
@@ -110,7 +110,7 @@ class TestResponses:
stream=True,
background=True,
conversation="string",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
input="string",
instructions="instructions",
max_output_tokens=0,
@@ -190,7 +190,7 @@ class TestResponses:
def test_method_retrieve_with_all_params_overload_1(self, client: OpenAI) -> None:
response = client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
include_obfuscation=True,
starting_after=0,
stream=False,
@@ -241,7 +241,7 @@ class TestResponses:
response_stream = client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
stream=True,
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
include_obfuscation=True,
starting_after=0,
)
@@ -383,7 +383,7 @@ class TestAsyncResponses:
response = await async_client.responses.create(
background=True,
conversation="string",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
input="string",
instructions="instructions",
max_output_tokens=0,
@@ -463,7 +463,7 @@ class TestAsyncResponses:
stream=True,
background=True,
conversation="string",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
input="string",
instructions="instructions",
max_output_tokens=0,
@@ -543,7 +543,7 @@ class TestAsyncResponses:
async def test_method_retrieve_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
include_obfuscation=True,
starting_after=0,
stream=False,
@@ -594,7 +594,7 @@ class TestAsyncResponses:
response_stream = await async_client.responses.retrieve(
response_id="resp_677efb5139a88190b512bc3fef8e535d",
stream=True,
- include=["code_interpreter_call.outputs"],
+ include=["file_search_call.results"],
include_obfuscation=True,
starting_after=0,
)
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 135
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f59befea071ed7729cbb7bce219e7f837eccfdb57e01698514e6a0bd6052ff60.yml
-openapi_spec_hash: 49da48619d37932b2e257c532078b2bb
-config_hash: 1af83449a09a3b4f276444dbcdd3eb67
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b062c33330de7e3bbf992fd4f0799afd868c30a66c39418dd2c62f4add3b45b6.yml
+openapi_spec_hash: fe067f5b1c0e93799b5ea7fde3c4b1b3
+config_hash: 4b6f471b24d659514b86b736c90a0c0a