Commit 0d85ca08
Changed files (37)
src/openai/types/conversations/__init__.py
@@ -3,15 +3,11 @@
from __future__ import annotations
from .message import Message as Message
-from .lob_prob import LobProb as LobProb
from .conversation import Conversation as Conversation
from .text_content import TextContent as TextContent
-from .top_log_prob import TopLogProb as TopLogProb
from .refusal_content import RefusalContent as RefusalContent
from .item_list_params import ItemListParams as ItemListParams
from .conversation_item import ConversationItem as ConversationItem
-from .url_citation_body import URLCitationBody as URLCitationBody
-from .file_citation_body import FileCitationBody as FileCitationBody
from .input_file_content import InputFileContent as InputFileContent
from .input_text_content import InputTextContent as InputTextContent
from .item_create_params import ItemCreateParams as ItemCreateParams
@@ -19,9 +15,13 @@ from .input_image_content import InputImageContent as InputImageContent
from .output_text_content import OutputTextContent as OutputTextContent
from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams
from .summary_text_content import SummaryTextContent as SummaryTextContent
+from .refusal_content_param import RefusalContentParam as RefusalContentParam
from .conversation_item_list import ConversationItemList as ConversationItemList
+from .input_file_content_param import InputFileContentParam as InputFileContentParam
+from .input_text_content_param import InputTextContentParam as InputTextContentParam
+from .input_image_content_param import InputImageContentParam as InputImageContentParam
+from .output_text_content_param import OutputTextContentParam as OutputTextContentParam
from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent
-from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody
from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource
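Note: the annotation helper types (`LobProb`, `TopLogProb`, `URLCitationBody`, `FileCitationBody`, `ContainerFileCitationBody`) are dropped from this module, and the surviving content types become aliases of their Responses counterparts. A minimal sketch (assuming openai >= 1.108.0) of what that means at import time:

```python
# Minimal sketch: conversations content types now alias the Responses models.
from openai.types.conversations import InputTextContent, OutputTextContent
from openai.types.responses import ResponseInputText, ResponseOutputText

assert InputTextContent is ResponseInputText
assert OutputTextContent is ResponseOutputText

# The removed helper types no longer import from this module, e.g.:
#   from openai.types.conversations import LobProb  # -> ImportError
```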
src/openai/types/conversations/container_file_citation_body.py
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["ContainerFileCitationBody"]
-
-
-class ContainerFileCitationBody(BaseModel):
- container_id: str
- """The ID of the container file."""
-
- end_index: int
- """The index of the last character of the container file citation in the message."""
-
- file_id: str
- """The ID of the file."""
-
- filename: str
- """The filename of the container file cited."""
-
- start_index: int
- """The index of the first character of the container file citation in the message."""
-
- type: Literal["container_file_citation"]
- """The type of the container file citation. Always `container_file_citation`."""
src/openai/types/conversations/file_citation_body.py
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["FileCitationBody"]
-
-
-class FileCitationBody(BaseModel):
- file_id: str
- """The ID of the file."""
-
- filename: str
- """The filename of the file cited."""
-
- index: int
- """The index of the file in the list of files."""
-
- type: Literal["file_citation"]
- """The type of the file citation. Always `file_citation`."""
src/openai/types/conversations/input_file_content.py
@@ -1,22 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_file import ResponseInputFile
__all__ = ["InputFileContent"]
-
-class InputFileContent(BaseModel):
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- type: Literal["input_file"]
- """The type of the input item. Always `input_file`."""
-
- file_url: Optional[str] = None
- """The URL of the file to be sent to the model."""
-
- filename: Optional[str] = None
- """The name of the file to be sent to the model."""
+InputFileContent = ResponseInputFile
src/openai/types/conversations/input_file_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_file_param import ResponseInputFileParam
+
+InputFileContentParam = ResponseInputFileParam
src/openai/types/conversations/input_image_content.py
@@ -1,28 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_image import ResponseInputImage
__all__ = ["InputImageContent"]
-
-class InputImageContent(BaseModel):
- detail: Literal["low", "high", "auto"]
- """The detail level of the image to be sent to the model.
-
- One of `high`, `low`, or `auto`. Defaults to `auto`.
- """
-
- file_id: Optional[str] = None
- """The ID of the file to be sent to the model."""
-
- image_url: Optional[str] = None
- """The URL of the image to be sent to the model.
-
- A fully qualified URL or base64 encoded image in a data URL.
- """
-
- type: Literal["input_image"]
- """The type of the input item. Always `input_image`."""
+InputImageContent = ResponseInputImage
src/openai/types/conversations/input_image_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_image_param import ResponseInputImageParam
+
+InputImageContentParam = ResponseInputImageParam
src/openai/types/conversations/input_text_content.py
@@ -1,15 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_input_text import ResponseInputText
__all__ = ["InputTextContent"]
-
-class InputTextContent(BaseModel):
- text: str
- """The text input to the model."""
-
- type: Literal["input_text"]
- """The type of the input item. Always `input_text`."""
+InputTextContent = ResponseInputText
src/openai/types/conversations/input_text_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_input_text_param import ResponseInputTextParam
+
+InputTextContentParam = ResponseInputTextParam
src/openai/types/conversations/lob_prob.py
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-from .top_log_prob import TopLogProb
-
-__all__ = ["LobProb"]
-
-
-class LobProb(BaseModel):
- token: str
-
- bytes: List[int]
-
- logprob: float
-
- top_logprobs: List[TopLogProb]
src/openai/types/conversations/message.py
@@ -6,26 +6,26 @@ from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .text_content import TextContent
-from .refusal_content import RefusalContent
-from .input_file_content import InputFileContent
-from .input_text_content import InputTextContent
-from .input_image_content import InputImageContent
-from .output_text_content import OutputTextContent
from .summary_text_content import SummaryTextContent
from .computer_screenshot_content import ComputerScreenshotContent
+from ..responses.response_input_file import ResponseInputFile
+from ..responses.response_input_text import ResponseInputText
+from ..responses.response_input_image import ResponseInputImage
+from ..responses.response_output_text import ResponseOutputText
+from ..responses.response_output_refusal import ResponseOutputRefusal
__all__ = ["Message", "Content"]
Content: TypeAlias = Annotated[
Union[
- InputTextContent,
- OutputTextContent,
+ ResponseInputText,
+ ResponseOutputText,
TextContent,
SummaryTextContent,
- RefusalContent,
- InputImageContent,
+ ResponseOutputRefusal,
+ ResponseInputImage,
ComputerScreenshotContent,
- InputFileContent,
+ ResponseInputFile,
],
PropertyInfo(discriminator="type"),
]
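With the `Content` union now built from the Responses models, narrowing on the `type` discriminator (or with `isinstance`) works against those classes directly. A minimal sketch, assuming `Message` carries a `content` list of these parts (the class body is not shown in this hunk):

```python
# Minimal sketch: iterate a conversation message's content parts and pull out text.
from typing import List

from openai.types.conversations import Message
from openai.types.responses import ResponseOutputText


def collect_text(msg: Message) -> List[str]:
    parts: List[str] = []
    for content in msg.content:
        if isinstance(content, ResponseOutputText):  # type == "output_text"
            parts.append(content.text)
        elif content.type == "input_text":  # ResponseInputText after this change
            parts.append(content.text)
    return parts
```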
src/openai/types/conversations/output_text_content.py
@@ -1,30 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
+from ..responses.response_output_text import ResponseOutputText
-from ..._utils import PropertyInfo
-from .lob_prob import LobProb
-from ..._models import BaseModel
-from .url_citation_body import URLCitationBody
-from .file_citation_body import FileCitationBody
-from .container_file_citation_body import ContainerFileCitationBody
+__all__ = ["OutputTextContent"]
-__all__ = ["OutputTextContent", "Annotation"]
-
-Annotation: TypeAlias = Annotated[
- Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type")
-]
-
-
-class OutputTextContent(BaseModel):
- annotations: List[Annotation]
- """The annotations of the text output."""
-
- text: str
- """The text output from the model."""
-
- type: Literal["output_text"]
- """The type of the output text. Always `output_text`."""
-
- logprobs: Optional[List[LobProb]] = None
+OutputTextContent = ResponseOutputText
src/openai/types/conversations/output_text_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_output_text_param import ResponseOutputTextParam
+
+OutputTextContentParam = ResponseOutputTextParam
src/openai/types/conversations/refusal_content.py
@@ -1,15 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
-
-from ..._models import BaseModel
+from ..responses.response_output_refusal import ResponseOutputRefusal
__all__ = ["RefusalContent"]
-
-class RefusalContent(BaseModel):
- refusal: str
- """The refusal explanation from the model."""
-
- type: Literal["refusal"]
- """The type of the refusal. Always `refusal`."""
+RefusalContent = ResponseOutputRefusal
src/openai/types/conversations/refusal_content_param.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..responses.response_output_refusal_param import ResponseOutputRefusalParam
+
+RefusalContentParam = ResponseOutputRefusalParam
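The new `*_param.py` modules mirror the same pattern on the request side, aliasing the Responses TypedDicts. A minimal sketch of the dict shapes these aliases accept (field names assumed to match the usual Responses param types):

```python
# Minimal sketch: the request-side aliases are Responses TypedDicts, so plain
# dicts with the matching "type" discriminator type-check against them.
from openai.types.conversations import InputTextContentParam, RefusalContentParam

text_part: InputTextContentParam = {"type": "input_text", "text": "Hello!"}
refusal_part: RefusalContentParam = {"type": "refusal", "refusal": "I can't help with that."}
```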
src/openai/types/conversations/top_log_prob.py
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from ..._models import BaseModel
-
-__all__ = ["TopLogProb"]
-
-
-class TopLogProb(BaseModel):
- token: str
-
- bytes: List[int]
-
- logprob: float
src/openai/types/conversations/url_citation_body.py
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["URLCitationBody"]
-
-
-class URLCitationBody(BaseModel):
- end_index: int
- """The index of the last character of the URL citation in the message."""
-
- start_index: int
- """The index of the first character of the URL citation in the message."""
-
- title: str
- """The title of the web resource."""
-
- type: Literal["url_citation"]
- """The type of the URL citation. Always `url_citation`."""
-
- url: str
- """The URL of the web resource."""
src/openai/types/evals/runs/output_item_list_response.py
@@ -1,13 +1,38 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import builtins
-from typing import Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional
from typing_extensions import Literal
+from pydantic import Field as FieldInfo
+
from ...._models import BaseModel
from ..eval_api_error import EvalAPIError
-__all__ = ["OutputItemListResponse", "Sample", "SampleInput", "SampleOutput", "SampleUsage"]
+__all__ = ["OutputItemListResponse", "Result", "Sample", "SampleInput", "SampleOutput", "SampleUsage"]
+
+
+class Result(BaseModel):
+ name: str
+ """The name of the grader."""
+
+ passed: bool
+ """Whether the grader considered the output a pass."""
+
+ score: float
+ """The numeric score produced by the grader."""
+
+ sample: Optional[Dict[str, object]] = None
+ """Optional sample or intermediate data produced by the grader."""
+
+ type: Optional[str] = None
+ """The grader type (for example, "string-check-grader")."""
+
+ __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride]
+ if TYPE_CHECKING:
+ # Stub to indicate that arbitrary properties are accepted.
+ # To access properties that are not valid identifiers you can use `getattr`, e.g.
+ # `getattr(obj, '$type')`
+ def __getattr__(self, attr: str) -> object: ...
class SampleInput(BaseModel):
@@ -91,8 +116,8 @@ class OutputItemListResponse(BaseModel):
object: Literal["eval.run.output_item"]
"""The type of the object. Always "eval.run.output_item"."""
- results: List[Dict[str, builtins.object]]
- """A list of results from the evaluation run."""
+ results: List[Result]
+ """A list of grader results for this output item."""
run_id: str
"""The identifier of the evaluation run associated with this output item."""
src/openai/types/evals/runs/output_item_retrieve_response.py
@@ -1,13 +1,38 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import builtins
-from typing import Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional
from typing_extensions import Literal
+from pydantic import Field as FieldInfo
+
from ...._models import BaseModel
from ..eval_api_error import EvalAPIError
-__all__ = ["OutputItemRetrieveResponse", "Sample", "SampleInput", "SampleOutput", "SampleUsage"]
+__all__ = ["OutputItemRetrieveResponse", "Result", "Sample", "SampleInput", "SampleOutput", "SampleUsage"]
+
+
+class Result(BaseModel):
+ name: str
+ """The name of the grader."""
+
+ passed: bool
+ """Whether the grader considered the output a pass."""
+
+ score: float
+ """The numeric score produced by the grader."""
+
+ sample: Optional[Dict[str, object]] = None
+ """Optional sample or intermediate data produced by the grader."""
+
+ type: Optional[str] = None
+ """The grader type (for example, "string-check-grader")."""
+
+ __pydantic_extra__: Dict[str, object] = FieldInfo(init=False) # pyright: ignore[reportIncompatibleVariableOverride]
+ if TYPE_CHECKING:
+ # Stub to indicate that arbitrary properties are accepted.
+ # To access properties that are not valid identifiers you can use `getattr`, e.g.
+ # `getattr(obj, '$type')`
+ def __getattr__(self, attr: str) -> object: ...
class SampleInput(BaseModel):
@@ -91,8 +116,8 @@ class OutputItemRetrieveResponse(BaseModel):
object: Literal["eval.run.output_item"]
"""The type of the object. Always "eval.run.output_item"."""
- results: List[Dict[str, builtins.object]]
- """A list of results from the evaluation run."""
+ results: List[Result]
+ """A list of grader results for this output item."""
run_id: str
"""The identifier of the evaluation run associated with this output item."""
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -6,6 +6,7 @@ from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
from ..shared.response_format_text import ResponseFormatText
from ..responses.easy_input_message import EasyInputMessage
from ..responses.response_input_text import ResponseInputText
@@ -167,6 +168,15 @@ class SamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
response_format: Optional[SamplingParamsResponseFormat] = None
"""An object specifying the format that the model must output.
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -6,6 +6,7 @@ from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
from ..responses.easy_input_message_param import EasyInputMessageParam
from ..shared_params.response_format_text import ResponseFormatText
from ..responses.response_input_text_param import ResponseInputTextParam
@@ -163,6 +164,15 @@ class SamplingParams(TypedDict, total=False):
max_completion_tokens: int
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort]
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
response_format: SamplingParamsResponseFormat
"""An object specifying the format that the model must output.
src/openai/types/evals/run_cancel_response.py
@@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
src/openai/types/evals/run_create_params.py
@@ -252,6 +252,15 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=
max_completion_tokens: int
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort]
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
seed: int
"""A seed value to initialize the randomness, during sampling."""
src/openai/types/evals/run_create_response.py
@@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
src/openai/types/evals/run_list_response.py
@@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
src/openai/types/evals/run_retrieve_response.py
@@ -234,6 +234,15 @@ class DataSourceResponsesSamplingParams(BaseModel):
max_completion_tokens: Optional[int] = None
"""The maximum number of tokens in the generated output."""
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
seed: Optional[int] = None
"""A seed value to initialize the randomness, during sampling."""
src/openai/types/graders/score_model_grader.py
@@ -4,10 +4,18 @@ from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
+from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text import ResponseInputText
from ..responses.response_input_audio import ResponseInputAudio
-__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
+__all__ = [
+ "ScoreModelGrader",
+ "Input",
+ "InputContent",
+ "InputContentOutputText",
+ "InputContentInputImage",
+ "SamplingParams",
+]
class InputContentOutputText(BaseModel):
@@ -51,6 +59,29 @@ class Input(BaseModel):
"""The type of the message input. Always `message`."""
+class SamplingParams(BaseModel):
+ max_completions_tokens: Optional[int] = None
+ """The maximum number of tokens the grader model may generate in its response."""
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
class ScoreModelGrader(BaseModel):
input: List[Input]
"""The input text. This may include template strings."""
@@ -67,5 +98,5 @@ class ScoreModelGrader(BaseModel):
range: Optional[List[float]] = None
"""The range of the score. Defaults to `[0, 1]`."""
- sampling_params: Optional[object] = None
+ sampling_params: Optional[SamplingParams] = None
"""The sampling parameters for the model."""
src/openai/types/graders/score_model_grader_param.py
@@ -2,13 +2,21 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from ..shared.reasoning_effort import ReasoningEffort
from ..responses.response_input_text_param import ResponseInputTextParam
from ..responses.response_input_audio_param import ResponseInputAudioParam
-__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
+__all__ = [
+ "ScoreModelGraderParam",
+ "Input",
+ "InputContent",
+ "InputContentOutputText",
+ "InputContentInputImage",
+ "SamplingParams",
+]
class InputContentOutputText(TypedDict, total=False):
@@ -57,6 +65,29 @@ class Input(TypedDict, total=False):
"""The type of the message input. Always `message`."""
+class SamplingParams(TypedDict, total=False):
+ max_completions_tokens: Optional[int]
+ """The maximum number of tokens the grader model may generate in its response."""
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """
+ Constrains effort on reasoning for
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ effort can result in faster responses and fewer tokens used on reasoning in a
+ response.
+ """
+
+ seed: Optional[int]
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float]
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float]
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
class ScoreModelGraderParam(TypedDict, total=False):
input: Required[Iterable[Input]]
"""The input text. This may include template strings."""
@@ -73,5 +104,5 @@ class ScoreModelGraderParam(TypedDict, total=False):
range: Iterable[float]
"""The range of the score. Defaults to `[0, 1]`."""
- sampling_params: object
+ sampling_params: SamplingParams
"""The sampling parameters for the model."""
src/openai/_models.py
@@ -281,7 +281,7 @@ class BaseModel(pydantic.BaseModel):
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
@@ -289,6 +289,7 @@ class BaseModel(pydantic.BaseModel):
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
serialize_as_any: bool = False,
+ fallback: Callable[[Any], Any] | None = None,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -320,10 +321,12 @@ class BaseModel(pydantic.BaseModel):
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
@@ -338,13 +341,14 @@ class BaseModel(pydantic.BaseModel):
indent: int | None = None,
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
+ fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
@@ -373,11 +377,13 @@ class BaseModel(pydantic.BaseModel):
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
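These `model_dump`/`model_dump_json` overrides now default `by_alias` to `None` (coercing it to `False` only on the Pydantic v1 fallback path) and accept Pydantic 2.11's `fallback` argument, rejecting it explicitly under Pydantic v1. A minimal sketch of the caller-visible effect, using a throwaway model and assuming the `openai.BaseModel` re-export:

```python
# Minimal sketch: default dumps are unchanged; the new arguments are surfaced.
from openai import BaseModel  # SDK base model re-export (assumed)


class Demo(BaseModel):
    value: int


demo = Demo(value=1)
print(demo.model_dump())               # by_alias=None keeps the default output unchanged
print(demo.model_dump(by_alias=True))  # explicit alias serialization still works
# Under the Pydantic v1 compatibility path, the new `fallback` argument raises:
#   demo.model_dump(fallback=str)  ->  ValueError("fallback is only supported in Pydantic v2")
```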
src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.107.3" # x-release-please-version
+__version__ = "1.108.0" # x-release-please-version
.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.107.3"
+ ".": "1.108.0"
}
\ No newline at end of file
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 118
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d30ff992a48873c1466c49f3c01f2ec8933faebff23424748f8d056065b1bcef.yml
-openapi_spec_hash: e933ec43b46f45c348adb78840e5808d
-config_hash: bf45940f0a7805b4ec2017eecdd36893
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-380330a93b5d010391ca3b36ea193c5353b0dfdf2ddd02789ef84a84ce427e82.yml
+openapi_spec_hash: 859703234259ecdd2a3c6f4de88eb504
+config_hash: b619b45c1e7facf819f902dee8fa4f97
api.md
@@ -991,22 +991,17 @@ Types:
```python
from openai.types.conversations import (
ComputerScreenshotContent,
- ContainerFileCitationBody,
Conversation,
ConversationDeleted,
ConversationDeletedResource,
- FileCitationBody,
- InputFileContent,
- InputImageContent,
- InputTextContent,
- LobProb,
Message,
- OutputTextContent,
- RefusalContent,
SummaryTextContent,
TextContent,
- TopLogProb,
- URLCitationBody,
+ InputTextContent,
+ OutputTextContent,
+ RefusalContent,
+ InputImageContent,
+ InputFileContent,
)
```
CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## 1.108.0 (2025-09-17)
+
+Full Changelog: [v1.107.3...v1.108.0](https://github.com/openai/openai-python/compare/v1.107.3...v1.108.0)
+
+### Features
+
+* **api:** type updates for conversations, reasoning_effort and results for evals ([c2ee28c](https://github.com/openai/openai-python/commit/c2ee28c1b77eed98766fbb01cf1ad2ee240f412e))
+
+
+### Chores
+
+* **internal:** update pydantic dependency ([369d10a](https://github.com/openai/openai-python/commit/369d10a40dfe744f6bfc10c99eb1f58176500120))
+
## 1.107.3 (2025-09-15)
Full Changelog: [v1.107.2...v1.107.3](https://github.com/openai/openai-python/compare/v1.107.2...v1.107.3)
pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.107.3"
+version = "1.108.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
requirements-dev.lock
@@ -108,6 +108,7 @@ multidict==6.5.0
mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
+nest-asyncio==1.6.0
nodeenv==1.8.0
# via pyright
nox==2023.4.22
@@ -133,11 +134,11 @@ portalocker==2.10.1
propcache==0.3.2
# via aiohttp
# via yarl
-pycparser==2.22
+pycparser==2.23
# via cffi
-pydantic==2.10.3
+pydantic==2.11.9
# via openai
-pydantic-core==2.27.1
+pydantic-core==2.33.2
# via pydantic
pygments==2.18.0
# via pytest
@@ -199,6 +200,9 @@ typing-extensions==4.12.2
# via pydantic
# via pydantic-core
# via pyright
+ # via typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
tzdata==2024.1
# via pandas
urllib3==2.2.1
requirements.lock
@@ -67,11 +67,11 @@ pandas-stubs==2.2.2.240807
propcache==0.3.2
# via aiohttp
# via yarl
-pycparser==2.22
+pycparser==2.23
# via cffi
-pydantic==2.10.3
+pydantic==2.11.9
# via openai
-pydantic-core==2.27.1
+pydantic-core==2.33.2
# via pydantic
python-dateutil==2.9.0.post0
# via pandas
@@ -93,7 +93,10 @@ typing-extensions==4.12.2
# via openai
# via pydantic
# via pydantic-core
-tzdata==2024.1
+ # via typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
+tzdata==2025.2
# via pandas
websockets==15.0.1
# via openai