Commit 4ebfd515
Changed files (51)
src/
  openai/
    resources/
      beta/
        threads/
      vector_stores/
    types/
      beta/
      evals/
      graders/
      shared_params/
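Summary: this commit inlines several shared types into the modules that use them. The beta `TruncationObject`/`TruncationObjectParam` pair becomes a per-module `TruncationStrategy`, the vector store `VectorStoreExpirationAfterParam` becomes `ExpiresAfter` on the create/update params modules, and the standalone eval JSONL and responses source types are redefined locally inside each data-source and run-response module. The wire format is unchanged; only import paths and type names move.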
src/openai/resources/beta/threads/runs/runs.py
@@ -51,7 +51,6 @@ from .....types.shared_params.metadata import Metadata
from .....types.shared.reasoning_effort import ReasoningEffort
from .....types.beta.assistant_tool_param import AssistantToolParam
from .....types.beta.assistant_stream_event import AssistantStreamEvent
-from .....types.beta.truncation_object_param import TruncationObjectParam
from .....types.beta.threads.runs.run_step_include import RunStepInclude
from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -105,7 +104,7 @@ class Runs(SyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -255,7 +254,7 @@ class Runs(SyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -405,7 +404,7 @@ class Runs(SyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -555,7 +554,7 @@ class Runs(SyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1516,7 +1515,7 @@ class AsyncRuns(AsyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1666,7 +1665,7 @@ class AsyncRuns(AsyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1816,7 +1815,7 @@ class AsyncRuns(AsyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1966,7 +1965,7 @@ class AsyncRuns(AsyncAPIResource):
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
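Call sites that pass a plain dict are unaffected by this swap, since the removed `TruncationObjectParam` and the new `run_create_params.TruncationStrategy` are structurally identical TypedDicts. A minimal sketch (the thread and assistant IDs are hypothetical):

    from openai import OpenAI

    client = OpenAI()
    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",  # hypothetical ID
        assistant_id="asst_abc123",  # hypothetical ID
        # Dict literals still satisfy the new inline TypedDict.
        truncation_strategy={"type": "last_messages", "last_messages": 10},
    )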
src/openai/resources/beta/threads/threads.py
@@ -52,7 +52,6 @@ from ....types.beta.thread_deleted import ThreadDeleted
from ....types.shared_params.metadata import Metadata
from ....types.beta.assistant_tool_param import AssistantToolParam
from ....types.beta.assistant_stream_event import AssistantStreamEvent
-from ....types.beta.truncation_object_param import TruncationObjectParam
from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -286,7 +285,7 @@ class Threads(SyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -419,7 +418,7 @@ class Threads(SyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -552,7 +551,7 @@ class Threads(SyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -685,7 +684,7 @@ class Threads(SyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1135,7 +1134,7 @@ class AsyncThreads(AsyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1268,7 +1267,7 @@ class AsyncThreads(AsyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1401,7 +1400,7 @@ class AsyncThreads(AsyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1534,7 +1533,7 @@ class AsyncThreads(AsyncAPIResource):
tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
- truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
+ truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
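The same inline type now backs `create_and_run`, so a dict literal continues to type-check there as well (assistant ID hypothetical):

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc123",  # hypothetical ID
        truncation_strategy={"type": "auto"},  # "auto" needs no last_messages
    )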
src/openai/resources/vector_stores/vector_stores.py
@@ -43,7 +43,6 @@ from ...types.vector_store_deleted import VectorStoreDeleted
from ...types.shared_params.metadata import Metadata
from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
from ...types.vector_store_search_response import VectorStoreSearchResponse
-from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam
__all__ = ["VectorStores", "AsyncVectorStores"]
@@ -80,7 +79,7 @@ class VectorStores(SyncAPIResource):
self,
*,
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
@@ -178,7 +177,7 @@ class VectorStores(SyncAPIResource):
self,
vector_store_id: str,
*,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -425,7 +424,7 @@ class AsyncVectorStores(AsyncAPIResource):
self,
*,
chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
- expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
+ expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: str | NotGiven = NOT_GIVEN,
@@ -523,7 +522,7 @@ class AsyncVectorStores(AsyncAPIResource):
self,
vector_store_id: str,
*,
- expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
name: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
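`expires_after` follows the same pattern: the TypedDict moves from the shared `VectorStoreExpirationAfterParam` into `vector_store_create_params.ExpiresAfter` and `vector_store_update_params.ExpiresAfter`, so dict-literal callers are unchanged. A sketch:

    vector_store = client.vector_stores.create(
        name="Support FAQ",
        # anchor/days shape carried over from the removed shared type
        expires_after={"anchor": "last_active_at", "days": 7},
    )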
src/openai/types/beta/threads/run.py
@@ -7,12 +7,19 @@ from ...._models import BaseModel
from .run_status import RunStatus
from ..assistant_tool import AssistantTool
from ...shared.metadata import Metadata
-from ..truncation_object import TruncationObject
from ..assistant_tool_choice_option import AssistantToolChoiceOption
from ..assistant_response_format_option import AssistantResponseFormatOption
from .required_action_function_tool_call import RequiredActionFunctionToolCall
-__all__ = ["Run", "IncompleteDetails", "LastError", "RequiredAction", "RequiredActionSubmitToolOutputs", "Usage"]
+__all__ = [
+ "Run",
+ "IncompleteDetails",
+ "LastError",
+ "RequiredAction",
+ "RequiredActionSubmitToolOutputs",
+ "TruncationStrategy",
+ "Usage",
+]
class IncompleteDetails(BaseModel):
@@ -45,6 +52,23 @@ class RequiredAction(BaseModel):
"""For now, this is always `submit_tool_outputs`."""
+class TruncationStrategy(BaseModel):
+ type: Literal["auto", "last_messages"]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int] = None
+ """
+ The number of most recent messages from the thread when constructing the context
+ for the run.
+ """
+
+
class Usage(BaseModel):
completion_tokens: int
"""Number of completion tokens used over the course of the run."""
@@ -201,7 +225,7 @@ class Run(BaseModel):
this run.
"""
- truncation_strategy: Optional[TruncationObject] = None
+ truncation_strategy: Optional[TruncationStrategy] = None
"""Controls for how a thread will be truncated prior to the run.
Use this to control the intial context window of the run.
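On the response side, `TruncationStrategy` is now a pydantic model defined next to `Run`, so reads are unchanged apart from where the class lives. A sketch (IDs hypothetical):

    run = client.beta.threads.runs.retrieve("run_abc123", thread_id="thread_abc123")
    if run.truncation_strategy is not None:
        # Same fields as the removed TruncationObject model
        print(run.truncation_strategy.type, run.truncation_strategy.last_messages)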
src/openai/types/beta/threads/run_create_params.py
@@ -9,7 +9,6 @@ from ...shared.chat_model import ChatModel
from ..assistant_tool_param import AssistantToolParam
from .runs.run_step_include import RunStepInclude
from ...shared_params.metadata import Metadata
-from ..truncation_object_param import TruncationObjectParam
from ...shared.reasoning_effort import ReasoningEffort
from .message_content_part_param import MessageContentPartParam
from ..code_interpreter_tool_param import CodeInterpreterToolParam
@@ -22,6 +21,7 @@ __all__ = [
"AdditionalMessageAttachment",
"AdditionalMessageAttachmentTool",
"AdditionalMessageAttachmentToolFileSearch",
+ "TruncationStrategy",
"RunCreateParamsNonStreaming",
"RunCreateParamsStreaming",
]
@@ -173,7 +173,7 @@ class RunCreateParamsBase(TypedDict, total=False):
We generally recommend altering this or temperature but not both.
"""
- truncation_strategy: Optional[TruncationObjectParam]
+ truncation_strategy: Optional[TruncationStrategy]
"""Controls for how a thread will be truncated prior to the run.
Use this to control the intial context window of the run.
@@ -223,6 +223,23 @@ class AdditionalMessage(TypedDict, total=False):
"""
+class TruncationStrategy(TypedDict, total=False):
+ type: Required[Literal["auto", "last_messages"]]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int]
+ """
+ The number of most recent messages from the thread when constructing the context
+ for the run.
+ """
+
+
class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False):
stream: Optional[Literal[False]]
"""
src/openai/types/beta/__init__.py
@@ -9,7 +9,6 @@ from .assistant_tool import AssistantTool as AssistantTool
from .thread_deleted import ThreadDeleted as ThreadDeleted
from .file_search_tool import FileSearchTool as FileSearchTool
from .assistant_deleted import AssistantDeleted as AssistantDeleted
-from .truncation_object import TruncationObject as TruncationObject
from .function_tool_param import FunctionToolParam as FunctionToolParam
from .assistant_tool_param import AssistantToolParam as AssistantToolParam
from .thread_create_params import ThreadCreateParams as ThreadCreateParams
@@ -21,7 +20,6 @@ from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
-from .truncation_object_param import TruncationObjectParam as TruncationObjectParam
from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
src/openai/types/beta/thread_create_and_run_params.py
@@ -8,7 +8,6 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared.chat_model import ChatModel
from .assistant_tool_param import AssistantToolParam
from ..shared_params.metadata import Metadata
-from .truncation_object_param import TruncationObjectParam
from .code_interpreter_tool_param import CodeInterpreterToolParam
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
from .threads.message_content_part_param import MessageContentPartParam
@@ -32,6 +31,7 @@ __all__ = [
"ToolResources",
"ToolResourcesCodeInterpreter",
"ToolResourcesFileSearch",
+ "TruncationStrategy",
"ThreadCreateAndRunParamsNonStreaming",
"ThreadCreateAndRunParamsStreaming",
]
@@ -166,7 +166,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
We generally recommend altering this or temperature but not both.
"""
- truncation_strategy: Optional[TruncationObjectParam]
+ truncation_strategy: Optional[TruncationStrategy]
"""Controls for how a thread will be truncated prior to the run.
Use this to control the intial context window of the run.
@@ -358,6 +358,23 @@ class ToolResources(TypedDict, total=False):
file_search: ToolResourcesFileSearch
+class TruncationStrategy(TypedDict, total=False):
+ type: Required[Literal["auto", "last_messages"]]
+ """The truncation strategy to use for the thread.
+
+ The default is `auto`. If set to `last_messages`, the thread will be truncated
+ to the n most recent messages in the thread. When set to `auto`, messages in the
+ middle of the thread will be dropped to fit the context length of the model,
+ `max_prompt_tokens`.
+ """
+
+ last_messages: Optional[int]
+ """
+ The number of most recent messages from the thread when constructing the context
+ for the run.
+ """
+
+
class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False):
stream: Optional[Literal[False]]
"""
src/openai/types/beta/truncation_object.py
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["TruncationObject"]
-
-
-class TruncationObject(BaseModel):
- type: Literal["auto", "last_messages"]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int] = None
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
src/openai/types/beta/truncation_object_param.py
@@ -1,25 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["TruncationObjectParam"]
-
-
-class TruncationObjectParam(TypedDict, total=False):
- type: Required[Literal["auto", "last_messages"]]
- """The truncation strategy to use for the thread.
-
- The default is `auto`. If set to `last_messages`, the thread will be truncated
- to the n most recent messages in the thread. When set to `auto`, messages in the
- middle of the thread will be dropped to fit the context length of the model,
- `max_prompt_tokens`.
- """
-
- last_messages: Optional[int]
- """
- The number of most recent messages from the thread when constructing the context
- for the run.
- """
src/openai/types/evals/__init__.py
@@ -10,21 +10,13 @@ from .run_cancel_response import RunCancelResponse as RunCancelResponse
from .run_create_response import RunCreateResponse as RunCreateResponse
from .run_delete_response import RunDeleteResponse as RunDeleteResponse
from .run_retrieve_response import RunRetrieveResponse as RunRetrieveResponse
-from .eval_jsonl_file_id_source import EvalJSONLFileIDSource as EvalJSONLFileIDSource
-from .eval_jsonl_file_content_source import EvalJSONLFileContentSource as EvalJSONLFileContentSource
-from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam as EvalJSONLFileIDSourceParam
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource
-from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam as EvalJSONLFileContentSourceParam
-from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource
from .create_eval_completions_run_data_source import (
CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource,
)
from .create_eval_jsonl_run_data_source_param import (
CreateEvalJSONLRunDataSourceParam as CreateEvalJSONLRunDataSourceParam,
)
-from .create_eval_responses_run_data_source_param import (
- CreateEvalResponsesRunDataSourceParam as CreateEvalResponsesRunDataSourceParam,
-)
from .create_eval_completions_run_data_source_param import (
CreateEvalCompletionsRunDataSourceParam as CreateEvalCompletionsRunDataSourceParam,
)
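The removed re-exports follow the same inlining: the standalone `EvalJSONLFile*Source` types and `CreateEvalResponsesRunDataSource*` are gone, and each data-source module now defines `SourceFileContent`/`SourceFileID` locally. A migration sketch:

    # Before (removed):
    # from openai.types.evals import EvalJSONLFileIDSource

    # After: the per-module inline equivalent
    from openai.types.evals.create_eval_jsonl_run_data_source import SourceFileID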
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -1,28 +1,54 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..shared.metadata import Metadata
-from ..shared.eval_item import EvalItem
-from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
from ..responses.easy_input_message import EasyInputMessage
-from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
+from ..responses.response_input_text import ResponseInputText
__all__ = [
"CreateEvalCompletionsRunDataSource",
"Source",
+ "SourceFileContent",
+ "SourceFileContentContent",
+ "SourceFileID",
"SourceStoredCompletions",
"InputMessages",
"InputMessagesTemplate",
"InputMessagesTemplateTemplate",
+ "InputMessagesTemplateTemplateMessage",
+ "InputMessagesTemplateTemplateMessageContent",
+ "InputMessagesTemplateTemplateMessageContentOutputText",
"InputMessagesItemReference",
"SamplingParams",
]
+class SourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class SourceFileContent(BaseModel):
+ content: List[SourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class SourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
class SourceStoredCompletions(BaseModel):
type: Literal["stored_completions"]
"""The type of source. Always `stored_completions`."""
@@ -51,12 +77,39 @@ class SourceStoredCompletions(BaseModel):
Source: TypeAlias = Annotated[
- Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceStoredCompletions],
- PropertyInfo(discriminator="type"),
+ Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type")
+]
+
+
+class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+ str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText
]
+
+class InputMessagesTemplateTemplateMessage(BaseModel):
+ content: InputMessagesTemplateTemplateMessageContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
InputMessagesTemplateTemplate: TypeAlias = Annotated[
- Union[EasyInputMessage, EvalItem], PropertyInfo(discriminator="type")
+ Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
]
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -2,27 +2,53 @@
from __future__ import annotations
-from typing import Union, Iterable, Optional
+from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
-from ..shared_params.eval_item import EvalItem
-from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
from ..responses.easy_input_message_param import EasyInputMessageParam
-from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
+from ..responses.response_input_text_param import ResponseInputTextParam
__all__ = [
"CreateEvalCompletionsRunDataSourceParam",
"Source",
+ "SourceFileContent",
+ "SourceFileContentContent",
+ "SourceFileID",
"SourceStoredCompletions",
"InputMessages",
"InputMessagesTemplate",
"InputMessagesTemplateTemplate",
+ "InputMessagesTemplateTemplateMessage",
+ "InputMessagesTemplateTemplateMessageContent",
+ "InputMessagesTemplateTemplateMessageContentOutputText",
"InputMessagesItemReference",
"SamplingParams",
]
+class SourceFileContentContent(TypedDict, total=False):
+ item: Required[Dict[str, object]]
+
+ sample: Dict[str, object]
+
+
+class SourceFileContent(TypedDict, total=False):
+ content: Required[Iterable[SourceFileContentContent]]
+ """The content of the jsonl file."""
+
+ type: Required[Literal["file_content"]]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class SourceFileID(TypedDict, total=False):
+ id: Required[str]
+ """The identifier of the file."""
+
+ type: Required[Literal["file_id"]]
+ """The type of jsonl source. Always `file_id`."""
+
+
class SourceStoredCompletions(TypedDict, total=False):
type: Required[Literal["stored_completions"]]
"""The type of source. Always `stored_completions`."""
@@ -50,9 +76,37 @@ class SourceStoredCompletions(TypedDict, total=False):
"""An optional model to filter by (e.g., 'gpt-4o')."""
-Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceStoredCompletions]
+Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
+
+
+class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+ str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText
+]
+
+
+class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
+ content: Required[InputMessagesTemplateTemplateMessageContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
-InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, EvalItem]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
class InputMessagesTemplate(TypedDict, total=False):
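Constructing these params is unchanged at the dict level; the source union simply names local TypedDicts now. A sketch with a hypothetical file ID and template:

    data_source = {
        "type": "completions",
        "source": {"type": "file_id", "id": "file-abc123"},  # hypothetical file ID
        "input_messages": {
            "type": "template",
            "template": [
                {"type": "message", "role": "developer", "content": "Answer concisely."},
                # Template strings reference the "item" namespace
                {"type": "message", "role": "user", "content": "{{item.question}}"},
            ],
        },
        "model": "gpt-4o-mini",
    }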
src/openai/types/evals/create_eval_jsonl_run_data_source.py
@@ -1,18 +1,37 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Union
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
-from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
-from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
-__all__ = ["CreateEvalJSONLRunDataSource", "Source"]
+__all__ = ["CreateEvalJSONLRunDataSource", "Source", "SourceFileContent", "SourceFileContentContent", "SourceFileID"]
-Source: TypeAlias = Annotated[
- Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource], PropertyInfo(discriminator="type")
-]
+
+class SourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class SourceFileContent(BaseModel):
+ content: List[SourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class SourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+Source: TypeAlias = Annotated[Union[SourceFileContent, SourceFileID], PropertyInfo(discriminator="type")]
class CreateEvalJSONLRunDataSource(BaseModel):
src/openai/types/evals/create_eval_jsonl_run_data_source_param.py
@@ -2,15 +2,41 @@
from __future__ import annotations
-from typing import Union
+from typing import Dict, Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
-from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
+__all__ = [
+ "CreateEvalJSONLRunDataSourceParam",
+ "Source",
+ "SourceFileContent",
+ "SourceFileContentContent",
+ "SourceFileID",
+]
-__all__ = ["CreateEvalJSONLRunDataSourceParam", "Source"]
-Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam]
+class SourceFileContentContent(TypedDict, total=False):
+ item: Required[Dict[str, object]]
+
+ sample: Dict[str, object]
+
+
+class SourceFileContent(TypedDict, total=False):
+ content: Required[Iterable[SourceFileContentContent]]
+ """The content of the jsonl file."""
+
+ type: Required[Literal["file_content"]]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class SourceFileID(TypedDict, total=False):
+ id: Required[str]
+ """The identifier of the file."""
+
+ type: Required[Literal["file_id"]]
+ """The type of jsonl source. Always `file_id`."""
+
+
+Source: TypeAlias = Union[SourceFileContent, SourceFileID]
class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False):
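A matching sketch for the JSONL variant, using inline file content (sample rows hypothetical):

    jsonl_source = {
        "type": "jsonl",
        "source": {
            "type": "file_content",
            "content": [
                {"item": {"input": "2+2", "expected": "4"}},  # hypothetical row
            ],
        },
    }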
src/openai/types/evals/create_eval_responses_run_data_source.py
@@ -1,151 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-from ..shared.eval_item import EvalItem
-from ..shared.reasoning_effort import ReasoningEffort
-from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
-from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
-
-__all__ = [
- "CreateEvalResponsesRunDataSource",
- "Source",
- "SourceResponses",
- "InputMessages",
- "InputMessagesTemplate",
- "InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateChatMessage",
- "InputMessagesItemReference",
- "SamplingParams",
-]
-
-
-class SourceResponses(BaseModel):
- type: Literal["responses"]
- """The type of run data source. Always `responses`."""
-
- created_after: Optional[int] = None
- """Only include items created after this timestamp (inclusive).
-
- This is a query parameter used to select responses.
- """
-
- created_before: Optional[int] = None
- """Only include items created before this timestamp (inclusive).
-
- This is a query parameter used to select responses.
- """
-
- has_tool_calls: Optional[bool] = None
- """Whether the response has tool calls.
-
- This is a query parameter used to select responses.
- """
-
- instructions_search: Optional[str] = None
- """Optional string to search the 'instructions' field.
-
- This is a query parameter used to select responses.
- """
-
- metadata: Optional[object] = None
- """Metadata filter for the responses.
-
- This is a query parameter used to select responses.
- """
-
- model: Optional[str] = None
- """The name of the model to find responses for.
-
- This is a query parameter used to select responses.
- """
-
- reasoning_effort: Optional[ReasoningEffort] = None
- """Optional reasoning effort parameter.
-
- This is a query parameter used to select responses.
- """
-
- temperature: Optional[float] = None
- """Sampling temperature. This is a query parameter used to select responses."""
-
- tools: Optional[List[str]] = None
- """List of tool names. This is a query parameter used to select responses."""
-
- top_p: Optional[float] = None
- """Nucleus sampling parameter. This is a query parameter used to select responses."""
-
- users: Optional[List[str]] = None
- """List of user identifiers. This is a query parameter used to select responses."""
-
-
-Source: TypeAlias = Annotated[
- Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceResponses], PropertyInfo(discriminator="type")
-]
-
-
-class InputMessagesTemplateTemplateChatMessage(BaseModel):
- content: str
- """The content of the message."""
-
- role: str
- """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem]
-
-
-class InputMessagesTemplate(BaseModel):
- template: List[InputMessagesTemplateTemplate]
- """A list of chat messages forming the prompt or context.
-
- May include variable references to the "item" namespace, ie {{item.name}}.
- """
-
- type: Literal["template"]
- """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(BaseModel):
- item_reference: str
- """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
- type: Literal["item_reference"]
- """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Annotated[
- Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
-]
-
-
-class SamplingParams(BaseModel):
- max_completion_tokens: Optional[int] = None
- """The maximum number of tokens in the generated output."""
-
- seed: Optional[int] = None
- """A seed value to initialize the randomness, during sampling."""
-
- temperature: Optional[float] = None
- """A higher temperature increases randomness in the outputs."""
-
- top_p: Optional[float] = None
- """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
-
-
-class CreateEvalResponsesRunDataSource(BaseModel):
- source: Source
- """A EvalResponsesSource object describing a run data source configuration."""
-
- type: Literal["responses"]
- """The type of run data source. Always `responses`."""
-
- input_messages: Optional[InputMessages] = None
-
- model: Optional[str] = None
- """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
- sampling_params: Optional[SamplingParams] = None
src/openai/types/evals/create_eval_responses_run_data_source_param.py
@@ -1,147 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..shared.reasoning_effort import ReasoningEffort
-from ..shared_params.eval_item import EvalItem
-from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
-from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
-
-__all__ = [
- "CreateEvalResponsesRunDataSourceParam",
- "Source",
- "SourceResponses",
- "InputMessages",
- "InputMessagesTemplate",
- "InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateChatMessage",
- "InputMessagesItemReference",
- "SamplingParams",
-]
-
-
-class SourceResponses(TypedDict, total=False):
- type: Required[Literal["responses"]]
- """The type of run data source. Always `responses`."""
-
- created_after: Optional[int]
- """Only include items created after this timestamp (inclusive).
-
- This is a query parameter used to select responses.
- """
-
- created_before: Optional[int]
- """Only include items created before this timestamp (inclusive).
-
- This is a query parameter used to select responses.
- """
-
- has_tool_calls: Optional[bool]
- """Whether the response has tool calls.
-
- This is a query parameter used to select responses.
- """
-
- instructions_search: Optional[str]
- """Optional string to search the 'instructions' field.
-
- This is a query parameter used to select responses.
- """
-
- metadata: Optional[object]
- """Metadata filter for the responses.
-
- This is a query parameter used to select responses.
- """
-
- model: Optional[str]
- """The name of the model to find responses for.
-
- This is a query parameter used to select responses.
- """
-
- reasoning_effort: Optional[ReasoningEffort]
- """Optional reasoning effort parameter.
-
- This is a query parameter used to select responses.
- """
-
- temperature: Optional[float]
- """Sampling temperature. This is a query parameter used to select responses."""
-
- tools: Optional[List[str]]
- """List of tool names. This is a query parameter used to select responses."""
-
- top_p: Optional[float]
- """Nucleus sampling parameter. This is a query parameter used to select responses."""
-
- users: Optional[List[str]]
- """List of user identifiers. This is a query parameter used to select responses."""
-
-
-Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceResponses]
-
-
-class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
- content: Required[str]
- """The content of the message."""
-
- role: Required[str]
- """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem]
-
-
-class InputMessagesTemplate(TypedDict, total=False):
- template: Required[Iterable[InputMessagesTemplateTemplate]]
- """A list of chat messages forming the prompt or context.
-
- May include variable references to the "item" namespace, ie {{item.name}}.
- """
-
- type: Required[Literal["template"]]
- """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(TypedDict, total=False):
- item_reference: Required[str]
- """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
- type: Required[Literal["item_reference"]]
- """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
-
-
-class SamplingParams(TypedDict, total=False):
- max_completion_tokens: int
- """The maximum number of tokens in the generated output."""
-
- seed: int
- """A seed value to initialize the randomness, during sampling."""
-
- temperature: float
- """A higher temperature increases randomness in the outputs."""
-
- top_p: float
- """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
-
-
-class CreateEvalResponsesRunDataSourceParam(TypedDict, total=False):
- source: Required[Source]
- """A EvalResponsesSource object describing a run data source configuration."""
-
- type: Required[Literal["responses"]]
- """The type of run data source. Always `responses`."""
-
- input_messages: InputMessages
-
- model: str
- """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
- sampling_params: SamplingParams
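Both the model and the param variants of the standalone responses data source are deleted outright; the same structure reappears inline as `DataSourceResponses` on each run response module (see run_cancel_response.py below) and as `DataSourceCreateEvalResponsesRunDataSource` in run_create_params.py. Old imports need updating, e.g.:

    # Before (removed):
    # from openai.types.evals import CreateEvalResponsesRunDataSourceParam

    # After: the inline copy listed in run_create_params.__all__
    from openai.types.evals.run_create_params import (
        DataSourceCreateEvalResponsesRunDataSource,
    )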
src/openai/types/evals/eval_jsonl_file_content_source.py
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["EvalJSONLFileContentSource", "Content"]
-
-
-class Content(BaseModel):
- item: Dict[str, object]
-
- sample: Optional[Dict[str, object]] = None
-
-
-class EvalJSONLFileContentSource(BaseModel):
- content: List[Content]
- """The content of the jsonl file."""
-
- type: Literal["file_content"]
- """The type of jsonl source. Always `file_content`."""
src/openai/types/evals/eval_jsonl_file_content_source_param.py
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["EvalJSONLFileContentSourceParam", "Content"]
-
-
-class Content(TypedDict, total=False):
- item: Required[Dict[str, object]]
-
- sample: Dict[str, object]
-
-
-class EvalJSONLFileContentSourceParam(TypedDict, total=False):
- content: Required[Iterable[Content]]
- """The content of the jsonl file."""
-
- type: Required[Literal["file_content"]]
- """The type of jsonl source. Always `file_content`."""
src/openai/types/evals/eval_jsonl_file_id_source.py
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["EvalJSONLFileIDSource"]
-
-
-class EvalJSONLFileIDSource(BaseModel):
- id: str
- """The identifier of the file."""
-
- type: Literal["file_id"]
- """The type of jsonl source. Always `file_id`."""
src/openai/types/evals/eval_jsonl_file_id_source_param.py
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["EvalJSONLFileIDSourceParam"]
-
-
-class EvalJSONLFileIDSourceParam(TypedDict, total=False):
- id: Required[str]
- """The identifier of the file."""
-
- type: Required[Literal["file_id"]]
- """The type of jsonl source. Always `file_id`."""
src/openai/types/evals/run_cancel_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,14 +9,219 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
-from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunCancelResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunCancelResponse",
+ "DataSource",
+ "DataSourceResponses",
+ "DataSourceResponsesSource",
+ "DataSourceResponsesSourceFileContent",
+ "DataSourceResponsesSourceFileContentContent",
+ "DataSourceResponsesSourceFileID",
+ "DataSourceResponsesSourceResponses",
+ "DataSourceResponsesInputMessages",
+ "DataSourceResponsesInputMessagesTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplateChatMessage",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceResponsesInputMessagesItemReference",
+ "DataSourceResponsesSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceResponsesSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceResponsesSourceFileContent(BaseModel):
+ content: List[DataSourceResponsesSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceResponsesSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceResponsesSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional string to search the 'instructions' field.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ tools: Optional[List[str]] = None
+ """List of tool names. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceResponsesSource: TypeAlias = Annotated[
+ Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceResponsesInputMessagesTemplateTemplateChatMessage,
+ DataSourceResponsesInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceResponsesInputMessagesTemplate(BaseModel):
+ template: List[DataSourceResponsesInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, ie {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceResponsesInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceResponsesInputMessages: TypeAlias = Annotated[
+ Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceResponses(BaseModel):
+ source: DataSourceResponsesSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ input_messages: Optional[DataSourceResponsesInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceResponsesSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource],
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses],
PropertyInfo(discriminator="type"),
]
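Because `DataSource` remains a `type`-discriminated union, consumers can keep checking the `type` tag at runtime; only the class behind the "responses" arm changed. A sketch (IDs hypothetical, and assuming the `cancel(run_id, eval_id=...)` call shape):

    run = client.evals.runs.cancel("run_abc123", eval_id="eval_abc123")
    if run.data_source.type == "responses":
        # Narrowed to the new inline DataSourceResponses model
        print(run.data_source.model, run.data_source.sampling_params)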
src/openai/types/evals/run_create_params.py
@@ -2,15 +2,34 @@
from __future__ import annotations
-from typing import Union, Optional
-from typing_extensions import Required, TypeAlias, TypedDict
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text_param import ResponseInputTextParam
from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam
-from .create_eval_responses_run_data_source_param import CreateEvalResponsesRunDataSourceParam
from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam
-__all__ = ["RunCreateParams", "DataSource"]
+__all__ = [
+ "RunCreateParams",
+ "DataSource",
+ "DataSourceCreateEvalResponsesRunDataSource",
+ "DataSourceCreateEvalResponsesRunDataSourceSource",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileContent",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceFileID",
+ "DataSourceCreateEvalResponsesRunDataSourceSourceResponses",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessages",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference",
+ "DataSourceCreateEvalResponsesRunDataSourceSamplingParams",
+]
class RunCreateParams(TypedDict, total=False):
@@ -31,6 +50,195 @@ class RunCreateParams(TypedDict, total=False):
"""The name of the run."""
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent(TypedDict, total=False):
+ item: Required[Dict[str, object]]
+
+ sample: Dict[str, object]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContent(TypedDict, total=False):
+ content: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent]]
+ """The content of the jsonl file."""
+
+ type: Required[Literal["file_content"]]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=False):
+ id: Required[str]
+ """The identifier of the file."""
+
+ type: Required[Literal["file_id"]]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False):
+ type: Required[Literal["responses"]]
+ """The type of run data source. Always `responses`."""
+
+ created_after: Optional[int]
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int]
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool]
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str]
+ """Optional string to search the 'instructions' field.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object]
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str]
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort]
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float]
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ tools: Optional[List[str]]
+ """List of tool names. This is a query parameter used to select responses."""
+
+ top_p: Optional[float]
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]]
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceSource: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceSourceFileContent,
+ DataSourceCreateEvalResponsesRunDataSourceSourceFileID,
+ DataSourceCreateEvalResponsesRunDataSourceSourceResponses,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
+ content: Required[str]
+ """The content of the message."""
+
+ role: Required[str]
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText(
+ TypedDict, total=False
+):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str,
+ ResponseInputTextParam,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
+ content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, total=False):
+ template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, e.g. {{item.name}}.
+ """
+
+ type: Required[Literal["template"]]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False):
+ item_reference: Required[str]
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Required[Literal["item_reference"]]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessages: TypeAlias = Union[
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate,
+ DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False):
+ max_completion_tokens: int
+ """The maximum number of tokens in the generated output."""
+
+ seed: int
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: float
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: float
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False):
+ source: Required[DataSourceCreateEvalResponsesRunDataSourceSource]
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Required[Literal["responses"]]
+ """The type of run data source. Always `responses`."""
+
+ input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages
+
+ model: str
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: DataSourceCreateEvalResponsesRunDataSourceSamplingParams
+
+
DataSource: TypeAlias = Union[
- CreateEvalJSONLRunDataSourceParam, CreateEvalCompletionsRunDataSourceParam, CreateEvalResponsesRunDataSourceParam
+ CreateEvalJSONLRunDataSourceParam,
+ CreateEvalCompletionsRunDataSourceParam,
+ DataSourceCreateEvalResponsesRunDataSource,
]
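For orientation, a minimal sketch of what creating a run with the newly inlined `responses` data source params could look like; the eval ID, model name, and template text are placeholders rather than values taken from this commit:

```python
from openai import OpenAI

client = OpenAI()

# Hypothetical usage: the responses data source is now described by the
# inlined DataSourceCreateEvalResponsesRunDataSource TypedDict instead of
# the removed CreateEvalResponsesRunDataSourceParam.
run = client.evals.runs.create(
    eval_id="eval_123",  # placeholder ID
    data_source={
        "type": "responses",
        "source": {"type": "responses", "model": "gpt-4o"},
        "input_messages": {
            "type": "template",
            "template": [
                {"role": "user", "content": "Summarize: {{item.text}}"},
            ],
        },
    },
)
```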
src/openai/types/evals/run_create_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,14 +9,219 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
-from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunCreateResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunCreateResponse",
+ "DataSource",
+ "DataSourceResponses",
+ "DataSourceResponsesSource",
+ "DataSourceResponsesSourceFileContent",
+ "DataSourceResponsesSourceFileContentContent",
+ "DataSourceResponsesSourceFileID",
+ "DataSourceResponsesSourceResponses",
+ "DataSourceResponsesInputMessages",
+ "DataSourceResponsesInputMessagesTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplateChatMessage",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceResponsesInputMessagesItemReference",
+ "DataSourceResponsesSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceResponsesSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceResponsesSourceFileContent(BaseModel):
+ content: List[DataSourceResponsesSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceResponsesSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceResponsesSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional string to search the 'instructions' field.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ tools: Optional[List[str]] = None
+ """List of tool names. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceResponsesSource: TypeAlias = Annotated[
+ Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceResponsesInputMessagesTemplateTemplateChatMessage,
+ DataSourceResponsesInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceResponsesInputMessagesTemplate(BaseModel):
+ template: List[DataSourceResponsesInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, e.g. {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceResponsesInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceResponsesInputMessages: TypeAlias = Annotated[
+ Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceResponses(BaseModel):
+ source: DataSourceResponsesSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ input_messages: Optional[DataSourceResponsesInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceResponsesSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource],
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses],
PropertyInfo(discriminator="type"),
]
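The `Annotated[Union[...], PropertyInfo(discriminator="type")]` aliases above let the deserializer dispatch on the wire `type` field instead of trying each union member in turn. A self-contained sketch of the same idea, using plain pydantic's `Field(discriminator=...)` in place of the SDK-internal `PropertyInfo`; the model shapes and payload are illustrative only:

```python
from typing import Union
from typing_extensions import Annotated, Literal, TypeAlias

from pydantic import BaseModel, Field


class FileIDSource(BaseModel):
    type: Literal["file_id"]
    id: str


class ResponsesSource(BaseModel):
    type: Literal["responses"]
    model: str


# Discriminated union: validation reads `type` and picks exactly one variant.
Source: TypeAlias = Annotated[Union[FileIDSource, ResponsesSource], Field(discriminator="type")]


class Container(BaseModel):
    source: Source


parsed = Container.model_validate({"source": {"type": "responses", "model": "gpt-4o"}})
assert isinstance(parsed.source, ResponsesSource)
```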
src/openai/types/evals/run_list_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,14 +9,219 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
-from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunListResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunListResponse",
+ "DataSource",
+ "DataSourceResponses",
+ "DataSourceResponsesSource",
+ "DataSourceResponsesSourceFileContent",
+ "DataSourceResponsesSourceFileContentContent",
+ "DataSourceResponsesSourceFileID",
+ "DataSourceResponsesSourceResponses",
+ "DataSourceResponsesInputMessages",
+ "DataSourceResponsesInputMessagesTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplateChatMessage",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceResponsesInputMessagesItemReference",
+ "DataSourceResponsesSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceResponsesSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceResponsesSourceFileContent(BaseModel):
+ content: List[DataSourceResponsesSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceResponsesSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceResponsesSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional string to search the 'instructions' field.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ tools: Optional[List[str]] = None
+ """List of tool names. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceResponsesSource: TypeAlias = Annotated[
+ Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceResponsesInputMessagesTemplateTemplateChatMessage,
+ DataSourceResponsesInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceResponsesInputMessagesTemplate(BaseModel):
+ template: List[DataSourceResponsesInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, e.g. {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceResponsesInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceResponsesInputMessages: TypeAlias = Annotated[
+ Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceResponses(BaseModel):
+ source: DataSourceResponsesSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ input_messages: Optional[DataSourceResponsesInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceResponsesSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource],
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses],
PropertyInfo(discriminator="type"),
]
src/openai/types/evals/run_retrieve_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from pydantic import Field as FieldInfo
@@ -9,14 +9,219 @@ from ..._utils import PropertyInfo
from ..._models import BaseModel
from .eval_api_error import EvalAPIError
from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
-from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource
from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
-__all__ = ["RunRetrieveResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+ "RunRetrieveResponse",
+ "DataSource",
+ "DataSourceResponses",
+ "DataSourceResponsesSource",
+ "DataSourceResponsesSourceFileContent",
+ "DataSourceResponsesSourceFileContentContent",
+ "DataSourceResponsesSourceFileID",
+ "DataSourceResponsesSourceResponses",
+ "DataSourceResponsesInputMessages",
+ "DataSourceResponsesInputMessagesTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplate",
+ "DataSourceResponsesInputMessagesTemplateTemplateChatMessage",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent",
+ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText",
+ "DataSourceResponsesInputMessagesItemReference",
+ "DataSourceResponsesSamplingParams",
+ "PerModelUsage",
+ "PerTestingCriteriaResult",
+ "ResultCounts",
+]
+
+
+class DataSourceResponsesSourceFileContentContent(BaseModel):
+ item: Dict[str, object]
+
+ sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceResponsesSourceFileContent(BaseModel):
+ content: List[DataSourceResponsesSourceFileContentContent]
+ """The content of the jsonl file."""
+
+ type: Literal["file_content"]
+ """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceResponsesSourceFileID(BaseModel):
+ id: str
+ """The identifier of the file."""
+
+ type: Literal["file_id"]
+ """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceResponsesSourceResponses(BaseModel):
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ created_after: Optional[int] = None
+ """Only include items created after this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ created_before: Optional[int] = None
+ """Only include items created before this timestamp (inclusive).
+
+ This is a query parameter used to select responses.
+ """
+
+ has_tool_calls: Optional[bool] = None
+ """Whether the response has tool calls.
+
+ This is a query parameter used to select responses.
+ """
+
+ instructions_search: Optional[str] = None
+ """Optional string to search the 'instructions' field.
+
+ This is a query parameter used to select responses.
+ """
+
+ metadata: Optional[object] = None
+ """Metadata filter for the responses.
+
+ This is a query parameter used to select responses.
+ """
+
+ model: Optional[str] = None
+ """The name of the model to find responses for.
+
+ This is a query parameter used to select responses.
+ """
+
+ reasoning_effort: Optional[ReasoningEffort] = None
+ """Optional reasoning effort parameter.
+
+ This is a query parameter used to select responses.
+ """
+
+ temperature: Optional[float] = None
+ """Sampling temperature. This is a query parameter used to select responses."""
+
+ tools: Optional[List[str]] = None
+ """List of tool names. This is a query parameter used to select responses."""
+
+ top_p: Optional[float] = None
+ """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+ users: Optional[List[str]] = None
+ """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceResponsesSource: TypeAlias = Annotated[
+ Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel):
+ content: str
+ """The content of the message."""
+
+ role: str
+ """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+ str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
+
+
+DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[
+ DataSourceResponsesInputMessagesTemplateTemplateChatMessage,
+ DataSourceResponsesInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceResponsesInputMessagesTemplate(BaseModel):
+ template: List[DataSourceResponsesInputMessagesTemplateTemplate]
+ """A list of chat messages forming the prompt or context.
+
+ May include variable references to the "item" namespace, e.g. {{item.name}}.
+ """
+
+ type: Literal["template"]
+ """The type of input messages. Always `template`."""
+
+
+class DataSourceResponsesInputMessagesItemReference(BaseModel):
+ item_reference: str
+ """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+ type: Literal["item_reference"]
+ """The type of input messages. Always `item_reference`."""
+
+
+DataSourceResponsesInputMessages: TypeAlias = Annotated[
+ Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceResponsesSamplingParams(BaseModel):
+ max_completion_tokens: Optional[int] = None
+ """The maximum number of tokens in the generated output."""
+
+ seed: Optional[int] = None
+ """A seed value to initialize the randomness, during sampling."""
+
+ temperature: Optional[float] = None
+ """A higher temperature increases randomness in the outputs."""
+
+ top_p: Optional[float] = None
+ """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceResponses(BaseModel):
+ source: DataSourceResponsesSource
+ """A EvalResponsesSource object describing a run data source configuration."""
+
+ type: Literal["responses"]
+ """The type of run data source. Always `responses`."""
+
+ input_messages: Optional[DataSourceResponsesInputMessages] = None
+
+ model: Optional[str] = None
+ """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+ sampling_params: Optional[DataSourceResponsesSamplingParams] = None
+
DataSource: TypeAlias = Annotated[
- Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource],
+ Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses],
PropertyInfo(discriminator="type"),
]
src/openai/types/graders/label_model_grader.py
@@ -1,16 +1,41 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List
-from typing_extensions import Literal
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
-from ..shared.eval_item import EvalItem
+from ..responses.response_input_text import ResponseInputText
-__all__ = ["LabelModelGrader"]
+__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
+
+
+class Input(BaseModel):
+ content: InputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
class LabelModelGrader(BaseModel):
- input: List[EvalItem]
+ input: List[Input]
labels: List[str]
"""The labels to assign to each item in the evaluation."""
src/openai/types/graders/label_model_grader_param.py
@@ -2,16 +2,41 @@
from __future__ import annotations
-from typing import List, Iterable
-from typing_extensions import Literal, Required, TypedDict
+from typing import List, Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from ..shared_params.eval_item import EvalItem
+from ..responses.response_input_text_param import ResponseInputTextParam
-__all__ = ["LabelModelGraderParam"]
+__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
+
+
+class Input(TypedDict, total=False):
+ content: Required[InputContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
class LabelModelGraderParam(TypedDict, total=False):
- input: Required[Iterable[EvalItem]]
+ input: Required[Iterable[Input]]
labels: Required[List[str]]
"""The labels to assign to each item in the evaluation."""
src/openai/types/graders/multi_grader.py
@@ -25,4 +25,4 @@ class MultiGrader(BaseModel):
"""The name of the grader."""
type: Literal["multi"]
- """The type of grader."""
+ """The object type, which is always `multi`."""
src/openai/types/graders/multi_grader_param.py
@@ -28,4 +28,4 @@ class MultiGraderParam(TypedDict, total=False):
"""The name of the grader."""
type: Required[Literal["multi"]]
- """The type of grader."""
+ """The object type, which is always `multi`."""
src/openai/types/graders/score_model_grader.py
@@ -1,16 +1,41 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
-from typing_extensions import Literal
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
-from ..shared.eval_item import EvalItem
+from ..responses.response_input_text import ResponseInputText
-__all__ = ["ScoreModelGrader"]
+__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(BaseModel):
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
+
+
+class Input(BaseModel):
+ content: InputContent
+ """Text inputs to the model - can contain template strings."""
+
+ role: Literal["user", "assistant", "system", "developer"]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Optional[Literal["message"]] = None
+ """The type of the message input. Always `message`."""
class ScoreModelGrader(BaseModel):
- input: List[EvalItem]
+ input: List[Input]
"""The input text. This may include template strings."""
model: str
src/openai/types/graders/score_model_grader_param.py
@@ -2,16 +2,41 @@
from __future__ import annotations
-from typing import Iterable
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
-from ..shared_params.eval_item import EvalItem
+from ..responses.response_input_text_param import ResponseInputTextParam
-__all__ = ["ScoreModelGraderParam"]
+__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
+
+
+class InputContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
+
+
+class Input(TypedDict, total=False):
+ content: Required[InputContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
class ScoreModelGraderParam(TypedDict, total=False):
- input: Required[Iterable[EvalItem]]
+ input: Required[Iterable[Input]]
"""The input text. This may include template strings."""
model: Required[str]
src/openai/types/__init__.py
@@ -6,7 +6,6 @@ from .batch import Batch as Batch
from .image import Image as Image
from .model import Model as Model
from .shared import (
- EvalItem as EvalItem,
Metadata as Metadata,
AllModels as AllModels,
ChatModel as ChatModel,
@@ -71,20 +70,17 @@ from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCr
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
-from .eval_logs_data_source_config import EvalLogsDataSourceConfig as EvalLogsDataSourceConfig
from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
-from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter
from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam
from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
from .eval_stored_completions_data_source_config import (
EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig,
src/openai/types/eval_create_params.py
@@ -6,10 +6,10 @@ from typing import Dict, List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .shared_params.metadata import Metadata
-from .shared_params.eval_item import EvalItem
from .graders.python_grader_param import PythonGraderParam
from .graders.score_model_grader_param import ScoreModelGraderParam
from .graders.string_check_grader_param import StringCheckGraderParam
+from .responses.response_input_text_param import ResponseInputTextParam
from .graders.text_similarity_grader_param import TextSimilarityGraderParam
__all__ = [
@@ -22,6 +22,9 @@ __all__ = [
"TestingCriterionLabelModel",
"TestingCriterionLabelModelInput",
"TestingCriterionLabelModelInputSimpleInputMessage",
+ "TestingCriterionLabelModelInputEvalItem",
+ "TestingCriterionLabelModelInputEvalItemContent",
+ "TestingCriterionLabelModelInputEvalItemContentOutputText",
"TestingCriterionTextSimilarity",
"TestingCriterionPython",
"TestingCriterionScoreModel",
@@ -90,7 +93,36 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
"""The role of the message (e.g. "system", "assistant", "user")."""
-TestingCriterionLabelModelInput: TypeAlias = Union[TestingCriterionLabelModelInputSimpleInputMessage, EvalItem]
+class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False):
+ text: Required[str]
+ """The text output from the model."""
+
+ type: Required[Literal["output_text"]]
+ """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[
+ str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText
+]
+
+
+class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False):
+ content: Required[TestingCriterionLabelModelInputEvalItemContent]
+ """Text inputs to the model - can contain template strings."""
+
+ role: Required[Literal["user", "assistant", "system", "developer"]]
+ """The role of the message input.
+
+ One of `user`, `assistant`, `system`, or `developer`.
+ """
+
+ type: Literal["message"]
+ """The type of the message input. Always `message`."""
+
+
+TestingCriterionLabelModelInput: TypeAlias = Union[
+ TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem
+]
class TestingCriterionLabelModel(TypedDict, total=False):
src/openai/types/eval_create_response.py
@@ -1,8 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
+from pydantic import Field as FieldInfo
+
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
@@ -10,7 +12,6 @@ from .graders.python_grader import PythonGrader
from .graders.label_model_grader import LabelModelGrader
from .graders.score_model_grader import ScoreModelGrader
from .graders.string_check_grader import StringCheckGrader
-from .eval_logs_data_source_config import EvalLogsDataSourceConfig
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
@@ -18,14 +19,37 @@ from .eval_stored_completions_data_source_config import EvalStoredCompletionsDat
__all__ = [
"EvalCreateResponse",
"DataSourceConfig",
+ "DataSourceConfigLogs",
"TestingCriterion",
"TestingCriterionEvalGraderTextSimilarity",
"TestingCriterionEvalGraderPython",
"TestingCriterionEvalGraderScoreModel",
]
+
+class DataSourceConfigLogs(BaseModel):
+ schema_: Dict[str, object] = FieldInfo(alias="schema")
+ """
+ The JSON schema for the run data source items. Learn how to build JSON schemas
+ [here](https://json-schema.org/).
+ """
+
+ type: Literal["logs"]
+ """The type of data source. Always `logs`."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
DataSourceConfig: TypeAlias = Annotated[
- Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig],
+ Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig],
PropertyInfo(discriminator="type"),
]
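One detail worth noting in `DataSourceConfigLogs`: the wire field is named `schema`, which would shadow a reserved `pydantic.BaseModel` attribute, so it is exposed as `schema_` with an alias mapping it back. A small sketch of the round trip, assuming pydantic v2 and using an illustrative payload:

```python
from openai.types.eval_create_response import DataSourceConfigLogs

cfg = DataSourceConfigLogs.model_validate(
    {"type": "logs", "schema": {"type": "object", "properties": {}}}
)
print(cfg.schema_)                              # read via the aliased attribute
print(cfg.model_dump(by_alias=True)["schema"])  # serializes back as `schema`
```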
src/openai/types/eval_list_response.py
@@ -1,8 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
+from pydantic import Field as FieldInfo
+
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
@@ -10,7 +12,6 @@ from .graders.python_grader import PythonGrader
from .graders.label_model_grader import LabelModelGrader
from .graders.score_model_grader import ScoreModelGrader
from .graders.string_check_grader import StringCheckGrader
-from .eval_logs_data_source_config import EvalLogsDataSourceConfig
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
@@ -18,14 +19,37 @@ from .eval_stored_completions_data_source_config import EvalStoredCompletionsDat
__all__ = [
"EvalListResponse",
"DataSourceConfig",
+ "DataSourceConfigLogs",
"TestingCriterion",
"TestingCriterionEvalGraderTextSimilarity",
"TestingCriterionEvalGraderPython",
"TestingCriterionEvalGraderScoreModel",
]
+
+class DataSourceConfigLogs(BaseModel):
+ schema_: Dict[str, object] = FieldInfo(alias="schema")
+ """
+ The JSON schema for the run data source items. Learn how to build JSON schemas
+ [here](https://json-schema.org/).
+ """
+
+ type: Literal["logs"]
+ """The type of data source. Always `logs`."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
DataSourceConfig: TypeAlias = Annotated[
- Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig],
+ Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig],
PropertyInfo(discriminator="type"),
]
src/openai/types/eval_logs_data_source_config.py
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-from .shared.metadata import Metadata
-
-__all__ = ["EvalLogsDataSourceConfig"]
-
-
-class EvalLogsDataSourceConfig(BaseModel):
- schema_: Dict[str, object] = FieldInfo(alias="schema")
- """
- The json schema for the run data source items. Learn how to build JSON schemas
- [here](https://json-schema.org/).
- """
-
- type: Literal["logs"]
- """The type of data source. Always `logs`."""
-
- metadata: Optional[Metadata] = None
- """Set of 16 key-value pairs that can be attached to an object.
-
- This can be useful for storing additional information about the object in a
- structured format, and querying for objects via API or the dashboard.
-
- Keys are strings with a maximum length of 64 characters. Values are strings with
- a maximum length of 512 characters.
- """
src/openai/types/eval_retrieve_response.py
@@ -1,8 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
+from pydantic import Field as FieldInfo
+
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
@@ -10,7 +12,6 @@ from .graders.python_grader import PythonGrader
from .graders.label_model_grader import LabelModelGrader
from .graders.score_model_grader import ScoreModelGrader
from .graders.string_check_grader import StringCheckGrader
-from .eval_logs_data_source_config import EvalLogsDataSourceConfig
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
@@ -18,14 +19,37 @@ from .eval_stored_completions_data_source_config import EvalStoredCompletionsDat
__all__ = [
"EvalRetrieveResponse",
"DataSourceConfig",
+ "DataSourceConfigLogs",
"TestingCriterion",
"TestingCriterionEvalGraderTextSimilarity",
"TestingCriterionEvalGraderPython",
"TestingCriterionEvalGraderScoreModel",
]
+
+class DataSourceConfigLogs(BaseModel):
+ schema_: Dict[str, object] = FieldInfo(alias="schema")
+ """
+ The JSON schema for the run data source items. Learn how to build JSON schemas
+ [here](https://json-schema.org/).
+ """
+
+ type: Literal["logs"]
+ """The type of data source. Always `logs`."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
DataSourceConfig: TypeAlias = Annotated[
- Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig],
+ Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig],
PropertyInfo(discriminator="type"),
]
src/openai/types/eval_update_response.py
@@ -1,8 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
+from pydantic import Field as FieldInfo
+
from .._utils import PropertyInfo
from .._models import BaseModel
from .shared.metadata import Metadata
@@ -10,7 +12,6 @@ from .graders.python_grader import PythonGrader
from .graders.label_model_grader import LabelModelGrader
from .graders.score_model_grader import ScoreModelGrader
from .graders.string_check_grader import StringCheckGrader
-from .eval_logs_data_source_config import EvalLogsDataSourceConfig
from .eval_custom_data_source_config import EvalCustomDataSourceConfig
from .graders.text_similarity_grader import TextSimilarityGrader
from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
@@ -18,14 +19,37 @@ from .eval_stored_completions_data_source_config import EvalStoredCompletionsDat
__all__ = [
"EvalUpdateResponse",
"DataSourceConfig",
+ "DataSourceConfigLogs",
"TestingCriterion",
"TestingCriterionEvalGraderTextSimilarity",
"TestingCriterionEvalGraderPython",
"TestingCriterionEvalGraderScoreModel",
]
+
+class DataSourceConfigLogs(BaseModel):
+ schema_: Dict[str, object] = FieldInfo(alias="schema")
+ """
+ The JSON schema for the run data source items. Learn how to build JSON schemas
+ [here](https://json-schema.org/).
+ """
+
+ type: Literal["logs"]
+ """The type of data source. Always `logs`."""
+
+ metadata: Optional[Metadata] = None
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard.
+
+ Keys are strings with a maximum length of 64 characters. Values are strings with
+ a maximum length of 512 characters.
+ """
+
+
DataSourceConfig: TypeAlias = Annotated[
- Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig],
+ Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig],
PropertyInfo(discriminator="type"),
]
src/openai/types/vector_store.py
@@ -5,9 +5,8 @@ from typing_extensions import Literal
from .._models import BaseModel
from .shared.metadata import Metadata
-from .vector_store_expiration_after import VectorStoreExpirationAfter
-__all__ = ["VectorStore", "FileCounts"]
+__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
class FileCounts(BaseModel):
@@ -27,6 +26,17 @@ class FileCounts(BaseModel):
"""The total number of files."""
+class ExpiresAfter(BaseModel):
+ anchor: Literal["last_active_at"]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: int
+ """The number of days after the anchor time that the vector store will expire."""
+
+
class VectorStore(BaseModel):
id: str
"""The identifier, which can be referenced in API endpoints."""
@@ -65,7 +75,7 @@ class VectorStore(BaseModel):
usage_bytes: int
"""The total number of bytes used by the files in the vector store."""
- expires_after: Optional[VectorStoreExpirationAfter] = None
+ expires_after: Optional[ExpiresAfter] = None
"""The expiration policy for a vector store."""
expires_at: Optional[int] = None
src/openai/types/vector_store_create_params.py
@@ -3,13 +3,12 @@
from __future__ import annotations
from typing import List, Optional
-from typing_extensions import TypedDict
+from typing_extensions import Literal, Required, TypedDict
from .shared_params.metadata import Metadata
from .file_chunking_strategy_param import FileChunkingStrategyParam
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-__all__ = ["VectorStoreCreateParams"]
+__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
class VectorStoreCreateParams(TypedDict, total=False):
@@ -20,7 +19,7 @@ class VectorStoreCreateParams(TypedDict, total=False):
non-empty.
"""
- expires_after: VectorStoreExpirationAfterParam
+ expires_after: ExpiresAfter
"""The expiration policy for a vector store."""
file_ids: List[str]
@@ -42,3 +41,14 @@ class VectorStoreCreateParams(TypedDict, total=False):
name: str
"""The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_expiration_after.py
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["VectorStoreExpirationAfter"]
-
-
-class VectorStoreExpirationAfter(BaseModel):
- anchor: Literal["last_active_at"]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: int
- """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_expiration_after_param.py
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["VectorStoreExpirationAfterParam"]
-
-
-class VectorStoreExpirationAfterParam(TypedDict, total=False):
- anchor: Required[Literal["last_active_at"]]
- """Anchor timestamp after which the expiration policy applies.
-
- Supported anchors: `last_active_at`.
- """
-
- days: Required[int]
- """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_update_params.py
@@ -3,16 +3,15 @@
from __future__ import annotations
from typing import Optional
-from typing_extensions import TypedDict
+from typing_extensions import Literal, Required, TypedDict
from .shared_params.metadata import Metadata
-from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
-__all__ = ["VectorStoreUpdateParams"]
+__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
class VectorStoreUpdateParams(TypedDict, total=False):
- expires_after: Optional[VectorStoreExpirationAfterParam]
+ expires_after: Optional[ExpiresAfter]
"""The expiration policy for a vector store."""
metadata: Optional[Metadata]
@@ -27,3 +26,14 @@ class VectorStoreUpdateParams(TypedDict, total=False):
name: Optional[str]
"""The name of the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies.
+
+ Supported anchors: `last_active_at`.
+ """
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 101
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml
-openapi_spec_hash: 602e14add4bee018c6774e320ce309b8
-config_hash: bdacc55eb995c15255ec82130eb8c3bb
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml
+openapi_spec_hash: da3e669f65130043b1170048c0727890
+config_hash: d8d5fda350f6db77c784f35429741a2e
api.md
@@ -7,7 +7,6 @@ from openai.types import (
ComparisonFilter,
CompoundFilter,
ErrorObject,
- EvalItem,
FunctionDefinition,
FunctionParameters,
Metadata,
@@ -344,7 +343,6 @@ from openai.types import (
StaticFileChunkingStrategyObjectParam,
VectorStore,
VectorStoreDeleted,
- VectorStoreExpirationAfter,
VectorStoreSearchResponse,
)
```
@@ -521,7 +519,6 @@ from openai.types.beta import (
AssistantToolChoiceOption,
Thread,
ThreadDeleted,
- TruncationObject,
)
```
@@ -790,7 +787,6 @@ Types:
```python
from openai.types import (
EvalCustomDataSourceConfig,
- EvalLogsDataSourceConfig,
EvalStoredCompletionsDataSourceConfig,
EvalCreateResponse,
EvalRetrieveResponse,
@@ -816,10 +812,7 @@ Types:
from openai.types.evals import (
CreateEvalCompletionsRunDataSource,
CreateEvalJSONLRunDataSource,
- CreateEvalResponsesRunDataSource,
EvalAPIError,
- EvalJSONLFileContentSource,
- EvalJSONLFileIDSource,
RunCreateResponse,
RunRetrieveResponse,
RunListResponse,