Commit d7765341

Author: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Date:   2025-05-16 07:47:25

feat(api): manual updates

Parent: 28d60d9
src/openai/resources/beta/threads/runs/runs.py
@@ -51,6 +51,7 @@ from .....types.shared_params.metadata import Metadata
 from .....types.shared.reasoning_effort import ReasoningEffort
 from .....types.beta.assistant_tool_param import AssistantToolParam
 from .....types.beta.assistant_stream_event import AssistantStreamEvent
+from .....types.beta.truncation_object_param import TruncationObjectParam
 from .....types.beta.threads.runs.run_step_include import RunStepInclude
 from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
 from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -104,7 +105,7 @@ class Runs(SyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -254,7 +255,7 @@ class Runs(SyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -404,7 +405,7 @@ class Runs(SyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -554,7 +555,7 @@ class Runs(SyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1515,7 +1516,7 @@ class AsyncRuns(AsyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1665,7 +1666,7 @@ class AsyncRuns(AsyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1815,7 +1816,7 @@ class AsyncRuns(AsyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1965,7 +1966,7 @@ class AsyncRuns(AsyncAPIResource):
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
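Usage sketch (not part of the commit): since TruncationObjectParam is a TypedDict, callers keep passing a plain dict. This assumes a configured client; the IDs are placeholders.

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",   # placeholder thread ID
        assistant_id="asst_abc123",  # placeholder assistant ID
        # TruncationObjectParam is a TypedDict, so a dict literal type-checks.
        truncation_strategy={"type": "last_messages", "last_messages": 10},
    )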
src/openai/resources/beta/threads/threads.py
@@ -52,6 +52,7 @@ from ....types.beta.thread_deleted import ThreadDeleted
 from ....types.shared_params.metadata import Metadata
 from ....types.beta.assistant_tool_param import AssistantToolParam
 from ....types.beta.assistant_stream_event import AssistantStreamEvent
+from ....types.beta.truncation_object_param import TruncationObjectParam
 from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
 from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
 
@@ -285,7 +286,7 @@ class Threads(SyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -418,7 +419,7 @@ class Threads(SyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -551,7 +552,7 @@ class Threads(SyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -684,7 +685,7 @@ class Threads(SyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1134,7 +1135,7 @@ class AsyncThreads(AsyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1267,7 +1268,7 @@ class AsyncThreads(AsyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1400,7 +1401,7 @@ class AsyncThreads(AsyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1533,7 +1534,7 @@ class AsyncThreads(AsyncAPIResource):
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
         tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
-        truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
+        truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
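The same shape works for the thread-level helper; a minimal sketch with a placeholder assistant ID:

    run = client.beta.threads.create_and_run(
        assistant_id="asst_abc123",  # placeholder assistant ID
        truncation_strategy={"type": "auto"},
    )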
src/openai/resources/vector_stores/vector_stores.py
@@ -43,6 +43,7 @@ from ...types.vector_store_deleted import VectorStoreDeleted
 from ...types.shared_params.metadata import Metadata
 from ...types.file_chunking_strategy_param import FileChunkingStrategyParam
 from ...types.vector_store_search_response import VectorStoreSearchResponse
+from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam
 
 __all__ = ["VectorStores", "AsyncVectorStores"]
 
@@ -79,7 +80,7 @@ class VectorStores(SyncAPIResource):
         self,
         *,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
-        expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+        expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
         file_ids: List[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: str | NotGiven = NOT_GIVEN,
@@ -177,7 +178,7 @@ class VectorStores(SyncAPIResource):
         self,
         vector_store_id: str,
         *,
-        expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+        expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -424,7 +425,7 @@ class AsyncVectorStores(AsyncAPIResource):
         self,
         *,
         chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN,
-        expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+        expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN,
         file_ids: List[str] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: str | NotGiven = NOT_GIVEN,
@@ -522,7 +523,7 @@ class AsyncVectorStores(AsyncAPIResource):
         self,
         vector_store_id: str,
         *,
-        expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN,
+        expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: Optional[str] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
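Usage sketch (not part of the commit): create and update now share the VectorStoreExpirationAfterParam shape. The store name is hypothetical.

    vector_store = client.vector_stores.create(
        name="support-docs",  # hypothetical store name
        expires_after={"anchor": "last_active_at", "days": 7},
    )

    client.vector_stores.update(
        vector_store_id=vector_store.id,
        expires_after={"anchor": "last_active_at", "days": 30},
    )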
src/openai/types/beta/threads/run.py
@@ -7,19 +7,12 @@ from ...._models import BaseModel
 from .run_status import RunStatus
 from ..assistant_tool import AssistantTool
 from ...shared.metadata import Metadata
+from ..truncation_object import TruncationObject
 from ..assistant_tool_choice_option import AssistantToolChoiceOption
 from ..assistant_response_format_option import AssistantResponseFormatOption
 from .required_action_function_tool_call import RequiredActionFunctionToolCall
 
-__all__ = [
-    "Run",
-    "IncompleteDetails",
-    "LastError",
-    "RequiredAction",
-    "RequiredActionSubmitToolOutputs",
-    "TruncationStrategy",
-    "Usage",
-]
+__all__ = ["Run", "IncompleteDetails", "LastError", "RequiredAction", "RequiredActionSubmitToolOutputs", "Usage"]
 
 
 class IncompleteDetails(BaseModel):
@@ -52,23 +45,6 @@ class RequiredAction(BaseModel):
     """For now, this is always `submit_tool_outputs`."""
 
 
-class TruncationStrategy(BaseModel):
-    type: Literal["auto", "last_messages"]
-    """The truncation strategy to use for the thread.
-
-    The default is `auto`. If set to `last_messages`, the thread will be truncated
-    to the n most recent messages in the thread. When set to `auto`, messages in the
-    middle of the thread will be dropped to fit the context length of the model,
-    `max_prompt_tokens`.
-    """
-
-    last_messages: Optional[int] = None
-    """
-    The number of most recent messages from the thread when constructing the context
-    for the run.
-    """
-
-
 class Usage(BaseModel):
     completion_tokens: int
     """Number of completion tokens used over the course of the run."""
@@ -225,7 +201,7 @@ class Run(BaseModel):
     this run.
     """
 
-    truncation_strategy: Optional[TruncationStrategy] = None
+    truncation_strategy: Optional[TruncationObject] = None
     """Controls for how a thread will be truncated prior to the run.
 
     Use this to control the initial context window of the run.
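On the response side, run.truncation_strategy is now the shared TruncationObject model; a small read sketch with placeholder IDs:

    run = client.beta.threads.runs.retrieve(
        "run_abc123",               # placeholder run ID
        thread_id="thread_abc123",  # placeholder thread ID
    )
    if run.truncation_strategy is not None:
        print(run.truncation_strategy.type, run.truncation_strategy.last_messages)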
src/openai/types/beta/threads/run_create_params.py
@@ -9,6 +9,7 @@ from ...shared.chat_model import ChatModel
 from ..assistant_tool_param import AssistantToolParam
 from .runs.run_step_include import RunStepInclude
 from ...shared_params.metadata import Metadata
+from ..truncation_object_param import TruncationObjectParam
 from ...shared.reasoning_effort import ReasoningEffort
 from .message_content_part_param import MessageContentPartParam
 from ..code_interpreter_tool_param import CodeInterpreterToolParam
@@ -21,7 +22,6 @@ __all__ = [
     "AdditionalMessageAttachment",
     "AdditionalMessageAttachmentTool",
     "AdditionalMessageAttachmentToolFileSearch",
-    "TruncationStrategy",
     "RunCreateParamsNonStreaming",
     "RunCreateParamsStreaming",
 ]
@@ -173,7 +173,7 @@ class RunCreateParamsBase(TypedDict, total=False):
     We generally recommend altering this or temperature but not both.
     """
 
-    truncation_strategy: Optional[TruncationStrategy]
+    truncation_strategy: Optional[TruncationObjectParam]
     """Controls for how a thread will be truncated prior to the run.
 
     Use this to control the initial context window of the run.
@@ -223,23 +223,6 @@ class AdditionalMessage(TypedDict, total=False):
     """
 
 
-class TruncationStrategy(TypedDict, total=False):
-    type: Required[Literal["auto", "last_messages"]]
-    """The truncation strategy to use for the thread.
-
-    The default is `auto`. If set to `last_messages`, the thread will be truncated
-    to the n most recent messages in the thread. When set to `auto`, messages in the
-    middle of the thread will be dropped to fit the context length of the model,
-    `max_prompt_tokens`.
-    """
-
-    last_messages: Optional[int]
-    """
-    The number of most recent messages from the thread when constructing the context
-    for the run.
-    """
-
-
 class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False):
     stream: Optional[Literal[False]]
     """
src/openai/types/beta/__init__.py
@@ -9,6 +9,7 @@ from .assistant_tool import AssistantTool as AssistantTool
 from .thread_deleted import ThreadDeleted as ThreadDeleted
 from .file_search_tool import FileSearchTool as FileSearchTool
 from .assistant_deleted import AssistantDeleted as AssistantDeleted
+from .truncation_object import TruncationObject as TruncationObject
 from .function_tool_param import FunctionToolParam as FunctionToolParam
 from .assistant_tool_param import AssistantToolParam as AssistantToolParam
 from .thread_create_params import ThreadCreateParams as ThreadCreateParams
@@ -20,6 +21,7 @@ from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
 from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
 from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
 from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
+from .truncation_object_param import TruncationObjectParam as TruncationObjectParam
 from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
 from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
 from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
src/openai/types/beta/thread_create_and_run_params.py
@@ -8,6 +8,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict
 from ..shared.chat_model import ChatModel
 from .assistant_tool_param import AssistantToolParam
 from ..shared_params.metadata import Metadata
+from .truncation_object_param import TruncationObjectParam
 from .code_interpreter_tool_param import CodeInterpreterToolParam
 from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
 from .threads.message_content_part_param import MessageContentPartParam
@@ -31,7 +32,6 @@ __all__ = [
     "ToolResources",
     "ToolResourcesCodeInterpreter",
     "ToolResourcesFileSearch",
-    "TruncationStrategy",
     "ThreadCreateAndRunParamsNonStreaming",
     "ThreadCreateAndRunParamsStreaming",
 ]
@@ -166,7 +166,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     We generally recommend altering this or temperature but not both.
     """
 
-    truncation_strategy: Optional[TruncationStrategy]
+    truncation_strategy: Optional[TruncationObjectParam]
     """Controls for how a thread will be truncated prior to the run.
 
     Use this to control the initial context window of the run.
@@ -358,23 +358,6 @@ class ToolResources(TypedDict, total=False):
     file_search: ToolResourcesFileSearch
 
 
-class TruncationStrategy(TypedDict, total=False):
-    type: Required[Literal["auto", "last_messages"]]
-    """The truncation strategy to use for the thread.
-
-    The default is `auto`. If set to `last_messages`, the thread will be truncated
-    to the n most recent messages in the thread. When set to `auto`, messages in the
-    middle of the thread will be dropped to fit the context length of the model,
-    `max_prompt_tokens`.
-    """
-
-    last_messages: Optional[int]
-    """
-    The number of most recent messages from the thread when constructing the context
-    for the run.
-    """
-
-
 class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False):
     stream: Optional[Literal[False]]
     """
src/openai/types/beta/truncation_object.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TruncationObject"]
+
+
+class TruncationObject(BaseModel):
+    type: Literal["auto", "last_messages"]
+    """The truncation strategy to use for the thread.
+
+    The default is `auto`. If set to `last_messages`, the thread will be truncated
+    to the n most recent messages in the thread. When set to `auto`, messages in the
+    middle of the thread will be dropped to fit the context length of the model,
+    `max_prompt_tokens`.
+    """
+
+    last_messages: Optional[int] = None
+    """
+    The number of most recent messages from the thread when constructing the context
+    for the run.
+    """
src/openai/types/beta/truncation_object_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["TruncationObjectParam"]
+
+
+class TruncationObjectParam(TypedDict, total=False):
+    type: Required[Literal["auto", "last_messages"]]
+    """The truncation strategy to use for the thread.
+
+    The default is `auto`. If set to `last_messages`, the thread will be truncated
+    to the n most recent messages in the thread. When set to `auto`, messages in the
+    middle of the thread will be dropped to fit the context length of the model,
+    `max_prompt_tokens`.
+    """
+
+    last_messages: Optional[int]
+    """
+    The number of most recent messages from the thread when constructing the context
+    for the run.
+    """
src/openai/types/evals/__init__.py
@@ -10,7 +10,11 @@ from .run_cancel_response import RunCancelResponse as RunCancelResponse
 from .run_create_response import RunCreateResponse as RunCreateResponse
 from .run_delete_response import RunDeleteResponse as RunDeleteResponse
 from .run_retrieve_response import RunRetrieveResponse as RunRetrieveResponse
+from .eval_jsonl_file_id_source import EvalJSONLFileIDSource as EvalJSONLFileIDSource
+from .eval_jsonl_file_content_source import EvalJSONLFileContentSource as EvalJSONLFileContentSource
+from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam as EvalJSONLFileIDSourceParam
 from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource
+from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam as EvalJSONLFileContentSourceParam
 from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource
 from .create_eval_completions_run_data_source import (
     CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource,
src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -1,54 +1,28 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from ..shared.metadata import Metadata
+from ..shared.eval_item import EvalItem
+from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
 from ..responses.easy_input_message import EasyInputMessage
-from ..responses.response_input_text import ResponseInputText
+from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
 
 __all__ = [
     "CreateEvalCompletionsRunDataSource",
     "Source",
-    "SourceFileContent",
-    "SourceFileContentContent",
-    "SourceFileID",
     "SourceStoredCompletions",
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateMessage",
-    "InputMessagesTemplateTemplateMessageContent",
-    "InputMessagesTemplateTemplateMessageContentOutputText",
     "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class SourceFileContentContent(BaseModel):
-    item: Dict[str, object]
-
-    sample: Optional[Dict[str, object]] = None
-
-
-class SourceFileContent(BaseModel):
-    content: List[SourceFileContentContent]
-    """The content of the jsonl file."""
-
-    type: Literal["file_content"]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(BaseModel):
-    id: str
-    """The identifier of the file."""
-
-    type: Literal["file_id"]
-    """The type of jsonl source. Always `file_id`."""
-
-
 class SourceStoredCompletions(BaseModel):
     type: Literal["stored_completions"]
     """The type of source. Always `stored_completions`."""
@@ -77,39 +51,12 @@ class SourceStoredCompletions(BaseModel):
 
 
 Source: TypeAlias = Annotated[
-    Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type")
-]
-
-
-class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
-    text: str
-    """The text output from the model."""
-
-    type: Literal["output_text"]
-    """The type of the output text. Always `output_text`."""
-
-
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
-    str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText
+    Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceStoredCompletions],
+    PropertyInfo(discriminator="type"),
 ]
 
-
-class InputMessagesTemplateTemplateMessage(BaseModel):
-    content: InputMessagesTemplateTemplateMessageContent
-    """Text inputs to the model - can contain template strings."""
-
-    role: Literal["user", "assistant", "system", "developer"]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Optional[Literal["message"]] = None
-    """The type of the message input. Always `message`."""
-
-
 InputMessagesTemplateTemplate: TypeAlias = Annotated[
-    Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
+    Union[EasyInputMessage, EvalItem], PropertyInfo(discriminator="type")
 ]
 
 
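Usage sketch (not part of the commit): the extracted source types keep the same wire shape, so an eval run over a JSONL file is still a dict at the call site. The eval and file IDs are placeholders, and the template field reference is hypothetical.

    run = client.evals.runs.create(
        eval_id="eval_abc123",  # placeholder eval ID
        name="completions-run",
        data_source={
            "type": "completions",
            "model": "gpt-4o",
            "source": {"type": "file_id", "id": "file_abc123"},  # placeholder file ID
            "input_messages": {
                "type": "template",
                "template": [{"role": "user", "content": "{{item.input}}"}],
            },
        },
    )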
src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -2,53 +2,27 @@
 
 from __future__ import annotations
 
-from typing import Dict, Union, Iterable, Optional
+from typing import Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
+from ..shared_params.eval_item import EvalItem
+from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
 from ..responses.easy_input_message_param import EasyInputMessageParam
-from ..responses.response_input_text_param import ResponseInputTextParam
+from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
 
 __all__ = [
     "CreateEvalCompletionsRunDataSourceParam",
     "Source",
-    "SourceFileContent",
-    "SourceFileContentContent",
-    "SourceFileID",
     "SourceStoredCompletions",
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateMessage",
-    "InputMessagesTemplateTemplateMessageContent",
-    "InputMessagesTemplateTemplateMessageContentOutputText",
     "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class SourceFileContentContent(TypedDict, total=False):
-    item: Required[Dict[str, object]]
-
-    sample: Dict[str, object]
-
-
-class SourceFileContent(TypedDict, total=False):
-    content: Required[Iterable[SourceFileContentContent]]
-    """The content of the jsonl file."""
-
-    type: Required[Literal["file_content"]]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(TypedDict, total=False):
-    id: Required[str]
-    """The identifier of the file."""
-
-    type: Required[Literal["file_id"]]
-    """The type of jsonl source. Always `file_id`."""
-
-
 class SourceStoredCompletions(TypedDict, total=False):
     type: Required[Literal["stored_completions"]]
     """The type of source. Always `stored_completions`."""
@@ -76,37 +50,9 @@ class SourceStoredCompletions(TypedDict, total=False):
     """An optional model to filter by (e.g., 'gpt-4o')."""
 
 
-Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
-
-
-class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
-    text: Required[str]
-    """The text output from the model."""
-
-    type: Required[Literal["output_text"]]
-    """The type of the output text. Always `output_text`."""
-
-
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
-    str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText
-]
-
-
-class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
-    content: Required[InputMessagesTemplateTemplateMessageContent]
-    """Text inputs to the model - can contain template strings."""
-
-    role: Required[Literal["user", "assistant", "system", "developer"]]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Literal["message"]
-    """The type of the message input. Always `message`."""
-
+Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceStoredCompletions]
 
-InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, EvalItem]
 
 
 class InputMessagesTemplate(TypedDict, total=False):
src/openai/types/evals/create_eval_jsonl_run_data_source.py
@@ -1,37 +1,18 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, List, Union, Optional
+from typing import Union
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
+from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
+from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
 
-__all__ = ["CreateEvalJSONLRunDataSource", "Source", "SourceFileContent", "SourceFileContentContent", "SourceFileID"]
+__all__ = ["CreateEvalJSONLRunDataSource", "Source"]
 
-
-class SourceFileContentContent(BaseModel):
-    item: Dict[str, object]
-
-    sample: Optional[Dict[str, object]] = None
-
-
-class SourceFileContent(BaseModel):
-    content: List[SourceFileContentContent]
-    """The content of the jsonl file."""
-
-    type: Literal["file_content"]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(BaseModel):
-    id: str
-    """The identifier of the file."""
-
-    type: Literal["file_id"]
-    """The type of jsonl source. Always `file_id`."""
-
-
-Source: TypeAlias = Annotated[Union[SourceFileContent, SourceFileID], PropertyInfo(discriminator="type")]
+Source: TypeAlias = Annotated[
+    Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource], PropertyInfo(discriminator="type")
+]
 
 
 class CreateEvalJSONLRunDataSource(BaseModel):
src/openai/types/evals/create_eval_jsonl_run_data_source_param.py
@@ -2,41 +2,15 @@
 
 from __future__ import annotations
 
-from typing import Dict, Union, Iterable
+from typing import Union
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
-__all__ = [
-    "CreateEvalJSONLRunDataSourceParam",
-    "Source",
-    "SourceFileContent",
-    "SourceFileContentContent",
-    "SourceFileID",
-]
+from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
+from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
 
+__all__ = ["CreateEvalJSONLRunDataSourceParam", "Source"]
 
-class SourceFileContentContent(TypedDict, total=False):
-    item: Required[Dict[str, object]]
-
-    sample: Dict[str, object]
-
-
-class SourceFileContent(TypedDict, total=False):
-    content: Required[Iterable[SourceFileContentContent]]
-    """The content of the jsonl file."""
-
-    type: Required[Literal["file_content"]]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(TypedDict, total=False):
-    id: Required[str]
-    """The identifier of the file."""
-
-    type: Required[Literal["file_id"]]
-    """The type of jsonl source. Always `file_id`."""
-
-
-Source: TypeAlias = Union[SourceFileContent, SourceFileID]
+Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam]
 
 
 class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False):
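A matching sketch for the jsonl data source, using inline file content instead of a file ID (the sample row is hypothetical):

    run = client.evals.runs.create(
        eval_id="eval_abc123",  # placeholder eval ID
        data_source={
            "type": "jsonl",
            "source": {
                "type": "file_content",
                "content": [{"item": {"input": "2 + 2", "expected": "4"}}],
            },
        },
    )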
src/openai/types/evals/create_eval_responses_run_data_source.py
@@ -1,54 +1,28 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
+from ..shared.eval_item import EvalItem
 from ..shared.reasoning_effort import ReasoningEffort
-from ..responses.response_input_text import ResponseInputText
+from .eval_jsonl_file_id_source import EvalJSONLFileIDSource
+from .eval_jsonl_file_content_source import EvalJSONLFileContentSource
 
 __all__ = [
     "CreateEvalResponsesRunDataSource",
     "Source",
-    "SourceFileContent",
-    "SourceFileContentContent",
-    "SourceFileID",
     "SourceResponses",
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
     "InputMessagesTemplateTemplateChatMessage",
-    "InputMessagesTemplateTemplateEvalItem",
-    "InputMessagesTemplateTemplateEvalItemContent",
-    "InputMessagesTemplateTemplateEvalItemContentOutputText",
     "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class SourceFileContentContent(BaseModel):
-    item: Dict[str, object]
-
-    sample: Optional[Dict[str, object]] = None
-
-
-class SourceFileContent(BaseModel):
-    content: List[SourceFileContentContent]
-    """The content of the jsonl file."""
-
-    type: Literal["file_content"]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(BaseModel):
-    id: str
-    """The identifier of the file."""
-
-    type: Literal["file_id"]
-    """The type of jsonl source. Always `file_id`."""
-
-
 class SourceResponses(BaseModel):
     type: Literal["responses"]
     """The type of run data source. Always `responses`."""
@@ -109,7 +83,7 @@ class SourceResponses(BaseModel):
 
 
 Source: TypeAlias = Annotated[
-    Union[SourceFileContent, SourceFileID, SourceResponses], PropertyInfo(discriminator="type")
+    Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceResponses], PropertyInfo(discriminator="type")
 ]
 
 
@@ -121,36 +95,7 @@ class InputMessagesTemplateTemplateChatMessage(BaseModel):
     """The role of the message (e.g. "system", "assistant", "user")."""
 
 
-class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
-    text: str
-    """The text output from the model."""
-
-    type: Literal["output_text"]
-    """The type of the output text. Always `output_text`."""
-
-
-InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
-    str, ResponseInputText, InputMessagesTemplateTemplateEvalItemContentOutputText
-]
-
-
-class InputMessagesTemplateTemplateEvalItem(BaseModel):
-    content: InputMessagesTemplateTemplateEvalItemContent
-    """Text inputs to the model - can contain template strings."""
-
-    role: Literal["user", "assistant", "system", "developer"]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Optional[Literal["message"]] = None
-    """The type of the message input. Always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
-    InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem
-]
+InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem]
 
 
 class InputMessagesTemplate(BaseModel):
src/openai/types/evals/create_eval_responses_run_data_source_param.py
@@ -2,53 +2,27 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Iterable, Optional
+from typing import List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared.reasoning_effort import ReasoningEffort
-from ..responses.response_input_text_param import ResponseInputTextParam
+from ..shared_params.eval_item import EvalItem
+from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam
+from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam
 
 __all__ = [
     "CreateEvalResponsesRunDataSourceParam",
     "Source",
-    "SourceFileContent",
-    "SourceFileContentContent",
-    "SourceFileID",
     "SourceResponses",
     "InputMessages",
     "InputMessagesTemplate",
     "InputMessagesTemplateTemplate",
     "InputMessagesTemplateTemplateChatMessage",
-    "InputMessagesTemplateTemplateEvalItem",
-    "InputMessagesTemplateTemplateEvalItemContent",
-    "InputMessagesTemplateTemplateEvalItemContentOutputText",
     "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class SourceFileContentContent(TypedDict, total=False):
-    item: Required[Dict[str, object]]
-
-    sample: Dict[str, object]
-
-
-class SourceFileContent(TypedDict, total=False):
-    content: Required[Iterable[SourceFileContentContent]]
-    """The content of the jsonl file."""
-
-    type: Required[Literal["file_content"]]
-    """The type of jsonl source. Always `file_content`."""
-
-
-class SourceFileID(TypedDict, total=False):
-    id: Required[str]
-    """The identifier of the file."""
-
-    type: Required[Literal["file_id"]]
-    """The type of jsonl source. Always `file_id`."""
-
-
 class SourceResponses(TypedDict, total=False):
     type: Required[Literal["responses"]]
     """The type of run data source. Always `responses`."""
@@ -108,7 +82,7 @@ class SourceResponses(TypedDict, total=False):
     """List of user identifiers. This is a query parameter used to select responses."""
 
 
-Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceResponses]
+Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceResponses]
 
 
 class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
@@ -119,36 +93,7 @@ class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
     """The role of the message (e.g. "system", "assistant", "user")."""
 
 
-class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False):
-    text: Required[str]
-    """The text output from the model."""
-
-    type: Required[Literal["output_text"]]
-    """The type of the output text. Always `output_text`."""
-
-
-InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
-    str, ResponseInputTextParam, InputMessagesTemplateTemplateEvalItemContentOutputText
-]
-
-
-class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
-    content: Required[InputMessagesTemplateTemplateEvalItemContent]
-    """Text inputs to the model - can contain template strings."""
-
-    role: Required[Literal["user", "assistant", "system", "developer"]]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Literal["message"]
-    """The type of the message input. Always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
-    InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem
-]
+InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem]
 
 
 class InputMessagesTemplate(TypedDict, total=False):
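Template entries in the responses data source now type-check against the shared EvalItem shape; a sketch of an input_messages value (the field references are hypothetical):

    input_messages = {
        "type": "template",
        "template": [
            {"role": "developer", "content": "Answer concisely."},
            {"role": "user", "content": "{{item.question}}"},
        ],
    }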
src/openai/types/evals/eval_jsonl_file_content_source.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["EvalJSONLFileContentSource", "Content"]
+
+
+class Content(BaseModel):
+    item: Dict[str, object]
+
+    sample: Optional[Dict[str, object]] = None
+
+
+class EvalJSONLFileContentSource(BaseModel):
+    content: List[Content]
+    """The content of the jsonl file."""
+
+    type: Literal["file_content"]
+    """The type of jsonl source. Always `file_content`."""
src/openai/types/evals/eval_jsonl_file_content_source_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["EvalJSONLFileContentSourceParam", "Content"]
+
+
+class Content(TypedDict, total=False):
+    item: Required[Dict[str, object]]
+
+    sample: Dict[str, object]
+
+
+class EvalJSONLFileContentSourceParam(TypedDict, total=False):
+    content: Required[Iterable[Content]]
+    """The content of the jsonl file."""
+
+    type: Required[Literal["file_content"]]
+    """The type of jsonl source. Always `file_content`."""
src/openai/types/evals/eval_jsonl_file_id_source.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["EvalJSONLFileIDSource"]
+
+
+class EvalJSONLFileIDSource(BaseModel):
+    id: str
+    """The identifier of the file."""
+
+    type: Literal["file_id"]
+    """The type of jsonl source. Always `file_id`."""
src/openai/types/evals/eval_jsonl_file_id_source_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["EvalJSONLFileIDSourceParam"]
+
+
+class EvalJSONLFileIDSourceParam(TypedDict, total=False):
+    id: Required[str]
+    """The identifier of the file."""
+
+    type: Required[Literal["file_id"]]
+    """The type of jsonl source. Always `file_id`."""
src/openai/types/graders/label_model_grader.py
@@ -1,41 +1,16 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
+from typing import List
+from typing_extensions import Literal
 
 from ..._models import BaseModel
-from ..responses.response_input_text import ResponseInputText
+from ..shared.eval_item import EvalItem
 
-__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
-
-
-class InputContentOutputText(BaseModel):
-    text: str
-    """The text output from the model."""
-
-    type: Literal["output_text"]
-    """The type of the output text. Always `output_text`."""
-
-
-InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
-
-
-class Input(BaseModel):
-    content: InputContent
-    """Text inputs to the model - can contain template strings."""
-
-    role: Literal["user", "assistant", "system", "developer"]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Optional[Literal["message"]] = None
-    """The type of the message input. Always `message`."""
+__all__ = ["LabelModelGrader"]
 
 
 class LabelModelGrader(BaseModel):
-    input: List[Input]
+    input: List[EvalItem]
 
     labels: List[str]
     """The labels to assign to each item in the evaluation."""
src/openai/types/graders/label_model_grader_param.py
@@ -2,41 +2,16 @@
 
 from __future__ import annotations
 
-from typing import List, Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import List, Iterable
+from typing_extensions import Literal, Required, TypedDict
 
-from ..responses.response_input_text_param import ResponseInputTextParam
+from ..shared_params.eval_item import EvalItem
 
-__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
-
-
-class InputContentOutputText(TypedDict, total=False):
-    text: Required[str]
-    """The text output from the model."""
-
-    type: Required[Literal["output_text"]]
-    """The type of the output text. Always `output_text`."""
-
-
-InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
-
-
-class Input(TypedDict, total=False):
-    content: Required[InputContent]
-    """Text inputs to the model - can contain template strings."""
-
-    role: Required[Literal["user", "assistant", "system", "developer"]]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Literal["message"]
-    """The type of the message input. Always `message`."""
+__all__ = ["LabelModelGraderParam"]
 
 
 class LabelModelGraderParam(TypedDict, total=False):
-    input: Required[Iterable[Input]]
+    input: Required[Iterable[EvalItem]]
 
     labels: Required[List[str]]
     """The labels to assign to each item in the evaluation."""
src/openai/types/graders/score_model_grader.py
@@ -1,41 +1,16 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
-from typing_extensions import Literal, TypeAlias
+from typing import List, Optional
+from typing_extensions import Literal
 
 from ..._models import BaseModel
-from ..responses.response_input_text import ResponseInputText
+from ..shared.eval_item import EvalItem
 
-__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"]
-
-
-class InputContentOutputText(BaseModel):
-    text: str
-    """The text output from the model."""
-
-    type: Literal["output_text"]
-    """The type of the output text. Always `output_text`."""
-
-
-InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
-
-
-class Input(BaseModel):
-    content: InputContent
-    """Text inputs to the model - can contain template strings."""
-
-    role: Literal["user", "assistant", "system", "developer"]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Optional[Literal["message"]] = None
-    """The type of the message input. Always `message`."""
+__all__ = ["ScoreModelGrader"]
 
 
 class ScoreModelGrader(BaseModel):
-    input: List[Input]
+    input: List[EvalItem]
     """The input text. This may include template strings."""
 
     model: str
src/openai/types/graders/score_model_grader_param.py
@@ -2,41 +2,16 @@
 
 from __future__ import annotations
 
-from typing import Union, Iterable
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Iterable
+from typing_extensions import Literal, Required, TypedDict
 
-from ..responses.response_input_text_param import ResponseInputTextParam
+from ..shared_params.eval_item import EvalItem
 
-__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"]
-
-
-class InputContentOutputText(TypedDict, total=False):
-    text: Required[str]
-    """The text output from the model."""
-
-    type: Required[Literal["output_text"]]
-    """The type of the output text. Always `output_text`."""
-
-
-InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText]
-
-
-class Input(TypedDict, total=False):
-    content: Required[InputContent]
-    """Text inputs to the model - can contain template strings."""
-
-    role: Required[Literal["user", "assistant", "system", "developer"]]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Literal["message"]
-    """The type of the message input. Always `message`."""
+__all__ = ["ScoreModelGraderParam"]
 
 
 class ScoreModelGraderParam(TypedDict, total=False):
-    input: Required[Iterable[Input]]
+    input: Required[Iterable[EvalItem]]
     """The input text. This may include template strings."""
 
     model: Required[str]
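The score-model variant takes the same EvalItem list; a minimal sketch with a hypothetical grader name:

    grader = {
        "type": "score_model",
        "name": "answer-quality",
        "model": "gpt-4o",
        "input": [{"role": "user", "content": "Score this answer: {{sample.output_text}}"}],
    }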
src/openai/types/shared/__init__.py
@@ -1,6 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from .metadata import Metadata as Metadata
+from .eval_item import EvalItem as EvalItem
 from .reasoning import Reasoning as Reasoning
 from .all_models import AllModels as AllModels
 from .chat_model import ChatModel as ChatModel
src/openai/types/shared/eval_item.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+from ..responses.response_input_text import ResponseInputText
+
+__all__ = ["EvalItem", "Content", "ContentOutputText"]
+
+
+class ContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+Content: TypeAlias = Union[str, ResponseInputText, ContentOutputText]
+
+
+class EvalItem(BaseModel):
+    content: Content
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
src/openai/types/shared_params/__init__.py
@@ -1,6 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from .metadata import Metadata as Metadata
+from .eval_item import EvalItem as EvalItem
 from .reasoning import Reasoning as Reasoning
 from .chat_model import ChatModel as ChatModel
 from .compound_filter import CompoundFilter as CompoundFilter
src/openai/types/shared_params/eval_item.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..responses.response_input_text_param import ResponseInputTextParam
+
+__all__ = ["EvalItem", "Content", "ContentOutputText"]
+
+
+class ContentOutputText(TypedDict, total=False):
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
+
+
+Content: TypeAlias = Union[str, ResponseInputTextParam, ContentOutputText]
+
+
+class EvalItem(TypedDict, total=False):
+    content: Required[Content]
+    """Text inputs to the model - can contain template strings."""
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
src/openai/types/__init__.py
@@ -6,6 +6,7 @@ from .batch import Batch as Batch
 from .image import Image as Image
 from .model import Model as Model
 from .shared import (
+    EvalItem as EvalItem,
     Metadata as Metadata,
     AllModels as AllModels,
     ChatModel as ChatModel,
@@ -76,12 +77,14 @@ from .vector_store_search_response import VectorStoreSearchResponse as VectorSto
 from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
 from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
 from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
+from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig
 from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
 from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
 from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
 from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
 from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
+from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam
 from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
 from .eval_stored_completions_data_source_config import (
     EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig,
src/openai/types/eval_create_params.py
@@ -6,10 +6,10 @@ from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .shared_params.metadata import Metadata
+from .shared_params.eval_item import EvalItem
 from .graders.python_grader_param import PythonGraderParam
 from .graders.score_model_grader_param import ScoreModelGraderParam
 from .graders.string_check_grader_param import StringCheckGraderParam
-from .responses.response_input_text_param import ResponseInputTextParam
 from .graders.text_similarity_grader_param import TextSimilarityGraderParam
 
 __all__ = [
@@ -22,9 +22,6 @@ __all__ = [
     "TestingCriterionLabelModel",
     "TestingCriterionLabelModelInput",
     "TestingCriterionLabelModelInputSimpleInputMessage",
-    "TestingCriterionLabelModelInputEvalItem",
-    "TestingCriterionLabelModelInputEvalItemContent",
-    "TestingCriterionLabelModelInputEvalItemContentOutputText",
     "TestingCriterionTextSimilarity",
     "TestingCriterionPython",
     "TestingCriterionScoreModel",
@@ -93,36 +90,7 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
     """The role of the message (e.g. "system", "assistant", "user")."""
 
 
-class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False):
-    text: Required[str]
-    """The text output from the model."""
-
-    type: Required[Literal["output_text"]]
-    """The type of the output text. Always `output_text`."""
-
-
-TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[
-    str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText
-]
-
-
-class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False):
-    content: Required[TestingCriterionLabelModelInputEvalItemContent]
-    """Text inputs to the model - can contain template strings."""
-
-    role: Required[Literal["user", "assistant", "system", "developer"]]
-    """The role of the message input.
-
-    One of `user`, `assistant`, `system`, or `developer`.
-    """
-
-    type: Literal["message"]
-    """The type of the message input. Always `message`."""
-
-
-TestingCriterionLabelModelInput: TypeAlias = Union[
-    TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem
-]
+TestingCriterionLabelModelInput: TypeAlias = Union[TestingCriterionLabelModelInputSimpleInputMessage, EvalItem]
 
 
 class TestingCriterionLabelModel(TypedDict, total=False):
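The union now accepts the shared `EvalItem` alongside the simple message shape. A hedged sketch of a mixed `input` list; the `content` field of the simple message shape is assumed to be a plain string, since it is not shown in this hunk:

```python
from typing import List

from openai.types.eval_create_params import TestingCriterionLabelModelInput

# Both entries type-check against the union: the first as a
# TestingCriterionLabelModelInputSimpleInputMessage, the second as the
# shared EvalItem with an output_text content block.
inputs: List[TestingCriterionLabelModelInput] = [
    {"role": "system", "content": "You are a strict grader."},
    {
        "role": "assistant",
        "content": {"type": "output_text", "text": "{{sample.output_text}}"},
        "type": "message",
    },
]
```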
src/openai/types/vector_store.py
@@ -5,8 +5,9 @@ from typing_extensions import Literal
 
 from .._models import BaseModel
 from .shared.metadata import Metadata
+from .vector_store_expiration_after import VectorStoreExpirationAfter
 
-__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
+__all__ = ["VectorStore", "FileCounts"]
 
 
 class FileCounts(BaseModel):
@@ -26,17 +27,6 @@ class FileCounts(BaseModel):
     """The total number of files."""
 
 
-class ExpiresAfter(BaseModel):
-    anchor: Literal["last_active_at"]
-    """Anchor timestamp after which the expiration policy applies.
-
-    Supported anchors: `last_active_at`.
-    """
-
-    days: int
-    """The number of days after the anchor time that the vector store will expire."""
-
-
 class VectorStore(BaseModel):
     id: str
     """The identifier, which can be referenced in API endpoints."""
@@ -75,7 +65,7 @@ class VectorStore(BaseModel):
     usage_bytes: int
     """The total number of bytes used by the files in the vector store."""
 
-    expires_after: Optional[ExpiresAfter] = None
+    expires_after: Optional[VectorStoreExpirationAfter] = None
     """The expiration policy for a vector store."""
 
     expires_at: Optional[int] = None
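On the read side the change is invisible at runtime; only the type name moves. A sketch of consuming the relocated model (the store ID is hypothetical):

```python
from openai import OpenAI

client = OpenAI()

# expires_after is now typed as the shared VectorStoreExpirationAfter
# model rather than the old inline ExpiresAfter class.
vs = client.vector_stores.retrieve("vs_123")  # hypothetical ID
if vs.expires_after is not None:
    print(f"Expires {vs.expires_after.days} days after {vs.expires_after.anchor}.")
```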
src/openai/types/vector_store_create_params.py
@@ -3,12 +3,13 @@
 from __future__ import annotations
 
 from typing import List, Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import TypedDict
 
 from .shared_params.metadata import Metadata
 from .file_chunking_strategy_param import FileChunkingStrategyParam
+from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
 
-__all__ = ["VectorStoreCreateParams", "ExpiresAfter"]
+__all__ = ["VectorStoreCreateParams"]
 
 
 class VectorStoreCreateParams(TypedDict, total=False):
@@ -19,7 +20,7 @@ class VectorStoreCreateParams(TypedDict, total=False):
     non-empty.
     """
 
-    expires_after: ExpiresAfter
+    expires_after: VectorStoreExpirationAfterParam
     """The expiration policy for a vector store."""
 
     file_ids: List[str]
@@ -41,14 +42,3 @@ class VectorStoreCreateParams(TypedDict, total=False):
 
     name: str
     """The name of the vector store."""
-
-
-class ExpiresAfter(TypedDict, total=False):
-    anchor: Required[Literal["last_active_at"]]
-    """Anchor timestamp after which the expiration policy applies.
-
-    Supported anchors: `last_active_at`.
-    """
-
-    days: Required[int]
-    """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_expiration_after.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["VectorStoreExpirationAfter"]
+
+
+class VectorStoreExpirationAfter(BaseModel):
+    anchor: Literal["last_active_at"]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `last_active_at`.
+    """
+
+    days: int
+    """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_expiration_after_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["VectorStoreExpirationAfterParam"]
+
+
+class VectorStoreExpirationAfterParam(TypedDict, total=False):
+    anchor: Required[Literal["last_active_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `last_active_at`.
+    """
+
+    days: Required[int]
+    """The number of days after the anchor time that the vector store will expire."""
src/openai/types/vector_store_update_params.py
@@ -3,15 +3,16 @@
 from __future__ import annotations
 
 from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import TypedDict
 
 from .shared_params.metadata import Metadata
+from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam
 
-__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
+__all__ = ["VectorStoreUpdateParams"]
 
 
 class VectorStoreUpdateParams(TypedDict, total=False):
-    expires_after: Optional[ExpiresAfter]
+    expires_after: Optional[VectorStoreExpirationAfterParam]
     """The expiration policy for a vector store."""
 
     metadata: Optional[Metadata]
@@ -26,14 +27,3 @@ class VectorStoreUpdateParams(TypedDict, total=False):
 
     name: Optional[str]
     """The name of the vector store."""
-
-
-class ExpiresAfter(TypedDict, total=False):
-    anchor: Required[Literal["last_active_at"]]
-    """Anchor timestamp after which the expiration policy applies.
-
-    Supported anchors: `last_active_at`.
-    """
-
-    days: Required[int]
-    """The number of days after the anchor time that the vector store will expire."""
.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 101
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml
 openapi_spec_hash: 602e14add4bee018c6774e320ce309b8
-config_hash: 7da27f7260075e8813ddcea542fba1bf
+config_hash: bdacc55eb995c15255ec82130eb8c3bb
api.md
@@ -7,6 +7,7 @@ from openai.types import (
     ComparisonFilter,
     CompoundFilter,
     ErrorObject,
+    EvalItem,
     FunctionDefinition,
     FunctionParameters,
     Metadata,
@@ -343,6 +344,7 @@ from openai.types import (
     StaticFileChunkingStrategyObjectParam,
     VectorStore,
     VectorStoreDeleted,
+    VectorStoreExpirationAfter,
     VectorStoreSearchResponse,
 )
 ```
@@ -519,6 +521,7 @@ from openai.types.beta import (
     AssistantToolChoiceOption,
     Thread,
     ThreadDeleted,
+    TruncationObject,
 )
 ```
 
@@ -815,6 +818,8 @@ from openai.types.evals import (
     CreateEvalJSONLRunDataSource,
     CreateEvalResponsesRunDataSource,
     EvalAPIError,
+    EvalJSONLFileContentSource,
+    EvalJSONLFileIDSource,
     RunCreateResponse,
     RunRetrieveResponse,
     RunListResponse,