# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
3from typing import List, Optional
4from typing_extensions import Literal
5
6from ..._models import BaseModel
7from .assistant_tool import AssistantTool
8from ..shared.metadata import Metadata
9from .assistant_response_format_option import AssistantResponseFormatOption
10
# Public re-export surface of this module.
__all__ = [
    "Assistant",
    "ToolResources",
    "ToolResourcesCodeInterpreter",
    "ToolResourcesFileSearch",
]
12
13
class ToolResourcesCodeInterpreter(BaseModel):
    file_ids: Optional[List[str]] = None
    """
    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
    available to the `code_interpreter` tool. There can be a maximum of 20 files
    associated with the tool.
    """
21
22
class ToolResourcesFileSearch(BaseModel):
    vector_store_ids: Optional[List[str]] = None
    """
    A list of
    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
    IDs attached to this assistant. There can be a maximum of 1 vector store
    attached to the assistant.
    """
31
32
class ToolResources(BaseModel):
    """A set of resources used by the assistant's tools, keyed by tool type."""

    code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
    """Resources for the `code_interpreter` tool (a list of attached file IDs)."""

    file_search: Optional[ToolResourcesFileSearch] = None
    """Resources for the `file_search` tool (a list of attached vector store IDs)."""
37
38
class Assistant(BaseModel):
    id: str
    """The identifier, which can be referenced in API endpoints."""

    created_at: int
    """The Unix timestamp (in seconds) for when the assistant was created."""

    description: Optional[str] = None
    """The description of the assistant. The maximum length is 512 characters."""

    instructions: Optional[str] = None
    """The system instructions that the assistant uses.

    The maximum length is 256,000 characters.
    """

    metadata: Optional[Metadata] = None
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """

    model: str
    """ID of the model to use.

    You can use the
    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
    see all of your available models, or see our
    [Model overview](https://platform.openai.com/docs/models) for descriptions of
    them.
    """

    name: Optional[str] = None
    """The name of the assistant. The maximum length is 256 characters."""

    object: Literal["assistant"]
    """The object type, which is always `assistant`."""

    tools: List[AssistantTool]
    """A list of tools enabled on the assistant.

    There can be a maximum of 128 tools per assistant. Tools can be of types
    `code_interpreter`, `file_search`, or `function`.
    """

    response_format: Optional[AssistantResponseFormatOption] = None
    """Specifies the format that the model must output.

    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
    Outputs which ensures the model will match your supplied JSON schema. Learn more
    in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
    message the model generates is valid JSON.

    **Important:** when using JSON mode, you **must** also instruct the model to
    produce JSON yourself via a system or user message. Without this, the model may
    generate an unending stream of whitespace until the generation reaches the token
    limit, resulting in a long-running and seemingly "stuck" request. Also note that
    the message content may be partially cut off if `finish_reason="length"`, which
    indicates the generation exceeded `max_tokens` or the conversation exceeded the
    max context length.
    """

    temperature: Optional[float] = None
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values like
    0.2 will make it more focused and deterministic.
    """

    tool_resources: Optional[ToolResources] = None
    """A set of resources that are used by the assistant's tools.

    The resources are specific to the type of tool. For example, the
    `code_interpreter` tool requires a list of file IDs, while the `file_search`
    tool requires a list of vector store IDs.
    """

    top_p: Optional[float] = None
    """
    An alternative to sampling with temperature, called nucleus sampling, where the
    model considers the results of the tokens with top_p probability mass. So 0.1
    means only the tokens comprising the top 10% probability mass are considered.

    We generally recommend altering this or temperature but not both.
    """