# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias

from .tool import Tool
from ..._models import BaseModel
from .response_error import ResponseError
from .response_usage import ResponseUsage
from .response_prompt import ResponsePrompt
from .response_status import ResponseStatus
from .tool_choice_mcp import ToolChoiceMcp
from ..shared.metadata import Metadata
from ..shared.reasoning import Reasoning
from .tool_choice_shell import ToolChoiceShell
from .tool_choice_types import ToolChoiceTypes
from .tool_choice_custom import ToolChoiceCustom
from .response_input_item import ResponseInputItem
from .tool_choice_allowed import ToolChoiceAllowed
from .tool_choice_options import ToolChoiceOptions
from .response_output_item import ResponseOutputItem
from .response_text_config import ResponseTextConfig
from .tool_choice_function import ToolChoiceFunction
from ..shared.responses_model import ResponsesModel
from .tool_choice_apply_patch import ToolChoiceApplyPatch

__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"]


class IncompleteDetails(BaseModel):
    reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
    """The reason why the response is incomplete."""


ToolChoice: TypeAlias = Union[
    ToolChoiceOptions,
    ToolChoiceAllowed,
    ToolChoiceTypes,
    ToolChoiceFunction,
    ToolChoiceMcp,
    ToolChoiceCustom,
    ToolChoiceApplyPatch,
    ToolChoiceShell,
]
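
# Note: `tool_choice` accepts either a plain option string (see
# ToolChoiceOptions) or one of the typed objects above, e.g. a
# ToolChoiceFunction naming a specific function. The field values below are
# illustrative only:
#
#     tool_choice = ToolChoiceFunction(type="function", name="get_weather")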


class Conversation(BaseModel):
    id: str
    """The unique ID of the conversation."""


class Response(BaseModel):
    id: str
    """Unique identifier for this Response."""

    created_at: float
    """Unix timestamp (in seconds) of when this Response was created."""

    error: Optional[ResponseError] = None
    """An error object returned when the model fails to generate a Response."""

    incomplete_details: Optional[IncompleteDetails] = None
    """Details about why the response is incomplete."""

    instructions: Union[str, List[ResponseInputItem], None] = None
    """A system (or developer) message inserted into the model's context.

    When used along with `previous_response_id`, the instructions from a
    previous response are not carried over to the next response. This makes it
    simple to swap out system (or developer) messages in new responses.
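
    For example (illustrative): a follow-up request that sets
    `previous_response_id` and `instructions="Reply in French."` replaces the
    earlier instructions rather than appending to them.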
    """

    metadata: Optional[Metadata] = None
    """A set of up to 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and for querying objects via the API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings
    with a maximum length of 512 characters.
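
    For example (illustrative): `{"order_id": "6735", "cohort": "beta"}`.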
    """

    model: ResponsesModel
    """Model ID used to generate the response, like `gpt-4o` or `o3`.

    OpenAI offers a wide range of models with different capabilities,
    performance characteristics, and price points. Refer to the
    [model guide](https://platform.openai.com/docs/models) to browse and compare
    available models.
    """

    object: Literal["response"]
    """The object type of this resource, always set to `response`."""

    output: List[ResponseOutputItem]
    """An array of content items generated by the model.

    - The length and order of items in the `output` array depend on the model's
      response.
    - Rather than accessing the first item in the `output` array and assuming it
      is an `assistant` message with the content generated by the model,
      consider using the `output_text` property where supported in SDKs.
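
    For example, prefer `response.output_text` (defined as a property on this
    model below) over reaching into `response.output[0]` directly.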
    """

    parallel_tool_calls: bool
    """Whether to allow the model to run tool calls in parallel."""

    temperature: Optional[float] = None
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused and deterministic. We generally recommend
    altering this or `top_p` but not both.
    """

    tool_choice: ToolChoice
    """
    How the model should select which tool (or tools) to use when generating a
    response. See the `tools` parameter to see how to specify which tools the
    model can call.
    """

    tools: List[Tool]
    """An array of tools the model may call while generating a response.

    You can specify which tool to use by setting the `tool_choice` parameter.

    We support the following categories of tools:

    - **Built-in tools**: Tools provided by OpenAI that extend the model's
      capabilities, like
      [web search](https://platform.openai.com/docs/guides/tools-web-search) or
      [file search](https://platform.openai.com/docs/guides/tools-file-search).
      Learn more about
      [built-in tools](https://platform.openai.com/docs/guides/tools).
    - **MCP tools**: Integrations with third-party systems via custom MCP
      servers or predefined connectors such as Google Drive and SharePoint.
      Learn more about
      [MCP tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
    - **Function calls (custom tools)**: Functions that you define, enabling the
      model to call your own code with strongly typed arguments and outputs.
      Learn more about
      [function calling](https://platform.openai.com/docs/guides/function-calling).
    """

    top_p: Optional[float] = None
    """
    An alternative to sampling with temperature, called nucleus sampling, where
    the model considers only the tokens comprising the top `top_p` probability
    mass. So 0.1 means only the tokens comprising the top 10% probability mass
    are considered.

    We generally recommend altering this or `temperature` but not both.
    """

    background: Optional[bool] = None
    """
    Whether to run the model response in the background.
    [Learn more](https://platform.openai.com/docs/guides/background).
    """

    conversation: Optional[Conversation] = None
    """The conversation that this response belongs to.

    Input items and output items from this response are automatically added to
    this conversation.
    """

    max_output_tokens: Optional[int] = None
    """
    An upper bound for the number of tokens that can be generated for a
    response, including visible output tokens and
    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
    """

    max_tool_calls: Optional[int] = None
    """
    The maximum number of total calls to built-in tools that can be processed in
    a response. This maximum applies across all built-in tool calls, not to each
    tool individually. Any further attempts by the model to call a tool will be
    ignored.
    """

    previous_response_id: Optional[str] = None
    """The unique ID of the previous response to the model.

    Use this to create multi-turn conversations. Learn more about
    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    Cannot be used in conjunction with `conversation`.
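
    For example (illustrative): set `previous_response_id=prior.id`, where
    `prior` is an earlier `Response`, on the next request to continue the same
    thread.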
    """

    prompt: Optional[ResponsePrompt] = None
    """
    Reference to a prompt template and its variables.
    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    """

    prompt_cache_key: Optional[str] = None
    """
    Used by OpenAI to cache responses for similar requests to optimize your
    cache hit rates. Replaces the `user` field.
    [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
    """

    prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None
    """The retention policy for the prompt cache.

    Set to `24h` to enable extended prompt caching, which keeps cached prefixes
    active for longer, up to a maximum of 24 hours.
    [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
    """

    reasoning: Optional[Reasoning] = None
    """**gpt-5 and o-series models only**

    Configuration options for
    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    """

    safety_identifier: Optional[str] = None
    """
    A stable identifier used to help detect users of your application that may
    be violating OpenAI's usage policies. The ID should be a string that
    uniquely identifies each user. We recommend hashing their username or email
    address, in order to avoid sending us any identifying information.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    """

    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
    """Specifies the processing type used for serving the request.

    - If set to `auto`, the request will be processed with the service tier
      configured in the Project settings. Unless otherwise configured, the
      Project will use `default`.
    - If set to `default`, the request will be processed with the standard
      pricing and performance for the selected model.
    - If set to [`flex`](https://platform.openai.com/docs/guides/flex-processing)
      or [`priority`](https://openai.com/api-priority-processing/), the request
      will be processed with the corresponding service tier.
    - When not set, the default behavior is `auto`.

    When the `service_tier` parameter is set, the response body will include the
    `service_tier` value based on the processing mode actually used to serve the
    request. This response value may differ from the value set in the parameter.
    """

    status: Optional[ResponseStatus] = None
    """The status of the response generation.

    One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
    `incomplete`.
    """

    text: Optional[ResponseTextConfig] = None
    """Configuration options for a text response from the model.

    Can be plain text or structured JSON data. Learn more:

    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    """

    top_logprobs: Optional[int] = None
    """
    An integer between 0 and 20 specifying the number of most likely tokens to
    return at each token position, each with an associated log probability.
    """

    truncation: Optional[Literal["auto", "disabled"]] = None
    """The truncation strategy to use for the model response.

    - `auto`: If the input to this Response exceeds the model's context window
      size, the model will truncate the response to fit the context window by
      dropping items from the beginning of the conversation.
    - `disabled` (default): If the input size will exceed the context window
      size for a model, the request will fail with a 400 error.
    """

    usage: Optional[ResponseUsage] = None
    """
    Represents token usage details including input tokens, output tokens, a
    breakdown of output tokens, and the total tokens used.
    """

    user: Optional[str] = None
    """This field is being replaced by `safety_identifier` and `prompt_cache_key`.

    Use `prompt_cache_key` instead to maintain caching optimizations. A stable
    identifier for your end-users. Used to boost cache hit rates by better
    bucketing similar requests and to help OpenAI detect and prevent abuse.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
    """

    @property
    def output_text(self) -> str:
        """Convenience property that aggregates all `output_text` items from the `output` list.

        If no `output_text` content blocks exist, an empty string is returned.
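
        Example (a minimal sketch; assumes a configured OpenAI `client`):

            response = client.responses.create(model="gpt-4o", input="Say hello")
            print(response.output_text)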
        """
        texts: List[str] = []
        for output in self.output:
            if output.type == "message":
                for content in output.content:
                    if content.type == "output_text":
                        texts.append(content.text)

        return "".join(texts)