# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from ..._models import BaseModel

__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]


class InputTokensDetails(BaseModel):
    cached_tokens: int
    """The number of tokens that were retrieved from the cache.

    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
    """


class OutputTokensDetails(BaseModel):
    reasoning_tokens: int
    """The number of reasoning tokens."""


class ResponseUsage(BaseModel):
    input_tokens: int
    """The number of input tokens."""

    input_tokens_details: InputTokensDetails
    """A detailed breakdown of the input tokens."""

    output_tokens: int
    """The number of output tokens."""

    output_tokens_details: OutputTokensDetails
    """A detailed breakdown of the output tokens."""

    total_tokens: int
    """The total number of tokens used."""