# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from .._models import BaseModel

__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]


class CompletionTokensDetails(BaseModel):
    accepted_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that
    appeared in the completion.
    """

    audio_tokens: Optional[int] = None
18 """Audio input tokens generated by the model."""

    reasoning_tokens: Optional[int] = None
    """Tokens generated by the model for reasoning."""

    rejected_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that did
    not appear in the completion. However, like reasoning tokens, these tokens are
    still counted in the total completion tokens for purposes of billing, output,
    and context window limits.
    """


class PromptTokensDetails(BaseModel):
    audio_tokens: Optional[int] = None
    """Audio input tokens present in the prompt."""

    cached_tokens: Optional[int] = None
    """Cached tokens present in the prompt."""


class CompletionUsage(BaseModel):
    completion_tokens: int
    """Number of tokens in the generated completion."""

    prompt_tokens: int
    """Number of tokens in the prompt."""

    total_tokens: int
    """Total number of tokens used in the request (prompt + completion)."""

    completion_tokens_details: Optional[CompletionTokensDetails] = None
    """Breakdown of tokens used in a completion."""

    prompt_tokens_details: Optional[PromptTokensDetails] = None
    """Breakdown of tokens used in the prompt."""