 1# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 2
 3from typing import Optional
 4from typing_extensions import Literal
 5
 6from ..._models import BaseModel
 7
 8__all__ = ["AudioTranscription"]
 9
10
11class AudioTranscription(BaseModel):
12    """Audio transcription configuration options.

    Generated from the OpenAPI spec (see file header); every field is
    optional and defaults to ``None``, meaning the server-side default
    applies when the field is not supplied.
    """

    # NOTE(review): the bare-string docstrings below follow this generated
    # file's convention for attribute documentation — do not convert them
    # to comments, and regenerate rather than hand-edit field definitions.
    language: Optional[str] = None
13    """The language of the input audio.
14
15    Supplying the input language in
16    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
17    format will improve accuracy and latency.
18    """
19
20    model: Optional[
21        Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"]
22    ] = None
23    """The model to use for transcription.
24
25    Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`,
26    and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need
27    diarization with speaker labels.
28    """
29
30    prompt: Optional[str] = None
31    """
32    An optional text to guide the model's style or continue a previous audio
33    segment. For `whisper-1`, the
34    [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
35    For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
36    prompt is a free text string, for example "expect words related to technology".
37    """
37    """