Commit 3208335a

Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
2024-03-05 02:17:38
chore(api): update docs (#1212)
1 parent a0caa09
src/openai/resources/audio/speech.py
@@ -44,7 +44,7 @@ class Speech(SyncAPIResource):
         input: str,
         model: Union[str, Literal["tts-1", "tts-1-hd"]],
         voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
-        response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"] | NotGiven = NOT_GIVEN,
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -68,11 +68,8 @@ class Speech(SyncAPIResource):
               available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
 
-          response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
-              `flac`, `pcm`, and `wav`.
-
-              The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
-              sample rate, mono channel, and 16-bit depth in signed little-endian format.
+          response_format: The format to return audio in. Supported formats are `mp3`, `opus`,
+              `aac`, `flac`, `wav`, and `pcm`.
 
           speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
               the default.
@@ -120,7 +117,7 @@ class AsyncSpeech(AsyncAPIResource):
         input: str,
         model: Union[str, Literal["tts-1", "tts-1-hd"]],
         voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
-        response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"] | NotGiven = NOT_GIVEN,
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -144,11 +141,8 @@ class AsyncSpeech(AsyncAPIResource):
               available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
 
-          response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
-              `flac`, `pcm`, and `wav`.
-
-              The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
-              sample rate, mono channel, and 16-bit depth in signed little-endian format.
+          response_format: The format to return audio in. Supported formats are `mp3`, `opus`,
+              `aac`, `flac`, `wav`, and `pcm`.
 
           speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
               the default.
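
The reordered `response_format` literal and the trimmed docstring do not change behavior; every listed format is still accepted. A minimal sketch of the call, assuming `OPENAI_API_KEY` is set in the environment (the input text and output path are illustrative):

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

# Any of the documented formats is accepted; "pcm" is raw headerless
# audio, while "wav" and the rest are containerized files.
response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
    response_format="wav",
)
response.write_to_file(Path("speech.wav"))
```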
src/openai/resources/audio/transcriptions.py
@@ -60,7 +60,8 @@ class Transcriptions(SyncAPIResource):
               The audio file object (not file name) to transcribe, in one of these formats:
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
 
-          model: ID of the model to use. Only `whisper-1` is currently available.
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
 
           language: The language of the input audio. Supplying the input language in
               [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
@@ -80,9 +81,11 @@ class Transcriptions(SyncAPIResource):
               [log probability](https://en.wikipedia.org/wiki/Log_probability) to
               automatically increase the temperature until certain thresholds are hit.
 
-          timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
-              options: `word`, or `segment`. Note: There is no additional latency for segment
-              timestamps, but generating word timestamps incurs additional latency.
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
 
           extra_headers: Send extra headers
 
@@ -154,7 +157,8 @@ class AsyncTranscriptions(AsyncAPIResource):
               The audio file object (not file name) to transcribe, in one of these formats:
               flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
 
-          model: ID of the model to use. Only `whisper-1` is currently available.
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
 
           language: The language of the input audio. Supplying the input language in
               [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
@@ -174,9 +178,11 @@ class AsyncTranscriptions(AsyncAPIResource):
               [log probability](https://en.wikipedia.org/wiki/Log_probability) to
               automatically increase the temperature until certain thresholds are hit.
 
-          timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
-              options: `word`, or `segment`. Note: There is no additional latency for segment
-              timestamps, but generating word timestamps incurs additional latency.
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
 
           extra_headers: Send extra headers
 
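The clarified `timestamp_granularities` docstring documents a coupling with `response_format`; a sketch of a call that satisfies it (the audio path is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# Word and segment timestamps both require verbose JSON output,
# per the updated docstring. "sample.mp3" is a placeholder path.
with open("sample.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
        response_format="verbose_json",
        timestamp_granularities=["word", "segment"],
    )

print(transcript.text)
```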
src/openai/resources/audio/translations.py
@@ -57,7 +57,8 @@ class Translations(SyncAPIResource):
          file: The audio file object (not file name) to translate, in one of these formats: flac,
               mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
 
-          model: ID of the model to use. Only `whisper-1` is currently available.
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
 
           prompt: An optional text to guide the model's style or continue a previous audio
               segment. The
@@ -138,7 +139,8 @@ class AsyncTranslations(AsyncAPIResource):
          file: The audio file object (not file name) to translate, in one of these formats: flac,
               mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
 
-          model: ID of the model to use. Only `whisper-1` is currently available.
+          model: ID of the model to use. Only `whisper-1` (which is powered by our open source
+              Whisper V2 model) is currently available.
 
           prompt: An optional text to guide the model's style or continue a previous audio
               segment. The
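
The translation endpoint takes the same single model id; a minimal sketch (the file name is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# `whisper-1` remains the only accepted model id for translations.
with open("interview_de.mp3", "rb") as audio_file:
    translation = client.audio.translations.create(
        model="whisper-1",
        file=audio_file,
    )

print(translation.text)  # English text translated from the source audio
```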
src/openai/resources/chat/completions.py
@@ -208,9 +208,9 @@ class Completions(SyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
@@ -398,9 +398,9 @@ class Completions(SyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
@@ -588,9 +588,9 @@ class Completions(SyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
@@ -875,9 +875,9 @@ class AsyncCompletions(AsyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
@@ -1065,9 +1065,9 @@ class AsyncCompletions(AsyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
@@ -1255,9 +1255,9 @@ class AsyncCompletions(AsyncAPIResource):
               tool. Use this to provide a list of functions the model may generate JSON inputs
               for.
 
-          top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
-              at each token position, each with an associated log probability. `logprobs` must
-              be set to `true` if this parameter is used.
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
 
           top_p: An alternative to sampling with temperature, called nucleus sampling, where the
               model considers the results of the tokens with top_p probability mass. So 0.1
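
The `top_logprobs` ceiling moves from 5 to 20 across all overloads; a sketch exercising the new maximum, assuming an illustrative model and prompt:

```python
from openai import OpenAI

client = OpenAI()

# top_logprobs may now be as high as 20, and logprobs must be
# enabled for it to take effect.
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello."}],
    logprobs=True,
    top_logprobs=20,
)

for entry in completion.choices[0].logprobs.content:
    print(entry.token, entry.logprob)
```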
src/openai/resources/images.py
@@ -70,7 +70,8 @@ class Images(SyncAPIResource):
               `n=1` is supported.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024`.
@@ -151,7 +152,8 @@ class Images(SyncAPIResource):
           n: The number of images to generate. Must be between 1 and 10.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024`.
@@ -231,7 +233,8 @@ class Images(SyncAPIResource):
               for `dall-e-3`.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
@@ -315,7 +318,8 @@ class AsyncImages(AsyncAPIResource):
               `n=1` is supported.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024`.
@@ -396,7 +400,8 @@ class AsyncImages(AsyncAPIResource):
           n: The number of images to generate. Must be between 1 and 10.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024`.
@@ -476,7 +481,8 @@ class AsyncImages(AsyncAPIResource):
               for `dall-e-3`.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`.
+              `b64_json`. URLs are only valid for 60 minutes after the image has been
+              generated.
 
           size: The size of the generated images. Must be one of `256x256`, `512x512`, or
               `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
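
Since URLs now carry a documented 60-minute lifetime, callers that persist images may prefer `b64_json`; a sketch under that assumption (prompt and file name are illustrative):

```python
import base64

from openai import OpenAI

client = OpenAI()

# b64_json sidesteps the 60-minute URL expiry the docstrings now note.
image = client.images.generate(
    model="dall-e-3",
    prompt="a watercolor lighthouse at dusk",
    response_format="b64_json",
)

with open("lighthouse.png", "wb") as f:
    f.write(base64.b64decode(image.data[0].b64_json))
```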
src/openai/resources/moderations.py
@@ -46,7 +46,7 @@ class Moderations(SyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModerationCreateResponse:
         """
-        Classifies if text violates OpenAI's Content Policy
+        Classifies if text is potentially harmful.
 
         Args:
           input: The input text to classify
@@ -106,7 +106,7 @@ class AsyncModerations(AsyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModerationCreateResponse:
         """
-        Classifies if text violates OpenAI's Content Policy
+        Classifies if text is potentially harmful.
 
         Args:
           input: The input text to classify
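
The summary line changes, but the call shape does not; a minimal sketch (the input string is illustrative):

```python
from openai import OpenAI

client = OpenAI()

moderation = client.moderations.create(input="I want to hurt someone.")

result = moderation.results[0]
# `flagged` is the top-level signal; per-category booleans sit beside it.
print(result.flagged, result.categories.violence)
```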
src/openai/types/audio/speech_create_params.py
@@ -26,13 +26,10 @@ class SpeechCreateParams(TypedDict, total=False):
     [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
     """
 
-    response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"]
-    """The format to return audio in.
+    response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
+    """The format to audio in.
 
-    Supported formats are `mp3`, `opus`, `aac`, `flac`, `pcm`, and `wav`.
-
-    The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
-    sample rate, mono channel, and 16-bit depth in signed little-endian format.
+    Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
     """
 
     speed: float
src/openai/types/audio/transcription.py
@@ -7,3 +7,4 @@ __all__ = ["Transcription"]
 
 class Transcription(BaseModel):
     text: str
+    """The transcribed text."""
src/openai/types/audio/transcription_create_params.py
@@ -18,7 +18,11 @@ class TranscriptionCreateParams(TypedDict, total=False):
     """
 
     model: Required[Union[str, Literal["whisper-1"]]]
-    """ID of the model to use. Only `whisper-1` is currently available."""
+    """ID of the model to use.
+
+    Only `whisper-1` (which is powered by our open source Whisper V2 model) is
+    currently available.
+    """
 
     language: str
     """The language of the input audio.
@@ -54,7 +58,8 @@ class TranscriptionCreateParams(TypedDict, total=False):
     timestamp_granularities: List[Literal["word", "segment"]]
     """The timestamp granularities to populate for this transcription.
 
-    Any of these options: `word`, or `segment`. Note: There is no additional latency
-    for segment timestamps, but generating word timestamps incurs additional
-    latency.
+    `response_format` must be set to `verbose_json` to use timestamp granularities.
+    Either or both of these options are supported: `word` or `segment`. Note: There
+    is no additional latency for segment timestamps, but generating word timestamps
+    incurs additional latency.
     """
src/openai/types/audio/translation_create_params.py
@@ -18,7 +18,11 @@ class TranslationCreateParams(TypedDict, total=False):
     """
 
     model: Required[Union[str, Literal["whisper-1"]]]
-    """ID of the model to use. Only `whisper-1` is currently available."""
+    """ID of the model to use.
+
+    Only `whisper-1` (which is powered by our open source Whisper V2 model) is
+    currently available.
+    """
 
     prompt: str
     """An optional text to guide the model's style or continue a previous audio
src/openai/types/beta/threads/run.py
@@ -22,8 +22,8 @@ __all__ = [
 
 
 class LastError(BaseModel):
-    code: Literal["server_error", "rate_limit_exceeded"]
-    """One of `server_error` or `rate_limit_exceeded`."""
+    code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
+    """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""
 
     message: str
     """A human-readable description of the error."""
src/openai/types/chat/chat_completion_token_logprob.py
@@ -20,7 +20,12 @@ class TopLogprob(BaseModel):
     """
 
     logprob: float
-    """The log probability of this token."""
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
 
 
 class ChatCompletionTokenLogprob(BaseModel):
@@ -36,7 +41,12 @@ class ChatCompletionTokenLogprob(BaseModel):
     """
 
     logprob: float
-    """The log probability of this token."""
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
 
     top_logprobs: List[TopLogprob]
     """List of the most likely tokens and their log probability, at this token
src/openai/types/chat/completion_create_params.py
@@ -195,9 +195,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):
 
     top_logprobs: Optional[int]
     """
-    An integer between 0 and 5 specifying the number of most likely tokens to return
-    at each token position, each with an associated log probability. `logprobs` must
-    be set to `true` if this parameter is used.
+    An integer between 0 and 20 specifying the number of most likely tokens to
+    return at each token position, each with an associated log probability.
+    `logprobs` must be set to `true` if this parameter is used.
     """
 
     top_p: Optional[float]
src/openai/types/image_create_variation_params.py
@@ -32,7 +32,8 @@ class ImageCreateVariationParams(TypedDict, total=False):
     response_format: Optional[Literal["url", "b64_json"]]
     """The format in which the generated images are returned.
 
-    Must be one of `url` or `b64_json`.
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
     """
 
     size: Optional[Literal["256x256", "512x512", "1024x1024"]]
src/openai/types/image_edit_params.py
@@ -43,7 +43,8 @@ class ImageEditParams(TypedDict, total=False):
     response_format: Optional[Literal["url", "b64_json"]]
     """The format in which the generated images are returned.
 
-    Must be one of `url` or `b64_json`.
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
     """
 
     size: Optional[Literal["256x256", "512x512", "1024x1024"]]
src/openai/types/image_generate_params.py
@@ -35,7 +35,8 @@ class ImageGenerateParams(TypedDict, total=False):
     response_format: Optional[Literal["url", "b64_json"]]
     """The format in which the generated images are returned.
 
-    Must be one of `url` or `b64_json`.
+    Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
+    image has been generated.
     """
 
     size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
src/openai/types/moderation.py
@@ -114,7 +114,4 @@ class Moderation(BaseModel):
     """A list of the categories along with their scores as predicted by model."""
 
     flagged: bool
-    """
-    Whether the content violates
-    [OpenAI's usage policies](/policies/usage-policies).
-    """
+    """Whether any of the below categories are flagged."""