Commit 6c8fcd54

Author: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Date:   2024-06-19 03:53:21

feat(api): add service tier argument for chat completions (#1486)

Parent: 811f4e7
src/openai/resources/chat/completions.py
@@ -59,6 +59,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -163,6 +164,16 @@ class Completions(SyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -236,6 +247,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -346,6 +358,16 @@ class Completions(SyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -412,6 +434,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -522,6 +545,16 @@ class Completions(SyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -587,6 +620,7 @@ class Completions(SyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -620,6 +654,7 @@ class Completions(SyncAPIResource):
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
                     "seed": seed,
+                    "service_tier": service_tier,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,
@@ -667,6 +702,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -771,6 +807,16 @@ class AsyncCompletions(AsyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
@@ -844,6 +890,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -954,6 +1001,16 @@ class AsyncCompletions(AsyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -1020,6 +1077,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1130,6 +1188,16 @@ class AsyncCompletions(AsyncAPIResource):
               should refer to the `system_fingerprint` response parameter to monitor changes
               in the backend.
 
+          service_tier: Specifies the latency tier to use for processing the request. This parameter is
+              relevant for customers subscribed to the scale tier service:
+
+              - If set to 'auto', the system will utilize scale tier credits until they are
+                exhausted.
+              - If set to 'default', the request will be processed in the shared cluster.
+
+              When this parameter is set, the response body will include the `service_tier`
+              utilized.
+
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           stream_options: Options for streaming response. Only set this when you set `stream: true`.
@@ -1195,6 +1263,7 @@ class AsyncCompletions(AsyncAPIResource):
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -1228,6 +1297,7 @@ class AsyncCompletions(AsyncAPIResource):
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
                     "seed": seed,
+                    "service_tier": service_tier,
                     "stop": stop,
                     "stream": stream,
                     "stream_options": stream_options,
src/openai/types/chat/chat_completion.py
@@ -56,6 +56,13 @@ class ChatCompletion(BaseModel):
     object: Literal["chat.completion"]
     """The object type, which is always `chat.completion`."""
 
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request.
+
+    This field is only included if the `service_tier` parameter is specified in the
+    request.
+    """
+
     system_fingerprint: Optional[str] = None
     """This fingerprint represents the backend configuration that the model runs with.
 
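Because the new field is Optional and only populated when the request specified `service_tier`, callers should guard for None; a short sketch reusing the `completion` from the earlier example:

    if completion.service_tier is not None:
        # narrowed to Literal["scale", "default"] by the model definition above
        print(f"processed on the {completion.service_tier} tier")
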
src/openai/types/chat/chat_completion_chunk.py
@@ -122,6 +122,13 @@ class ChatCompletionChunk(BaseModel):
     object: Literal["chat.completion.chunk"]
     """The object type, which is always `chat.completion.chunk`."""
 
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request.
+
+    This field is only included if the `service_tier` parameter is specified in the
+    request.
+    """
+
     system_fingerprint: Optional[str] = None
     """
     This fingerprint represents the backend configuration that the model runs with.
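
The same field rides along on streaming responses; a sketch (placeholder model name again, reusing the client from above):

    stream = client.chat.completions.create(
        model="gpt-4o",  # placeholder
        messages=[{"role": "user", "content": "Say hello"}],
        service_tier="auto",
        stream=True,
    )
    for chunk in stream:
        if chunk.service_tier is not None:
            print(chunk.service_tier)  # "scale" or "default"
            break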
src/openai/types/chat/completion_create_params.py
@@ -146,6 +146,19 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     in the backend.
     """
 
+    service_tier: Optional[Literal["auto", "default"]]
+    """Specifies the latency tier to use for processing the request.
+
+    This parameter is relevant for customers subscribed to the scale tier service:
+
+    - If set to 'auto', the system will utilize scale tier credits until they are
+      exhausted.
+    - If set to 'default', the request will be processed in the shared cluster.
+
+    When this parameter is set, the response body will include the `service_tier`
+    utilized.
+    """
+
     stop: Union[Optional[str], List[str]]
     """Up to 4 sequences where the API will stop generating further tokens."""
 
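Since the params type is a TypedDict, a type checker rejects tier values outside the Literal at the call site; a sketch assuming the non-streaming variant `CompletionCreateParamsNonStreaming` that accompanies the base class in this module:

    from openai import OpenAI
    from openai.types.chat import completion_create_params

    # anything other than "auto", "default", or None fails type checking here
    params: completion_create_params.CompletionCreateParamsNonStreaming = {
        "model": "gpt-4o",  # placeholder
        "messages": [{"role": "user", "content": "Say hello"}],
        "service_tier": "auto",
    }
    completion = OpenAI().chat.completions.create(**params)
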
src/openai/_base_client.py
@@ -457,7 +457,7 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
                 raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
 
         headers = self._build_headers(options)
-        params = _merge_mappings(self._custom_query, options.params)
+        params = _merge_mappings(self.default_query, options.params)
         content_type = headers.get("Content-Type")
 
         # If the given Content-Type header is multipart/form-data then it
@@ -593,6 +593,12 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
             **self._custom_headers,
         }
 
+    @property
+    def default_query(self) -> dict[str, object]:
+        return {
+            **self._custom_query,
+        }
+
     def _validate_headers(
         self,
         headers: Headers,  # noqa: ARG002
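
The new `default_query` property mirrors the existing `default_headers` property visible in the hunk above, and gives subclasses a single seam for query params sent with every request; a sketch with a hypothetical subclass and param:

    from __future__ import annotations

    from openai import OpenAI

    class PinnedQueryClient(OpenAI):  # hypothetical subclass, for illustration
        @property
        def default_query(self) -> dict[str, object]:
            # merge the base client's custom query params with our own default
            return {**super().default_query, "debug": "true"}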
tests/api_resources/chat/test_completions.py
@@ -60,6 +60,7 @@ class TestCompletions:
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
+            service_tier="auto",
             stop="string",
             stream=False,
             stream_options={"include_usage": True},
@@ -176,6 +177,7 @@ class TestCompletions:
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
+            service_tier="auto",
             stop="string",
             stream_options={"include_usage": True},
             temperature=1,
@@ -294,6 +296,7 @@ class TestAsyncCompletions:
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
+            service_tier="auto",
             stop="string",
             stream=False,
             stream_options={"include_usage": True},
@@ -410,6 +413,7 @@ class TestAsyncCompletions:
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
+            service_tier="auto",
             stop="string",
             stream_options={"include_usage": True},
             temperature=1,
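
A complementary assertion on the response model would look like this, a sketch in the style of the tests above (not part of this commit; placeholder model name):

    @parametrize
    def test_service_tier_round_trip(self, client: OpenAI) -> None:
        completion = client.chat.completions.create(
            model="gpt-4o",  # placeholder
            messages=[{"content": "string", "role": "system"}],
            service_tier="auto",
        )
        # the Literal added in chat_completion.py admits only these values
        assert completion.service_tier in ("scale", "default", None)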
.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 64
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5cb1810135c35c5024698f3365626471a04796e26e393aefe1aa0ba3c0891919.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8fe357c6b5a425d810d731e4102a052d8e38c5e2d66950e6de1025415160bf88.yml