Commit a228a539

Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
2023-11-09 03:51:57
feat(api): unify function types (#741)
Also fixes an enum `assistant.run.step` -> `thread.run.step`
1 parent ee28c46
src/openai/resources/chat/completions.py
@@ -137,8 +137,18 @@ class Completions(SyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -304,8 +314,18 @@ class Completions(SyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -464,8 +484,18 @@ class Completions(SyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -704,8 +734,18 @@ class AsyncCompletions(AsyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -871,8 +911,18 @@ class AsyncCompletions(AsyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -1031,8 +1081,18 @@ class AsyncCompletions(AsyncAPIResource):
 
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and appearance of a "stuck" request. Also
+              note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
src/openai/types/beta/threads/runs/run_step.py
@@ -65,8 +65,8 @@ class RunStep(BaseModel):
     a maxium of 512 characters long.
     """
 
-    object: Literal["assistant.run.step"]
-    """The object type, which is always `assistant.run.step``."""
+    object: Literal["thread.run.step"]
+    """The object type, which is always `thread.run.step`."""
 
     run_id: str
     """
@@ -76,8 +76,8 @@ class RunStep(BaseModel):
 
     status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
     """
-    The status of the run, which can be either `in_progress`, `cancelled`, `failed`,
-    `completed`, or `expired`.
+    The status of the run step, which can be either `in_progress`, `cancelled`,
+    `failed`, `completed`, or `expired`.
     """
 
     step_details: StepDetails
src/openai/types/beta/threads/run.py
@@ -1,9 +1,10 @@
 # File generated from our OpenAPI spec by Stainless.
 
 import builtins
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal
 
+from ...shared import FunctionObject
 from ...._models import BaseModel
 from .required_action_function_tool_call import RequiredActionFunctionToolCall
 
@@ -16,7 +17,6 @@ __all__ = [
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -51,36 +51,8 @@ class ToolAssistantToolsRetrieval(BaseModel):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(BaseModel):
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: str
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Dict[str, builtins.object]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(BaseModel):
-    function: ToolAssistantToolsFunctionFunction
-    """The function definition."""
+    function: FunctionObject
 
     type: Literal["function"]
     """The type of tool being defined: `function`"""
@@ -147,8 +119,8 @@ class Run(BaseModel):
     this run.
     """
 
-    object: Literal["assistant.run"]
-    """The object type, which is always `assistant.run`."""
+    object: Literal["thread.run"]
+    """The object type, which is always `thread.run`."""
 
     required_action: Optional[RequiredAction]
     """Details on the action required to continue the run.
src/openai/types/beta/threads/run_create_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ....types import shared_params
+
 __all__ = [
     "RunCreateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -62,36 +63,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionObject]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
src/openai/types/beta/assistant.py
@@ -1,12 +1,13 @@
 # File generated from our OpenAPI spec by Stainless.
 
 import builtins
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal
 
+from ..shared import FunctionObject
 from ..._models import BaseModel
 
-__all__ = ["Assistant", "Tool", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction", "ToolFunctionFunction"]
+__all__ = ["Assistant", "Tool", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction"]
 
 
 class ToolCodeInterpreter(BaseModel):
@@ -19,36 +20,8 @@ class ToolRetrieval(BaseModel):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolFunctionFunction(BaseModel):
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: str
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Dict[str, builtins.object]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolFunction(BaseModel):
-    function: ToolFunctionFunction
-    """The function definition."""
+    function: FunctionObject
 
     type: Literal["function"]
     """The type of tool being defined: `function`"""
src/openai/types/beta/assistant_create_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "AssistantCreateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -71,36 +72,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionObject]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
src/openai/types/beta/assistant_update_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "AssistantUpdateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -73,36 +74,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionObject]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
src/openai/types/beta/thread_create_and_run_params.py
@@ -2,9 +2,11 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "ThreadCreateAndRunParams",
     "Thread",
@@ -13,7 +15,6 @@ __all__ = [
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -110,36 +111,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionObject]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
src/openai/types/chat/chat_completion_chunk.py
@@ -111,8 +111,8 @@ class ChatCompletionChunk(BaseModel):
     """The object type, which is always `chat.completion.chunk`."""
 
     system_fingerprint: Optional[str] = None
-    """This fingerprint represents the backend configuration that the model runs with.
-
+    """
+    This fingerprint represents the backend configuration that the model runs with.
     Can be used in conjunction with the `seed` request parameter to understand when
     backend changes have been made that might impact determinism.
     """
src/openai/types/chat/chat_completion_tool_param.py
@@ -2,41 +2,15 @@
 
 from __future__ import annotations
 
-from typing import Dict
 from typing_extensions import Literal, Required, TypedDict
 
-__all__ = ["ChatCompletionToolParam", "Function"]
+from ...types import shared_params
 
-
-class Function(TypedDict, total=False):
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
+__all__ = ["ChatCompletionToolParam"]
 
 
 class ChatCompletionToolParam(TypedDict, total=False):
-    function: Required[Function]
+    function: Required[shared_params.FunctionObject]
 
     type: Required[Literal["function"]]
     """The type of the tool. Currently, only `function` is supported."""
src/openai/types/chat/completion_create_params.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
 from .chat_completion_tool_param import ChatCompletionToolParam
 from .chat_completion_message_param import ChatCompletionMessageParam
 from .chat_completion_tool_choice_option_param import (
@@ -121,7 +122,16 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     response_format: ResponseFormat
     """An object specifying the format that the model must output.
 
-    Used to enable JSON mode.
+    Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in increased latency and appearance of a "stuck" request. Also
+    note that the message content may be partially cut off if
+    `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+    or the conversation exceeded the max context length.
     """
 
     seed: Optional[int]
@@ -193,7 +203,7 @@ class Function(TypedDict, total=False):
     of 64.
     """
 
-    parameters: Required[Dict[str, object]]
+    parameters: Required[shared_params.FunctionParameters]
     """The parameters the functions accepts, described as a JSON Schema object.
 
     See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
@@ -214,19 +224,7 @@ class Function(TypedDict, total=False):
 
 class ResponseFormat(TypedDict, total=False):
     type: Literal["text", "json_object"]
-    """Setting to `json_object` enables JSON mode.
-
-    This guarantees that the message the model generates is valid JSON.
-
-    Note that your system prompt must still instruct the model to produce JSON, and
-    to help ensure you don't forget, the API will throw an error if the string
-    `JSON` does not appear in your system message. Also note that the message
-    content may be partial (i.e. cut off) if `finish_reason="length"`, which
-    indicates the generation exceeded `max_tokens` or the conversation exceeded the
-    max context length.
-
-    Must be one of `text` or `json_object`.
-    """
+    """Must be one of `text` or `json_object`."""
 
 
 class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
src/openai/types/shared/__init__.py
@@ -0,0 +1,4 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from .function_object import FunctionObject as FunctionObject
+from .function_parameters import FunctionParameters as FunctionParameters
src/openai/types/shared/function_object.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .function_parameters import FunctionParameters
+
+__all__ = ["FunctionObject"]
+
+
+class FunctionObject(BaseModel):
+    name: str
+    """The name of the function to be called.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    parameters: FunctionParameters
+    """The parameters the function accepts, described as a JSON Schema object.
+
+    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
+    for examples, and the
+    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    documentation about the format.
+
+    To describe a function that accepts no parameters, provide the value
+    `{"type": "object", "properties": {}}`.
+    """
+
+    description: Optional[str] = None
+    """
+    A description of what the function does, used by the model to choose when and
+    how to call the function.
+    """
src/openai/types/shared/function_parameters.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from typing import Dict
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters = Dict[str, object]
src/openai/types/shared_params/__init__.py
@@ -0,0 +1,4 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from .function_object import FunctionObject as FunctionObject
+from .function_parameters import FunctionParameters as FunctionParameters
src/openai/types/shared_params/function_object.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from ...types import shared_params
+
+__all__ = ["FunctionObject"]
+
+
+class FunctionObject(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to be called.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    parameters: Required[shared_params.FunctionParameters]
+    """The parameters the function accepts, described as a JSON Schema object.
+
+    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
+    for examples, and the
+    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+    documentation about the format.
+
+    To describe a function that accepts no parameters, provide the value
+    `{"type": "object", "properties": {}}`.
+    """
+
+    description: str
+    """
+    A description of what the function does, used by the model to choose when and
+    how to call the function.
+    """
src/openai/types/shared_params/function_parameters.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless.
+
+from __future__ import annotations
+
+from typing import Dict
+
+__all__ = ["FunctionParameters"]
+
+FunctionParameters = Dict[str, object]
src/openai/types/__init__.py
@@ -5,6 +5,8 @@ from __future__ import annotations
 from .edit import Edit as Edit
 from .image import Image as Image
 from .model import Model as Model
+from .shared import FunctionObject as FunctionObject
+from .shared import FunctionParameters as FunctionParameters
 from .embedding import Embedding as Embedding
 from .fine_tune import FineTune as FineTune
 from .completion import Completion as Completion
src/openai/types/completion_choice.py
@@ -15,7 +15,7 @@ class Logprobs(BaseModel):
 
     tokens: Optional[List[str]] = None
 
-    top_logprobs: Optional[List[Dict[str, int]]] = None
+    top_logprobs: Optional[List[Dict[str, float]]] = None
 
 
 class CompletionChoice(BaseModel):
api.md
@@ -1,3 +1,9 @@
+# Shared Types
+
+```python
+from openai.types import FunctionObject, FunctionParameters
+```
+
 # Completions
 
 Types: