Commit 4b302346

Robert Craigie <robert@craigie.dev>
2024-09-18 00:30:18
feat(client): add ._request_id property to object responses (#1707)
1 parent 192b8f2
src/openai/_legacy_response.py
@@ -25,7 +25,7 @@ import pydantic
 
 from ._types import NoneType
 from ._utils import is_given, extract_type_arg, is_annotated_type
-from ._models import BaseModel, is_basemodel
+from ._models import BaseModel, is_basemodel, add_request_id
 from ._constants import RAW_RESPONSE_HEADER
 from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
 from ._exceptions import APIResponseValidationError
@@ -138,8 +138,11 @@ class LegacyAPIResponse(Generic[R]):
         if is_given(self._options.post_parser):
             parsed = self._options.post_parser(parsed)
 
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
         self._parsed_by_type[cache_key] = parsed
-        return parsed
+        return cast(R, parsed)
 
     @property
     def headers(self) -> httpx.Headers:
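
With the `_parse` change above, any `BaseModel` parsed through the legacy raw-response path now carries the request ID. A minimal usage sketch, assuming a configured `client` and the SDK's existing `.with_raw_response` accessor (the endpoint and model name are illustrative):

```python
from openai import OpenAI

client = OpenAI()

# .with_raw_response.create(...) returns a LegacyAPIResponse wrapper;
# .parse() now attaches the x-request-id header value to the parsed model.
raw = client.chat.completions.with_raw_response.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-4",
)
completion = raw.parse()
print(completion._request_id)  # e.g. "req_123", or None if the header was absent
```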
src/openai/_models.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import os
 import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
 from datetime import date, datetime
 from typing_extensions import (
     Unpack,
@@ -94,6 +94,23 @@ class BaseModel(pydantic.BaseModel):
         class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
             extra: Any = pydantic.Extra.allow  # type: ignore
 
+    if TYPE_CHECKING:
+        _request_id: Optional[str] = None
+        """The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.
+
+        This will **only** be set for the top-level response object; it will not be defined for nested objects. For example:
+        
+        ```py
+        completion = await client.chat.completions.create(...)
+        completion._request_id  # req_id_xxx
+        completion.usage._request_id  # raises `AttributeError`
+        ```
+
+        Note: unlike other properties that use an `_` prefix, this property
+        *is* public. Unless documented otherwise, all other `_` prefix properties,
+        methods and modules are *private*.
+        """
+
     def to_dict(
         self,
         *,
@@ -662,6 +679,21 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
     setattr(typ, "__pydantic_config__", config)  # noqa: B010
 
 
+def add_request_id(obj: BaseModel, request_id: str | None) -> None:
+    obj._request_id = request_id
+
+    # in Pydantic v1, using setattr like we do above causes the attribute
+    # to be included when serializing the model, which we don't want in this
+    # case, so we need to explicitly exclude it
+    if not PYDANTIC_V2:
+        try:
+            exclude_fields = obj.__exclude_fields__  # type: ignore
+        except AttributeError:
+            cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
+        else:
+            cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
+
+
 # our use of subclassing here causes weirdness for type checkers,
 # so we just pretend that we don't subclass
 if TYPE_CHECKING:
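
The intended behaviour of `add_request_id` can be exercised directly against the (private) `openai._models` module. A minimal sketch mirroring the tests later in this commit, where `Item` is an illustrative model rather than part of the SDK:

```python
from openai._models import BaseModel, add_request_id


class Item(BaseModel):
    foo: str
    bar: int


item = Item(foo="hello!", bar=2)
add_request_id(item, "req_123")

print(item._request_id)  # "req_123"
# The request ID lives only on the instance; it is kept out of serialization
# (on Pydantic v1 via the __exclude_fields__ entry added above).
print(item.to_dict())  # {'foo': 'hello!', 'bar': 2}
```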
src/openai/_response.py
@@ -26,7 +26,7 @@ import pydantic
 
 from ._types import NoneType
 from ._utils import is_given, extract_type_arg, is_annotated_type, extract_type_var_from_base
-from ._models import BaseModel, is_basemodel
+from ._models import BaseModel, is_basemodel, add_request_id
 from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
 from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
 from ._exceptions import OpenAIError, APIResponseValidationError
@@ -315,8 +315,11 @@ class APIResponse(BaseAPIResponse[R]):
         if is_given(self._options.post_parser):
             parsed = self._options.post_parser(parsed)
 
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
         self._parsed_by_type[cache_key] = parsed
-        return parsed
+        return cast(R, parsed)
 
     def read(self) -> bytes:
         """Read and return the binary response content."""
@@ -419,8 +422,11 @@ class AsyncAPIResponse(BaseAPIResponse[R]):
         if is_given(self._options.post_parser):
             parsed = self._options.post_parser(parsed)
 
+        if isinstance(parsed, BaseModel):
+            add_request_id(parsed, self.request_id)
+
         self._parsed_by_type[cache_key] = parsed
-        return parsed
+        return cast(R, parsed)
 
     async def read(self) -> bytes:
         """Read and return the binary response content."""
tests/test_legacy_response.py
@@ -66,6 +66,27 @@ def test_response_parse_custom_model(client: OpenAI) -> None:
     assert obj.bar == 2
 
 
+def test_response_basemodel_request_id(client: OpenAI) -> None:
+    response = LegacyAPIResponse(
+        raw=httpx.Response(
+            200,
+            headers={"x-request-id": "my-req-id"},
+            content=json.dumps({"foo": "hello!", "bar": 2}),
+        ),
+        client=client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = response.parse(to=CustomModel)
+    assert obj._request_id == "my-req-id"
+    assert obj.foo == "hello!"
+    assert obj.bar == 2
+    assert obj.to_dict() == {"foo": "hello!", "bar": 2}
+
+
 def test_response_parse_annotated_type(client: OpenAI) -> None:
     response = LegacyAPIResponse(
         raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
tests/test_response.py
@@ -156,6 +156,49 @@ async def test_async_response_parse_custom_model(async_client: AsyncOpenAI) -> N
     assert obj.bar == 2
 
 
+def test_response_basemodel_request_id(client: OpenAI) -> None:
+    response = APIResponse(
+        raw=httpx.Response(
+            200,
+            headers={"x-request-id": "my-req-id"},
+            content=json.dumps({"foo": "hello!", "bar": 2}),
+        ),
+        client=client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = response.parse(to=CustomModel)
+    assert obj._request_id == "my-req-id"
+    assert obj.foo == "hello!"
+    assert obj.bar == 2
+    assert obj.to_dict() == {"foo": "hello!", "bar": 2}
+
+
+@pytest.mark.asyncio
+async def test_async_response_basemodel_request_id(client: OpenAI) -> None:
+    response = AsyncAPIResponse(
+        raw=httpx.Response(
+            200,
+            headers={"x-request-id": "my-req-id"},
+            content=json.dumps({"foo": "hello!", "bar": 2}),
+        ),
+        client=client,
+        stream=False,
+        stream_cls=None,
+        cast_to=str,
+        options=FinalRequestOptions.construct(method="get", url="/foo"),
+    )
+
+    obj = await response.parse(to=CustomModel)
+    assert obj._request_id == "my-req-id"
+    assert obj.foo == "hello!"
+    assert obj.bar == 2
+    assert obj.to_dict() == {"foo": "hello!", "bar": 2}
+
+
 def test_response_parse_annotated_type(client: OpenAI) -> None:
     response = APIResponse(
         raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
README.md
@@ -417,6 +417,24 @@ Error codes are as followed:
 | >=500       | `InternalServerError`      |
 | N/A         | `APIConnectionError`       |
 
+## Request IDs
+
+> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)
+
+All object responses in the SDK provide a `_request_id` property, which is added from the `x-request-id` response header, so that you can quickly log failing requests and report them back to OpenAI.
+
+```python
+completion = await client.chat.completions.create(
+    messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
+)
+print(completion._request_id)  # req_123
+```
+
+Note that unlike other properties that use an `_` prefix, the `_request_id` property
+*is* public. Unless documented otherwise, *all* other `_` prefix properties,
+methods and modules are *private*.
+
+
 ### Retries
 
 Certain errors are automatically retried 2 times by default, with a short exponential backoff.
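
One note on the README example above: `add_request_id` accepts `str | None`, so `_request_id` is `None` whenever the `x-request-id` header is missing from the response. A small defensive sketch for logging code:

```python
completion = await client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
)
if completion._request_id is None:
    # The response did not include an x-request-id header.
    print("no request ID available for this response")
else:
    print(f"request ID: {completion._request_id}")
```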