Commit 1d8a28e3
Changed files (16)
src/openai/_utils/_proxy.py
@@ -59,5 +59,4 @@ class LazyProxy(Generic[T], ABC):
return cast(T, self)
@abstractmethod
- def __load__(self) -> T:
- ...
+ def __load__(self) -> T: ...
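The `__load__` stub is abstract, so moving the `...` onto the definition line is purely cosmetic. A minimal sketch of how a concrete proxy fills it in (the `LazyList` subclass is hypothetical and the proxy is simplified compared to the SDK's):

```py
# Minimal LazyProxy sketch; LazyList is illustrative, not part of this commit.
from abc import ABC, abstractmethod
from typing import Generic, TypeVar

T = TypeVar("T")

class LazyProxy(Generic[T], ABC):
    def __getattr__(self, attr: str) -> object:
        # every attribute access forces the proxied object to be built
        return getattr(self.__load__(), attr)

    @abstractmethod
    def __load__(self) -> T: ...

class LazyList(LazyProxy[list]):
    def __load__(self) -> list:
        print("building the real object")
        return [1, 2, 3]

proxy = LazyList()
print(proxy.count(2))  # triggers __load__, then delegates to the real list
```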
src/openai/_utils/_utils.py
@@ -211,20 +211,17 @@ def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
Example usage:
```py
@overload
- def foo(*, a: str) -> str:
- ...
+ def foo(*, a: str) -> str: ...
@overload
- def foo(*, b: bool) -> str:
- ...
+ def foo(*, b: bool) -> str: ...
# This enforces the same constraints that a static type checker would
# i.e. that either a or b must be passed to the function
@required_args(["a"], ["b"])
- def foo(*, a: str | None = None, b: bool | None = None) -> str:
- ...
+ def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
```
"""
@@ -286,18 +283,15 @@ _V = TypeVar("_V")
@overload
-def strip_not_given(obj: None) -> None:
- ...
+def strip_not_given(obj: None) -> None: ...
@overload
-def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]:
- ...
+def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...
@overload
-def strip_not_given(obj: object) -> object:
- ...
+def strip_not_given(obj: object) -> object: ...
def strip_not_given(obj: object | None) -> object:
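Every hunk in this file is the same formatting change (presumably from the newer ruff formatter bumped later in this commit): `@overload` stub bodies move onto the definition line. The stubs exist only for the type checker, so runtime behaviour is unchanged. A self-contained sketch of the pattern in the new one-line style (names are illustrative, not from `_utils.py`):

```py
# Illustrative overload pair; only the final def has a body that runs.
from __future__ import annotations
from typing import overload

@overload
def double(value: int) -> int: ...
@overload
def double(value: str) -> str: ...
def double(value: int | str) -> int | str:
    return value * 2

print(double(21))    # 42, inferred as int
print(double("ab"))  # "abab", inferred as str
```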
src/openai/cli/_errors.py
@@ -8,12 +8,10 @@ from ._utils import Colors, organization_info
from .._exceptions import APIError, OpenAIError
-class CLIError(OpenAIError):
- ...
+class CLIError(OpenAIError): ...
-class SilentCLIError(CLIError):
- ...
+class SilentCLIError(CLIError): ...
def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
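`...` as a class body is just an expression, equivalent to `pass`, so these marker exceptions are unchanged at runtime. A quick illustration, with a stand-in for the imported `OpenAIError`:

```py
# Stand-in hierarchy; the real OpenAIError lives in openai._exceptions.
class OpenAIError(Exception): ...
class CLIError(OpenAIError): ...
class SilentCLIError(CLIError): ...

try:
    raise SilentCLIError("already reported to the user")
except CLIError as err:
    # a SilentCLIError is still a CLIError, so ordinary handling catches it
    print(type(err).__name__)  # SilentCLIError
```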
src/openai/lib/azure.py
@@ -80,8 +80,7 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def __init__(
@@ -99,8 +98,7 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def __init__(
@@ -118,8 +116,7 @@ class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI):
default_query: Mapping[str, object] | None = None,
http_client: httpx.Client | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
def __init__(
self,
@@ -321,8 +318,7 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def __init__(
@@ -341,8 +337,7 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def __init__(
@@ -361,8 +356,7 @@ class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], Asy
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
- ) -> None:
- ...
+ ) -> None: ...
def __init__(
self,
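Each Azure client declares three `__init__` overloads for its mutually exclusive authentication styles; only their closing lines are reformatted here. A rough sketch of that shape (class and parameter names below are illustrative, not the SDK's exact Azure signature):

```py
# Hypothetical client showing why a constructor grows several overloads: each
# overload pins one credential style, the real body accepts them all as optional.
from __future__ import annotations
from typing import Callable, overload

class ExampleAzureClient:
    @overload
    def __init__(self, *, api_key: str) -> None: ...
    @overload
    def __init__(self, *, ad_token: str) -> None: ...
    @overload
    def __init__(self, *, ad_token_provider: Callable[[], str]) -> None: ...
    def __init__(
        self,
        *,
        api_key: str | None = None,
        ad_token: str | None = None,
        ad_token_provider: Callable[[], str] | None = None,
    ) -> None:
        if api_key is None and ad_token is None and ad_token_provider is None:
            raise TypeError("one of api_key, ad_token or ad_token_provider is required")
        self._credential = api_key or ad_token or ad_token_provider

client = ExampleAzureClient(api_key="sk-...")  # type-checks against the first overload
```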
src/openai/_base_client.py
@@ -125,16 +125,14 @@ class PageInfo:
self,
*,
url: URL,
- ) -> None:
- ...
+ ) -> None: ...
@overload
def __init__(
self,
*,
params: Query,
- ) -> None:
- ...
+ ) -> None: ...
def __init__(
self,
@@ -167,8 +165,7 @@ class BasePage(GenericModel, Generic[_T]):
return False
return self.next_page_info() is not None
- def next_page_info(self) -> Optional[PageInfo]:
- ...
+ def next_page_info(self) -> Optional[PageInfo]: ...
def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body]
...
@@ -904,8 +901,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
- ) -> _StreamT:
- ...
+ ) -> _StreamT: ...
@overload
def request(
@@ -915,8 +911,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
remaining_retries: Optional[int] = None,
*,
stream: Literal[False] = False,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
def request(
@@ -927,8 +922,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
- ) -> ResponseT | _StreamT:
- ...
+ ) -> ResponseT | _StreamT: ...
def request(
self,
@@ -1172,8 +1166,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
def get(
@@ -1184,8 +1177,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_StreamT],
- ) -> _StreamT:
- ...
+ ) -> _StreamT: ...
@overload
def get(
@@ -1196,8 +1188,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_StreamT] | None = None,
- ) -> ResponseT | _StreamT:
- ...
+ ) -> ResponseT | _StreamT: ...
def get(
self,
@@ -1223,8 +1214,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
def post(
@@ -1237,8 +1227,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
files: RequestFiles | None = None,
stream: Literal[True],
stream_cls: type[_StreamT],
- ) -> _StreamT:
- ...
+ ) -> _StreamT: ...
@overload
def post(
@@ -1251,8 +1240,7 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
files: RequestFiles | None = None,
stream: bool,
stream_cls: type[_StreamT] | None = None,
- ) -> ResponseT | _StreamT:
- ...
+ ) -> ResponseT | _StreamT: ...
def post(
self,
@@ -1485,8 +1473,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
*,
stream: Literal[False] = False,
remaining_retries: Optional[int] = None,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
async def request(
@@ -1497,8 +1484,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
remaining_retries: Optional[int] = None,
- ) -> _AsyncStreamT:
- ...
+ ) -> _AsyncStreamT: ...
@overload
async def request(
@@ -1509,8 +1495,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
remaining_retries: Optional[int] = None,
- ) -> ResponseT | _AsyncStreamT:
- ...
+ ) -> ResponseT | _AsyncStreamT: ...
async def request(
self,
@@ -1739,8 +1724,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
async def get(
@@ -1751,8 +1735,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
- ) -> _AsyncStreamT:
- ...
+ ) -> _AsyncStreamT: ...
@overload
async def get(
@@ -1763,8 +1746,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
- ) -> ResponseT | _AsyncStreamT:
- ...
+ ) -> ResponseT | _AsyncStreamT: ...
async def get(
self,
@@ -1788,8 +1770,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
- ) -> ResponseT:
- ...
+ ) -> ResponseT: ...
@overload
async def post(
@@ -1802,8 +1783,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
- ) -> _AsyncStreamT:
- ...
+ ) -> _AsyncStreamT: ...
@overload
async def post(
@@ -1816,8 +1796,7 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
- ) -> ResponseT | _AsyncStreamT:
- ...
+ ) -> ResponseT | _AsyncStreamT: ...
async def post(
self,
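All of the `request`/`get`/`post` hunks reformat the same `Literal`-based overload trio: `stream=False` returns the parsed response, `stream=True` plus a `stream_cls` returns that stream type. A condensed sketch of the mechanism (simplified names, not the SDK's client):

```py
# Simplified Literal-based narrowing; Stream and request() are stand-ins,
# not openai._base_client.
from __future__ import annotations
from typing import Literal, TypeVar, overload

ResponseT = TypeVar("ResponseT")

class Stream:
    def __init__(self, chunks: list[str]) -> None:
        self.chunks = chunks

@overload
def request(cast_to: type[ResponseT], *, stream: Literal[False] = False) -> ResponseT: ...
@overload
def request(cast_to: type[ResponseT], *, stream: Literal[True], stream_cls: type[Stream]) -> Stream: ...
def request(cast_to, *, stream=False, stream_cls=None):
    if stream:
        return stream_cls(["chunk-1", "chunk-2"])
    return cast_to()

parsed = request(dict)                                     # checker sees dict
streamed = request(dict, stream=True, stream_cls=Stream)   # checker sees Stream
print(type(parsed).__name__, type(streamed).__name__)
```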
src/openai/_compat.py
@@ -159,22 +159,19 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
# generic models
if TYPE_CHECKING:
- class GenericModel(pydantic.BaseModel):
- ...
+ class GenericModel(pydantic.BaseModel): ...
else:
if PYDANTIC_V2:
# there no longer needs to be a distinction in v2 but
# we still have to create our own subclass to avoid
# inconsistent MRO ordering errors
- class GenericModel(pydantic.BaseModel):
- ...
+ class GenericModel(pydantic.BaseModel): ...
else:
import pydantic.generics
- class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
- ...
+ class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
# cached properties
@@ -193,26 +190,21 @@ if TYPE_CHECKING:
func: Callable[[Any], _T]
attrname: str | None
- def __init__(self, func: Callable[[Any], _T]) -> None:
- ...
+ def __init__(self, func: Callable[[Any], _T]) -> None: ...
@overload
- def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
- ...
+ def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
@overload
- def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
- ...
+ def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
raise NotImplementedError()
- def __set_name__(self, owner: type[Any], name: str) -> None:
- ...
+ def __set_name__(self, owner: type[Any], name: str) -> None: ...
# __set__ is not defined at runtime, but @cached_property is designed to be settable
- def __set__(self, instance: object, value: _T) -> None:
- ...
+ def __set__(self, instance: object, value: _T) -> None: ...
else:
try:
from functools import cached_property as cached_property
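The typing-only `cached_property` shim (including the `__set__` it declares purely for type checkers) gets the same one-line treatment; at runtime the module falls back to `functools.cached_property`. A short demonstration of the behaviour the shim describes:

```py
# functools.cached_property computes once, stores the result on the instance,
# and stays assignable because it defines no __set__ of its own.
from functools import cached_property

class Config:
    @cached_property
    def settings(self) -> dict:
        print("loading settings")       # only printed on the first access
        return {"env": "prod"}

cfg = Config()
print(cfg.settings)                     # triggers the load
print(cfg.settings)                     # served from the instance __dict__
cfg.settings = {"env": "test"}          # settable, which is why the shim declares __set__
print(cfg.settings)
```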
src/openai/_files.py
@@ -39,13 +39,11 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
@overload
-def to_httpx_files(files: None) -> None:
- ...
+def to_httpx_files(files: None) -> None: ...
@overload
-def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
- ...
+def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
@@ -83,13 +81,11 @@ def _read_file_content(file: FileContent) -> HttpxFileContent:
@overload
-async def async_to_httpx_files(files: None) -> None:
- ...
+async def async_to_httpx_files(files: None) -> None: ...
@overload
-async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
- ...
+async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
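Both the sync and async converters use the same `None`-in, `None`-out overload pair. A tiny sketch of that pattern in isolation (the `normalize` helper is made up for illustration):

```py
# Hypothetical helper showing the None-passthrough overload shape used by
# to_httpx_files / async_to_httpx_files.
from __future__ import annotations
from typing import overload

@overload
def normalize(value: None) -> None: ...
@overload
def normalize(value: str) -> str: ...
def normalize(value: str | None) -> str | None:
    return value.strip().lower() if value is not None else None

print(normalize("  Hello "))  # "hello"
print(normalize(None))        # None, and the checker knows it is None
```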
src/openai/_legacy_response.py
@@ -92,12 +92,10 @@ class LegacyAPIResponse(Generic[R]):
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]
@overload
- def parse(self, *, to: type[_T]) -> _T:
- ...
+ def parse(self, *, to: type[_T]) -> _T: ...
@overload
- def parse(self) -> R:
- ...
+ def parse(self) -> R: ...
def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
src/openai/_response.py
@@ -268,12 +268,10 @@ class APIResponse(BaseAPIResponse[R]):
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]
@overload
- def parse(self, *, to: type[_T]) -> _T:
- ...
+ def parse(self, *, to: type[_T]) -> _T: ...
@overload
- def parse(self) -> R:
- ...
+ def parse(self) -> R: ...
def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
@@ -376,12 +374,10 @@ class AsyncAPIResponse(BaseAPIResponse[R]):
return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return]
@overload
- async def parse(self, *, to: type[_T]) -> _T:
- ...
+ async def parse(self, *, to: type[_T]) -> _T: ...
@overload
- async def parse(self) -> R:
- ...
+ async def parse(self) -> R: ...
async def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
src/openai/_types.py
@@ -112,8 +112,7 @@ class NotGiven:
For example:
```py
- def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
- ...
+ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
get(timeout=1) # 1s timeout
@@ -163,16 +162,14 @@ class ModelBuilderProtocol(Protocol):
*,
response: Response,
data: object,
- ) -> _T:
- ...
+ ) -> _T: ...
Headers = Mapping[str, Union[str, Omit]]
class HeadersLikeProtocol(Protocol):
- def get(self, __key: str) -> str | None:
- ...
+ def get(self, __key: str) -> str | None: ...
HeadersLike = Union[Headers, HeadersLikeProtocol]
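The `NotGiven` docstring above is the one spot where documentation rather than a stub is touched; the sentinel exists so "argument omitted" can be told apart from an explicit `None`. A small sketch of that distinction, mirroring the docstring's `get` example (both the class and helper below are simplified stand-ins):

```py
# Sentinel pattern from the docstring: omitted means "use the default",
# None means "explicitly disable the timeout".
from __future__ import annotations

class NotGiven:
    def __bool__(self) -> bool:
        return False

NOT_GIVEN = NotGiven()

def get(timeout: float | None | NotGiven = NOT_GIVEN) -> str:
    if isinstance(timeout, NotGiven):
        return "using the default timeout"
    if timeout is None:
        return "timeout disabled"
    return f"timeout set to {timeout}s"

print(get())              # using the default timeout
print(get(timeout=None))  # timeout disabled
print(get(timeout=1))     # timeout set to 1s
```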
tests/test_utils/test_typing.py
@@ -9,24 +9,19 @@ _T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
-class BaseGeneric(Generic[_T]):
- ...
+class BaseGeneric(Generic[_T]): ...
-class SubclassGeneric(BaseGeneric[_T]):
- ...
+class SubclassGeneric(BaseGeneric[_T]): ...
-class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]):
- ...
+class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ...
-class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]):
- ...
+class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ...
-class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]):
- ...
+class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ...
def test_extract_type_var() -> None:
tests/test_deepcopy.py
@@ -41,8 +41,7 @@ def test_nested_list() -> None:
assert_different_identities(obj1[1], obj2[1])
-class MyObject:
- ...
+class MyObject: ...
def test_ignores_other_types() -> None:
tests/test_legacy_response.py
@@ -12,8 +12,7 @@ from openai._base_client import FinalRequestOptions
from openai._legacy_response import LegacyAPIResponse
-class PydanticModel(pydantic.BaseModel):
- ...
+class PydanticModel(pydantic.BaseModel): ...
def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
tests/test_response.py
@@ -19,16 +19,13 @@ from openai._streaming import Stream
from openai._base_client import FinalRequestOptions
-class ConcreteBaseAPIResponse(APIResponse[bytes]):
- ...
+class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
-class ConcreteAPIResponse(APIResponse[List[str]]):
- ...
+class ConcreteAPIResponse(APIResponse[List[str]]): ...
-class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]):
- ...
+class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ...
def test_extract_response_type_direct_classes() -> None:
@@ -56,8 +53,7 @@ def test_extract_response_type_binary_response() -> None:
assert extract_response_type(AsyncBinaryAPIResponse) == bytes
-class PydanticModel(pydantic.BaseModel):
- ...
+class PydanticModel(pydantic.BaseModel): ...
def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
pyproject.toml
@@ -83,8 +83,8 @@ format = { chain = [
"check:ruff",
"typecheck",
]}
-"check:ruff" = "ruff ."
-"fix:ruff" = "ruff --fix ."
+"check:ruff" = "ruff check ."
+"fix:ruff" = "ruff check --fix ."
typecheck = { chain = [
"typecheck:pyright",
@@ -168,6 +168,11 @@ reportPrivateUsage = false
line-length = 120
output-format = "grouped"
target-version = "py37"
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.ruff.lint]
select = [
# isort
"I",
@@ -198,9 +203,6 @@ unfixable = [
]
ignore-init-module-imports = true
-[tool.ruff.format]
-docstring-code-format = true
-
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead"
requirements-dev.lock
@@ -139,7 +139,7 @@ requests==2.31.0
respx==0.20.2
rich==13.7.1
# via inline-snapshot
-ruff==0.1.9
+ruff==0.5.6
setuptools==68.2.2
# via nodeenv
six==1.16.0