Commit a1203812
Changed files (2)
src
openai
resources
beta
threads
tests
src/openai/resources/beta/threads/threads.py
@@ -828,6 +828,7 @@ class Threads(SyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -856,6 +857,7 @@ class Threads(SyncAPIResource):
max_prompt_tokens=max_prompt_tokens,
metadata=metadata,
model=model,
+ parallel_tool_calls=parallel_tool_calls,
response_format=response_format,
temperature=temperature,
stream=False,
@@ -908,6 +910,7 @@ class Threads(SyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -962,6 +965,7 @@ class Threads(SyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1016,6 +1020,7 @@ class Threads(SyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1050,6 +1055,7 @@ class Threads(SyncAPIResource):
"max_prompt_tokens": max_prompt_tokens,
"metadata": metadata,
"model": model,
+ "parallel_tool_calls": parallel_tool_calls,
"response_format": response_format,
"temperature": temperature,
"tool_choice": tool_choice,
@@ -1838,6 +1844,7 @@ class AsyncThreads(AsyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1866,6 +1873,7 @@ class AsyncThreads(AsyncAPIResource):
max_prompt_tokens=max_prompt_tokens,
metadata=metadata,
model=model,
+ parallel_tool_calls=parallel_tool_calls,
response_format=response_format,
temperature=temperature,
stream=False,
@@ -1920,6 +1928,7 @@ class AsyncThreads(AsyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1974,6 +1983,7 @@ class AsyncThreads(AsyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -2028,6 +2038,7 @@ class AsyncThreads(AsyncAPIResource):
None,
]
| NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -2064,6 +2075,7 @@ class AsyncThreads(AsyncAPIResource):
"max_prompt_tokens": max_prompt_tokens,
"metadata": metadata,
"model": model,
+ "parallel_tool_calls": parallel_tool_calls,
"response_format": response_format,
"temperature": temperature,
"tool_choice": tool_choice,
tests/lib/test_assistants.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import inspect
+from typing import Any, Callable
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+
+
def assert_signatures_in_sync(
    source_func: Callable[..., Any],
    check_func: Callable[..., Any],
    *,
    exclude_params: set[str] | None = None,
) -> None:
    """Assert that ``check_func`` mirrors the signature of ``source_func``.

    Every parameter of ``source_func`` that is not listed in ``exclude_params``
    must also exist on ``check_func`` with an identical annotation. All
    mismatches are collected and reported together in one ``AssertionError``
    so a single run surfaces every drifted parameter.

    Args:
        source_func: The reference (generated) callable.
        check_func: The hand-written callable being checked against it.
        exclude_params: Parameter names to skip (e.g. ``{"stream"}``).

    Raises:
        AssertionError: If any parameter is missing or differently annotated.
    """
    # Avoid a mutable default argument; treat None as "exclude nothing".
    if exclude_params is None:
        exclude_params = set()

    check_sig = inspect.signature(check_func)
    source_sig = inspect.signature(source_func)

    errors: list[str] = []

    for name, generated_param in source_sig.parameters.items():
        if name in exclude_params:
            continue

        custom_param = check_sig.parameters.get(name)
        if not custom_param:
            errors.append(f"the `{name}` param is missing")
            continue

        if custom_param.annotation != generated_param.annotation:
            # Bug fix: the original printed the generated annotation twice,
            # hiding what the custom function actually declared.
            errors.append(
                f"types for the `{name}` param do not match; generated={repr(generated_param.annotation)} custom={repr(custom_param.annotation)}"
            )
            continue

    if errors:
        raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors))
+
+
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_create_and_run_poll_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    """`create_and_run_poll` must accept every param `create_and_run` does (except `stream`)."""
    threads = (client if sync else async_client).beta.threads

    assert_signatures_in_sync(
        threads.create_and_run,
        threads.create_and_run_poll,
        exclude_params={"stream"},
    )
+
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_create_and_run_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    """`create_and_run_stream` must accept every param `create_and_run` does (except `stream`)."""
    threads = (client if sync else async_client).beta.threads

    assert_signatures_in_sync(
        threads.create_and_run,
        threads.create_and_run_stream,
        exclude_params={"stream"},
    )