Commit 845466f6
Changed files (2)
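This commit aligns the hand-written `parse()` and `stream()` helpers on `Responses`/`AsyncResponses` with the generated `create()` method: `model` is retyped from `Union[str, ChatModel]` to `ResponsesModel`, and `conversation`, `max_tool_calls`, `prompt`, `prompt_cache_key`, `safety_identifier`, `service_tier`, and `top_logprobs` are now accepted and passed through to the underlying request (the async streaming path also starts forwarding the pre-existing `background` flag). A minimal sketch of exercising the widened `parse()` signature — the Pydantic model, model name, and parameter values are illustrative assumptions, not part of the commit:

from pydantic import BaseModel

from openai import OpenAI


class WeatherReport(BaseModel):
    # Hypothetical output schema, used only to show text_format parsing.
    city: str
    summary: str


client = OpenAI()

# parse() now forwards the same request options as create(); the values
# below are placeholders chosen for illustration.
parsed = client.responses.parse(
    model="gpt-4o-2024-08-06",
    input="What's the weather like in San Francisco?",
    text_format=WeatherReport,
    prompt_cache_key="weather-demo",
    service_tier="default",
)
print(parsed.output_parsed)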
src/openai/resources/responses/responses.py
@@ -31,7 +31,6 @@ from ...lib._parsing._responses import (
parse_response,
type_to_text_format_param as _type_to_text_format_param,
)
-from ...types.shared.chat_model import ChatModel
from ...types.responses.response import Response
from ...types.responses.tool_param import ToolParam, ParseableToolParam
from ...types.shared_params.metadata import Metadata
@@ -881,22 +880,29 @@ class Responses(SyncAPIResource):
self,
*,
input: Union[str, ResponseInputParam],
- model: Union[str, ChatModel],
+ model: ResponsesModel,
background: Optional[bool] | NotGiven = NOT_GIVEN,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+ prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+ prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+ safety_identifier: str | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam| NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -913,22 +919,29 @@ class Responses(SyncAPIResource):
*,
response_id: str | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
- model: Union[str, ChatModel] | NotGiven = NOT_GIVEN,
+ model: ResponsesModel | NotGiven = NOT_GIVEN,
background: Optional[bool] | NotGiven = NOT_GIVEN,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+ prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+ prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+ safety_identifier: str | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -943,18 +956,25 @@ class Responses(SyncAPIResource):
new_response_args = {
"input": input,
"model": model,
+ "conversation": conversation,
"include": include,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
"metadata": metadata,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
"reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
"store": store,
"stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
+ "top_logprobs": top_logprobs,
"top_p": top_p,
"truncation": truncation,
"user": user,
@@ -989,12 +1009,16 @@ class Responses(SyncAPIResource):
input=input,
model=model,
tools=tools,
+ conversation=conversation,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
store=store,
stream_options=stream_options,
stream=True,
@@ -1002,6 +1026,9 @@ class Responses(SyncAPIResource):
text=text,
tool_choice=tool_choice,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
@@ -1057,7 +1084,7 @@ class Responses(SyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam| NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2275,22 +2302,29 @@ class AsyncResponses(AsyncAPIResource):
self,
*,
input: Union[str, ResponseInputParam],
- model: Union[str, ChatModel],
+ model: ResponsesModel,
background: Optional[bool] | NotGiven = NOT_GIVEN,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+ prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+ prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+ safety_identifier: str | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam| NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -2307,22 +2341,29 @@ class AsyncResponses(AsyncAPIResource):
*,
response_id: str | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
- model: Union[str, ChatModel] | NotGiven = NOT_GIVEN,
+ model: ResponsesModel | NotGiven = NOT_GIVEN,
background: Optional[bool] | NotGiven = NOT_GIVEN,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+ prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+ prompt_cache_key: str | NotGiven = NOT_GIVEN,
reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+ safety_identifier: str | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam| NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
user: str | NotGiven = NOT_GIVEN,
@@ -2337,18 +2378,25 @@ class AsyncResponses(AsyncAPIResource):
new_response_args = {
"input": input,
"model": model,
+ "conversation": conversation,
"include": include,
"instructions": instructions,
"max_output_tokens": max_output_tokens,
+ "max_tool_calls": max_tool_calls,
"metadata": metadata,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
+ "prompt": prompt,
+ "prompt_cache_key": prompt_cache_key,
"reasoning": reasoning,
+ "safety_identifier": safety_identifier,
+ "service_tier": service_tier,
"store": store,
"stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
+ "top_logprobs": top_logprobs,
"top_p": top_p,
"truncation": truncation,
"user": user,
@@ -2384,21 +2432,29 @@ class AsyncResponses(AsyncAPIResource):
model=model,
stream=True,
tools=tools,
+ conversation=conversation,
include=include,
instructions=instructions,
max_output_tokens=max_output_tokens,
+ max_tool_calls=max_tool_calls,
metadata=metadata,
parallel_tool_calls=parallel_tool_calls,
previous_response_id=previous_response_id,
+ prompt=prompt,
+ prompt_cache_key=prompt_cache_key,
store=store,
stream_options=stream_options,
temperature=temperature,
text=text,
tool_choice=tool_choice,
reasoning=reasoning,
+ safety_identifier=safety_identifier,
+ service_tier=service_tier,
+ top_logprobs=top_logprobs,
top_p=top_p,
truncation=truncation,
user=user,
+ background=background,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
@@ -2455,7 +2511,7 @@ class AsyncResponses(AsyncAPIResource):
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam| NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
tests/lib/responses/test_responses.py
@@ -6,7 +6,8 @@ import pytest
from respx import MockRouter
from inline_snapshot import snapshot

-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI
+from openai._utils import assert_signatures_in_sync

from ...conftest import base_url
from ..snapshots import make_snapshot_request
@@ -38,3 +39,24 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
assert response.output_text == snapshot(
"I can't provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it's good to be prepared for variable weather!"
)
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+ checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+ assert_signatures_in_sync(
+ checking_client.responses.create,
+ checking_client.responses.stream,
+ exclude_params={"stream", "tools"},
+ )
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_parse_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+ checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+ assert_signatures_in_sync(
+ checking_client.responses.create,
+ checking_client.responses.parse,
+ exclude_params={"tools"},
+ )
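The new tests use `assert_signatures_in_sync` so that parameters added to `create()` cannot silently drift out of the hand-maintained `stream()` and `parse()` overloads. For context, a rough sketch of driving the streaming helper with some of the newly threaded-through options — the model name and event handling are assumptions for illustration, not taken from this commit:

from openai import OpenAI

client = OpenAI()

# stream() is a context manager; iterating the stream yields typed events,
# and options such as prompt_cache_key and service_tier are forwarded to
# the underlying create() call.
with client.responses.stream(
    model="gpt-4o-2024-08-06",
    input="Summarize today's weather pattern in San Francisco.",
    prompt_cache_key="weather-demo",
    service_tier="default",
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="")

    final = stream.get_final_response()
    print("\ntotal tokens:", final.usage.total_tokens if final.usage else "n/a")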