Commit 62069d2f
Changed files (2)
tests/api_resources/audio/test_speech.py
@@ -27,7 +27,7 @@ class TestSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
speech = client.audio.speech.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
)
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ class TestSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
speech = client.audio.speech.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
response_format="mp3",
speed=0.25,
@@ -54,7 +54,7 @@ class TestSpeech:
response = client.audio.speech.with_raw_response.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
)
@@ -69,7 +69,7 @@ class TestSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
with client.audio.speech.with_streaming_response.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
) as response:
assert not response.is_closed
@@ -90,7 +90,7 @@ class TestAsyncSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
speech = await async_client.audio.speech.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
)
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ class TestAsyncSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
speech = await async_client.audio.speech.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
response_format="mp3",
speed=0.25,
@@ -117,7 +117,7 @@ class TestAsyncSpeech:
response = await async_client.audio.speech.with_raw_response.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
)
@@ -132,7 +132,7 @@ class TestAsyncSpeech:
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
async with async_client.audio.speech.with_streaming_response.create(
input="string",
- model="string",
+ model="tts-1",
voice="alloy",
) as response:
assert not response.is_closed
tests/api_resources/test_completions.py
@@ -20,7 +20,7 @@ class TestCompletions:
@parametrize
def test_method_create_overload_1(self, client: OpenAI) -> None:
completion = client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
)
assert_matches_type(Completion, completion, path=["response"])
@@ -28,7 +28,7 @@ class TestCompletions:
@parametrize
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
completion = client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
best_of=0,
echo=True,
@@ -52,7 +52,7 @@ class TestCompletions:
@parametrize
def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
response = client.completions.with_raw_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
)
@@ -64,7 +64,7 @@ class TestCompletions:
@parametrize
def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
with client.completions.with_streaming_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
) as response:
assert not response.is_closed
@@ -78,7 +78,7 @@ class TestCompletions:
@parametrize
def test_method_create_overload_2(self, client: OpenAI) -> None:
completion_stream = client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
)
@@ -87,7 +87,7 @@ class TestCompletions:
@parametrize
def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
completion_stream = client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
best_of=0,
@@ -111,7 +111,7 @@ class TestCompletions:
@parametrize
def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
response = client.completions.with_raw_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
)
@@ -123,7 +123,7 @@ class TestCompletions:
@parametrize
def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
with client.completions.with_streaming_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
) as response:
@@ -142,7 +142,7 @@ class TestAsyncCompletions:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
completion = await async_client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
)
assert_matches_type(Completion, completion, path=["response"])
@@ -150,7 +150,7 @@ class TestAsyncCompletions:
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
completion = await async_client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
best_of=0,
echo=True,
@@ -174,7 +174,7 @@ class TestAsyncCompletions:
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.completions.with_raw_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
)
@@ -186,7 +186,7 @@ class TestAsyncCompletions:
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.completions.with_streaming_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
) as response:
assert not response.is_closed
@@ -200,7 +200,7 @@ class TestAsyncCompletions:
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
completion_stream = await async_client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
)
@@ -209,7 +209,7 @@ class TestAsyncCompletions:
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
completion_stream = await async_client.completions.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
best_of=0,
@@ -233,7 +233,7 @@ class TestAsyncCompletions:
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
response = await async_client.completions.with_raw_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
)
@@ -245,7 +245,7 @@ class TestAsyncCompletions:
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
async with async_client.completions.with_streaming_response.create(
- model="string",
+ model="gpt-3.5-turbo-instruct",
prompt="This is a test.",
stream=True,
) as response:
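Likewise, a minimal sketch (not part of the commit) of the two completions overloads these tests cover, under the same assumed mock-server setup; gpt-3.5-turbo-instruct is a documented completions model, unlike the old "string" placeholder:

from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:4010", api_key="my-api-key")  # assumed mock server and dummy key

# Overload 1: non-streaming, returns a single Completion.
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="This is a test.",
)
print(completion.choices[0].text)

# Overload 2: stream=True, returns an iterator of Completion chunks.
for chunk in client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="This is a test.",
    stream=True,
):
    print(chunk.choices[0].text, end="")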