Commit f14f8593
Changed files (10)
examples/assistant.py
@@ -1,4 +1,3 @@
-
import openai
# gets API Key from environment variable OPENAI_API_KEY
src/openai/resources/beta/vector_stores/files.py
@@ -611,7 +611,9 @@ class AsyncFiles(AsyncAPIResource):
polling helper method to wait for processing to complete).
"""
file_obj = await self._client.files.create(file=file, purpose="assistants")
- return await self.create(vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy)
+ return await self.create(
+ vector_store_id=vector_store_id, file_id=file_obj.id, chunking_strategy=chunking_strategy
+ )
async def upload_and_poll(
self,
@@ -627,7 +629,7 @@ class AsyncFiles(AsyncAPIResource):
vector_store_id=vector_store_id,
file_id=file_obj.id,
poll_interval_ms=poll_interval_ms,
- chunking_strategy=chunking_strategy
+ chunking_strategy=chunking_strategy,
)
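For reference, a minimal usage sketch of the async upload helper touched in this hunk, assuming an existing vector store id and a local file path (both placeholders; this is illustrative, not part of the commit):

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    # Upload a local file, attach it to the vector store, and poll until
    # processing completes (placeholder id and path).
    with open("example.pdf", "rb") as f:
        vs_file = await client.beta.vector_stores.files.upload_and_poll(
            vector_store_id="vs_123",
            file=f,
            poll_interval_ms=1_000,
        )
    print(vs_file.status)

asyncio.run(main())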
src/openai/types/audio/transcription.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from ..._models import BaseModel
__all__ = ["Transcription"]
src/openai/types/audio/translation.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from ..._models import BaseModel
__all__ = ["Translation"]
src/openai/types/beta/assistant_tool_choice_function.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from ..._models import BaseModel
__all__ = ["AssistantToolChoiceFunction"]
src/openai/types/fine_tuning/fine_tuning_job_integration.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
FineTuningJobIntegration = FineTuningJobWandbIntegrationObject
src/openai/types/batch_request_counts.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from .._models import BaseModel
__all__ = ["BatchRequestCounts"]
src/openai/types/completion_usage.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from .._models import BaseModel
__all__ = ["CompletionUsage"]
src/openai/types/model_deleted.py
@@ -1,7 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
from .._models import BaseModel
__all__ = ["ModelDeleted"]
tests/lib/test_assistants.py
@@ -48,6 +48,7 @@ def test_create_and_run_poll_method_definition_in_sync(sync: bool, client: OpenA
exclude_params={"stream"},
)
+
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_create_and_run_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
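The signature-sync tests above can be sketched roughly as follows; params_in_sync is an illustrative stand-in for the repository's assertion helper, and it simply compares declared parameter names between the sync and async variants of a method, ignoring intentionally divergent parameters such as "stream":

import inspect
from typing import Callable

def params_in_sync(
    sync_fn: Callable, async_fn: Callable, exclude_params: frozenset[str] = frozenset()
) -> bool:
    # Compare declared parameter names of the sync and async variants,
    # dropping any parameters that are allowed to differ (e.g. "stream").
    sync_params = set(inspect.signature(sync_fn).parameters) - exclude_params
    async_params = set(inspect.signature(async_fn).parameters) - exclude_params
    return sync_params == async_params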