Commit d2738d42

Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
2024-05-01 12:00:17
feat(api): delete messages (#1388)
1 parent 11460b5
.github/workflows/ci.yml
@@ -39,5 +39,25 @@ jobs:
       - name: Ensure importable
         run: |
           rye run python -c 'import openai'
+  test:
+    name: test
+    runs-on: ubuntu-latest
+    if: github.repository == 'openai/openai-python'
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye-up.com/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: 0.24.0
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Bootstrap
+        run: ./scripts/bootstrap
+
+      - name: Run tests
+        run: ./scripts/test
 
-  
bin/check-env-state.py
@@ -1,40 +0,0 @@
-"""Script that exits 1 if the current environment is not
-in sync with the `requirements-dev.lock` file.
-"""
-
-from pathlib import Path
-
-import importlib_metadata
-
-
-def should_run_sync() -> bool:
-    dev_lock = Path(__file__).parent.parent.joinpath("requirements-dev.lock")
-
-    for line in dev_lock.read_text().splitlines():
-        if not line or line.startswith("#") or line.startswith("-e"):
-            continue
-
-        dep, lock_version = line.split("==")
-
-        try:
-            version = importlib_metadata.version(dep)
-
-            if lock_version != version:
-                print(f"mismatch for {dep} current={version} lock={lock_version}")
-                return True
-        except Exception:
-            print(f"could not import {dep}")
-            return True
-
-    return False
-
-
-def main() -> None:
-    if should_run_sync():
-        exit(1)
-    else:
-        exit(0)
-
-
-if __name__ == "__main__":
-    main()
bin/check-test-server
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[0;33m'
-NC='\033[0m' # No Color
-
-function prism_is_running() {
-  curl --silent "http://localhost:4010" >/dev/null 2>&1
-}
-
-function is_overriding_api_base_url() {
-  [ -n "$TEST_API_BASE_URL" ]
-}
-
-if is_overriding_api_base_url ; then
-  # If someone is running the tests against the live API, we can trust they know
-  # what they're doing and exit early.
-  echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
-
-  exit 0
-elif prism_is_running ; then
-  echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
-  echo
-
-  exit 0
-else
-  echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
-  echo -e "running against your OpenAPI spec."
-  echo
-  echo -e "${YELLOW}To fix:${NC}"
-  echo
-  echo -e "1. Install Prism (requires Node 16+):"
-  echo
-  echo -e "  With npm:"
-  echo -e "    \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}"
-  echo
-  echo -e "  With yarn:"
-  echo -e "    \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}"
-  echo
-  echo -e "2. Run the mock server"
-  echo
-  echo -e "  To run the server, pass in the path of your OpenAPI"
-  echo -e "  spec to the prism command:"
-  echo
-  echo -e "    \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}"
-  echo
-
-  exit 1
-fi
bin/test
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-bin/check-test-server && rye run pytest "$@"
bin/ruffen-docs.py → scripts/utils/ruffen-docs.py
File renamed without changes
scripts/bootstrap
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+  brew bundle check >/dev/null 2>&1 || {
+    echo "==> Installing Homebrew dependencies…"
+    brew bundle
+  }
+fi
+
+echo "==> Installing Python dependencies…"
+
+# experimental uv support makes installations significantly faster
+rye config --set-bool behavior.use-uv=true
+
+rye sync
scripts/format
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+rye run format
+
scripts/lint
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+rye run lint
+
scripts/mock
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+if [[ -n "$1" && "$1" != '--'* ]]; then
+  URL="$1"
+  shift
+else
+  URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)"
+fi
+
+# Check if the URL is empty
+if [ -z "$URL" ]; then
+  echo "Error: No OpenAPI spec path/url provided or found in .stats.yml"
+  exit 1
+fi
+
+echo "==> Starting mock server with URL ${URL}"
+
+# Run prism mock on the given spec
+if [ "$1" == "--daemon" ]; then
+  npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" &> .prism.log &
+
+  # Wait for server to come online
+  echo -n "Waiting for server"
+  while ! grep -q "✖  fatal\|Prism is listening" ".prism.log" ; do
+    echo -n "."
+    sleep 0.1
+  done
+
+  if grep -q "✖  fatal" ".prism.log"; then
+    cat .prism.log
+    exit 1
+  fi
+
+  echo
+else
+  npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL"
+fi
scripts/test
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+set -e
+
+cd "$(dirname "$0")/.."
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m' # No Color
+
+function prism_is_running() {
+  curl --silent "http://localhost:4010" >/dev/null 2>&1
+}
+
+kill_server_on_port() {
+  pids=$(lsof -t -i tcp:"$1" || echo "")
+  if [ "$pids" != "" ]; then
+    kill "$pids"
+    echo "Stopped $pids."
+  fi
+}
+
+function is_overriding_api_base_url() {
+  [ -n "$TEST_API_BASE_URL" ]
+}
+
+if ! is_overriding_api_base_url && ! prism_is_running ; then
+  # When we exit this script, make sure to kill the background mock server process
+  trap 'kill_server_on_port 4010' EXIT
+
+  # Start the dev server
+  ./scripts/mock --daemon
+fi
+
+if is_overriding_api_base_url ; then
+  echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}"
+  echo
+elif ! prism_is_running ; then
+  echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+  echo -e "running against your OpenAPI spec."
+  echo
+  echo -e "To run the server, pass in the path or url of your OpenAPI"
+  echo -e "spec to the prism command:"
+  echo
+  echo -e "  \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}"
+  echo
+
+  exit 1
+else
+  echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+  echo
+fi
+
+# Run tests
+echo "==> Running tests"
+rye run pytest "$@"
src/openai/resources/beta/threads/messages.py
@@ -23,6 +23,7 @@ from ...._base_client import (
 )
 from ....types.beta.threads import message_list_params, message_create_params, message_update_params
 from ....types.beta.threads.message import Message
+from ....types.beta.threads.message_deleted import MessageDeleted
 
 __all__ = ["Messages", "AsyncMessages"]
 
@@ -252,6 +253,43 @@ class Messages(SyncAPIResource):
             model=Message,
         )
 
+    def delete(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> MessageDeleted:
+        """
+        Deletes a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._delete(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=MessageDeleted,
+        )
+
 
 class AsyncMessages(AsyncAPIResource):
     @cached_property
@@ -478,6 +516,43 @@ class AsyncMessages(AsyncAPIResource):
             model=Message,
         )
 
+    async def delete(
+        self,
+        message_id: str,
+        *,
+        thread_id: str,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> MessageDeleted:
+        """
+        Deletes a message.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        if not thread_id:
+            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+        if not message_id:
+            raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return await self._delete(
+            f"/threads/{thread_id}/messages/{message_id}",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=MessageDeleted,
+        )
+
 
 class MessagesWithRawResponse:
     def __init__(self, messages: Messages) -> None:
@@ -495,6 +570,9 @@ class MessagesWithRawResponse:
         self.list = _legacy_response.to_raw_response_wrapper(
             messages.list,
         )
+        self.delete = _legacy_response.to_raw_response_wrapper(
+            messages.delete,
+        )
 
 
 class AsyncMessagesWithRawResponse:
@@ -513,6 +591,9 @@ class AsyncMessagesWithRawResponse:
         self.list = _legacy_response.async_to_raw_response_wrapper(
             messages.list,
         )
+        self.delete = _legacy_response.async_to_raw_response_wrapper(
+            messages.delete,
+        )
 
 
 class MessagesWithStreamingResponse:
@@ -531,6 +612,9 @@ class MessagesWithStreamingResponse:
         self.list = to_streamed_response_wrapper(
             messages.list,
         )
+        self.delete = to_streamed_response_wrapper(
+            messages.delete,
+        )
 
 
 class AsyncMessagesWithStreamingResponse:
@@ -549,3 +633,6 @@ class AsyncMessagesWithStreamingResponse:
         self.list = async_to_streamed_response_wrapper(
             messages.list,
         )
+        self.delete = async_to_streamed_response_wrapper(
+            messages.delete,
+        )
src/openai/resources/batches.py
@@ -40,7 +40,7 @@ class Batches(SyncAPIResource):
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions"],
+        endpoint: Literal["/v1/chat/completions", "/v1/embeddings"],
         input_file_id: str,
         metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -57,8 +57,8 @@ class Batches(SyncAPIResource):
           completion_window: The time frame within which the batch should be processed. Currently only `24h`
               is supported.
 
-          endpoint: The endpoint to be used for all requests in the batch. Currently only
-              `/v1/chat/completions` is supported.
+          endpoint: The endpoint to be used for all requests in the batch. Currently
+              `/v1/chat/completions` and `/v1/embeddings` are supported.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
@@ -228,7 +228,7 @@ class AsyncBatches(AsyncAPIResource):
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions"],
+        endpoint: Literal["/v1/chat/completions", "/v1/embeddings"],
         input_file_id: str,
         metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -245,8 +245,8 @@ class AsyncBatches(AsyncAPIResource):
           completion_window: The time frame within which the batch should be processed. Currently only `24h`
               is supported.
 
-          endpoint: The endpoint to be used for all requests in the batch. Currently only
-              `/v1/chat/completions` is supported.
+          endpoint: The endpoint to be used for all requests in the batch. Currently
+              `/v1/chat/completions` and `/v1/embeddings` are supported.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
src/openai/types/beta/threads/__init__.py
@@ -11,6 +11,7 @@ from .run_status import RunStatus as RunStatus
 from .text_delta import TextDelta as TextDelta
 from .message_delta import MessageDelta as MessageDelta
 from .message_content import MessageContent as MessageContent
+from .message_deleted import MessageDeleted as MessageDeleted
 from .run_list_params import RunListParams as RunListParams
 from .annotation_delta import AnnotationDelta as AnnotationDelta
 from .image_file_delta import ImageFileDelta as ImageFileDelta
src/openai/types/beta/threads/message_deleted.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["MessageDeleted"]
+
+
+class MessageDeleted(BaseModel):
+    id: str
+
+    deleted: bool
+
+    object: Literal["thread.message.deleted"]
src/openai/types/fine_tuning/fine_tuning_job.py
@@ -110,5 +110,11 @@ class FineTuningJob(BaseModel):
     [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
     """
 
+    estimated_finish: Optional[int] = None
+    """
+    The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
+    finish. The value will be null if the fine-tuning job is not running.
+    """
+
     integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None
     """A list of integrations to enable for this fine-tuning job."""
src/openai/types/batch_create_params.py
@@ -15,10 +15,10 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """
 
-    endpoint: Required[Literal["/v1/chat/completions"]]
+    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings"]]
     """The endpoint to be used for all requests in the batch.
 
-    Currently only `/v1/chat/completions` is supported.
+    Currently `/v1/chat/completions` and `/v1/embeddings` are supported.
     """
 
     input_file_id: Required[str]
tests/api_resources/beta/threads/test_messages.py
@@ -10,7 +10,10 @@ import pytest
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.beta.threads import Message
+from openai.types.beta.threads import (
+    Message,
+    MessageDeleted,
+)
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -243,6 +246,54 @@ class TestMessages:
                 "",
             )
 
+    @parametrize
+    def test_method_delete(self, client: OpenAI) -> None:
+        message = client.beta.threads.messages.delete(
+            "string",
+            thread_id="string",
+        )
+        assert_matches_type(MessageDeleted, message, path=["response"])
+
+    @parametrize
+    def test_raw_response_delete(self, client: OpenAI) -> None:
+        response = client.beta.threads.messages.with_raw_response.delete(
+            "string",
+            thread_id="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        message = response.parse()
+        assert_matches_type(MessageDeleted, message, path=["response"])
+
+    @parametrize
+    def test_streaming_response_delete(self, client: OpenAI) -> None:
+        with client.beta.threads.messages.with_streaming_response.delete(
+            "string",
+            thread_id="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            message = response.parse()
+            assert_matches_type(MessageDeleted, message, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    def test_path_params_delete(self, client: OpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            client.beta.threads.messages.with_raw_response.delete(
+                "string",
+                thread_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+            client.beta.threads.messages.with_raw_response.delete(
+                "",
+                thread_id="string",
+            )
+
 
 class TestAsyncMessages:
     parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -471,3 +522,51 @@ class TestAsyncMessages:
             await async_client.beta.threads.messages.with_raw_response.list(
                 "",
             )
+
+    @parametrize
+    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+        message = await async_client.beta.threads.messages.delete(
+            "string",
+            thread_id="string",
+        )
+        assert_matches_type(MessageDeleted, message, path=["response"])
+
+    @parametrize
+    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+        response = await async_client.beta.threads.messages.with_raw_response.delete(
+            "string",
+            thread_id="string",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        message = response.parse()
+        assert_matches_type(MessageDeleted, message, path=["response"])
+
+    @parametrize
+    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+        async with async_client.beta.threads.messages.with_streaming_response.delete(
+            "string",
+            thread_id="string",
+        ) as response:
+            assert not response.is_closed
+            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+            message = await response.parse()
+            assert_matches_type(MessageDeleted, message, path=["response"])
+
+        assert cast(Any, response.is_closed) is True
+
+    @parametrize
+    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
+            await async_client.beta.threads.messages.with_raw_response.delete(
+                "string",
+                thread_id="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
+            await async_client.beta.threads.messages.with_raw_response.delete(
+                "",
+                thread_id="string",
+            )
.gitignore
@@ -12,3 +12,4 @@ dist
 .env
 .envrc
 codegen.log
+Brewfile.lock.json
.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 63
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0839c14b2b61dad4e830884410cfc3695546682ced009e50583c8bb5c44512d7.yml
+configured_endpoints: 64
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97c9a5f089049dc9eb5cee9475558049003e37e42202cab39e59d75e08b4c613.yml
api.md
@@ -392,6 +392,7 @@ Methods:
 - <code title="get /threads/{thread_id}/messages/{message_id}">client.beta.threads.messages.<a href="./src/openai/resources/beta/threads/messages.py">retrieve</a>(message_id, \*, thread_id) -> <a href="./src/openai/types/beta/threads/message.py">Message</a></code>
 - <code title="post /threads/{thread_id}/messages/{message_id}">client.beta.threads.messages.<a href="./src/openai/resources/beta/threads/messages.py">update</a>(message_id, \*, thread_id, \*\*<a href="src/openai/types/beta/threads/message_update_params.py">params</a>) -> <a href="./src/openai/types/beta/threads/message.py">Message</a></code>
 - <code title="get /threads/{thread_id}/messages">client.beta.threads.messages.<a href="./src/openai/resources/beta/threads/messages.py">list</a>(thread_id, \*\*<a href="src/openai/types/beta/threads/message_list_params.py">params</a>) -> <a href="./src/openai/types/beta/threads/message.py">SyncCursorPage[Message]</a></code>
+- <code title="delete /threads/{thread_id}/messages/{message_id}">client.beta.threads.messages.<a href="./src/openai/resources/beta/threads/messages.py">delete</a>(message_id, \*, thread_id) -> <a href="./src/openai/types/beta/threads/message_deleted.py">MessageDeleted</a></code>
 
 # Batches
 
Brewfile
@@ -0,0 +1,2 @@
+brew "rye"
+
pyproject.toml
@@ -74,7 +74,7 @@ format = { chain = [
   "fix:ruff",
 ]}
 "format:black" = "black ."
-"format:docs" = "python bin/ruffen-docs.py README.md api.md"
+"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md"
 "format:ruff" = "ruff format"
 "format:isort" = "isort ."
 
@@ -197,5 +197,6 @@ known-first-party = ["openai", "tests"]
 
 [tool.ruff.per-file-ignores]
 "bin/**.py" = ["T201", "T203"]
+"scripts/**.py" = ["T201", "T203"]
 "tests/**.py" = ["T201", "T203"]
 "examples/**.py" = ["T201", "T203"]