# main
  1# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  2
  3from __future__ import annotations
  4
  5import os
  6from typing import Any, cast
  7
  8import pytest
  9
 10from openai import OpenAI, AsyncOpenAI
 11from tests.utils import assert_matches_type
 12from openai.types.responses import InputTokenCountResponse
 13
 14base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 15
 16
 17class TestInputTokens:
 18    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
 19
 20    @parametrize
 21    def test_method_count(self, client: OpenAI) -> None:
 22        input_token = client.responses.input_tokens.count()
 23        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
 24
 25    @parametrize
 26    def test_method_count_with_all_params(self, client: OpenAI) -> None:
 27        input_token = client.responses.input_tokens.count(
 28            conversation="string",
 29            input="string",
 30            instructions="instructions",
 31            model="model",
 32            parallel_tool_calls=True,
 33            previous_response_id="resp_123",
 34            reasoning={
 35                "effort": "none",
 36                "generate_summary": "auto",
 37                "summary": "auto",
 38            },
 39            text={
 40                "format": {"type": "text"},
 41                "verbosity": "low",
 42            },
 43            tool_choice="none",
 44            tools=[
 45                {
 46                    "name": "name",
 47                    "parameters": {"foo": "bar"},
 48                    "strict": True,
 49                    "type": "function",
 50                    "description": "description",
 51                }
 52            ],
 53            truncation="auto",
 54        )
 55        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
 56
 57    @parametrize
 58    def test_raw_response_count(self, client: OpenAI) -> None:
 59        response = client.responses.input_tokens.with_raw_response.count()
 60
 61        assert response.is_closed is True
 62        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 63        input_token = response.parse()
 64        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
 65
 66    @parametrize
 67    def test_streaming_response_count(self, client: OpenAI) -> None:
 68        with client.responses.input_tokens.with_streaming_response.count() as response:
 69            assert not response.is_closed
 70            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 71
 72            input_token = response.parse()
 73            assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
 74
 75        assert cast(Any, response.is_closed) is True
 76
 77
 78class TestAsyncInputTokens:
 79    parametrize = pytest.mark.parametrize(
 80        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
 81    )
 82
 83    @parametrize
 84    async def test_method_count(self, async_client: AsyncOpenAI) -> None:
 85        input_token = await async_client.responses.input_tokens.count()
 86        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
 87
 88    @parametrize
 89    async def test_method_count_with_all_params(self, async_client: AsyncOpenAI) -> None:
 90        input_token = await async_client.responses.input_tokens.count(
 91            conversation="string",
 92            input="string",
 93            instructions="instructions",
 94            model="model",
 95            parallel_tool_calls=True,
 96            previous_response_id="resp_123",
 97            reasoning={
 98                "effort": "none",
 99                "generate_summary": "auto",
100                "summary": "auto",
101            },
102            text={
103                "format": {"type": "text"},
104                "verbosity": "low",
105            },
106            tool_choice="none",
107            tools=[
108                {
109                    "name": "name",
110                    "parameters": {"foo": "bar"},
111                    "strict": True,
112                    "type": "function",
113                    "description": "description",
114                }
115            ],
116            truncation="auto",
117        )
118        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
119
120    @parametrize
121    async def test_raw_response_count(self, async_client: AsyncOpenAI) -> None:
122        response = await async_client.responses.input_tokens.with_raw_response.count()
123
124        assert response.is_closed is True
125        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
126        input_token = response.parse()
127        assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
128
129    @parametrize
130    async def test_streaming_response_count(self, async_client: AsyncOpenAI) -> None:
131        async with async_client.responses.input_tokens.with_streaming_response.count() as response:
132            assert not response.is_closed
133            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
134
135            input_token = await response.parse()
136            assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
137
138        assert cast(Any, response.is_closed) is True