  1# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  2
  3from __future__ import annotations
  4
  5import os
  6from typing import Any, cast
  7
  8import pytest
  9import pydantic
 10
 11from openai import OpenAI, AsyncOpenAI
 12from tests.utils import assert_matches_type
 13from openai.pagination import SyncCursorPage, AsyncCursorPage
 14from openai.types.chat import (
 15    ChatCompletion,
 16    ChatCompletionDeleted,
 17)
 18
# Target API base URL for tests; defaults to a local mock server
# (presumably a Prism mock — port 4010 is Prism's default; confirm in CI config).
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 20
 21
class TestCompletions:
    """Synchronous tests for the `client.chat.completions` resource.

    The `client` fixture is parametrized (indirectly) over a "loose" and a
    "strict" client. Each endpoint is exercised three ways: the plain method,
    `with_raw_response` (returns the HTTP response to be parsed), and
    `with_streaming_response` (context-managed response). Generated file —
    edit the OpenAPI spec / generator config rather than this file directly.
    """

    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    # --- create(): overload 1 (non-streaming) ---

    @parametrize
    def test_method_create_overload_1(self, client: OpenAI) -> None:
        completion = client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
        # Exercises every optional request parameter in a single call.
        completion = client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                    "name": "name",
                }
            ],
            model="gpt-4o",
            audio={
                "format": "wav",
                "voice": "ash",
            },
            frequency_penalty=-2,
            function_call="none",
            functions=[
                {
                    "name": "name",
                    "description": "description",
                    "parameters": {"foo": "bar"},
                }
            ],
            logit_bias={"foo": 0},
            logprobs=True,
            max_completion_tokens=0,
            max_tokens=0,
            metadata={"foo": "string"},
            modalities=["text"],
            n=1,
            parallel_tool_calls=True,
            prediction={
                "content": "string",
                "type": "content",
            },
            presence_penalty=-2,
            prompt_cache_key="prompt-cache-key-1234",
            prompt_cache_retention="in-memory",
            reasoning_effort="none",
            response_format={"type": "text"},
            safety_identifier="safety-identifier-1234",
            seed=-9007199254740991,
            service_tier="auto",
            stop="\n",
            store=True,
            stream=False,
            stream_options={
                "include_obfuscation": True,
                "include_usage": True,
            },
            temperature=1,
            tool_choice="none",
            tools=[
                {
                    "function": {
                        "name": "name",
                        "description": "description",
                        "parameters": {"foo": "bar"},
                        "strict": True,
                    },
                    "type": "function",
                }
            ],
            top_logprobs=0,
            top_p=1,
            user="user-1234",
            verbosity="low",
            web_search_options={
                "search_context_size": "low",
                "user_location": {
                    "approximate": {
                        "city": "city",
                        "country": "country",
                        "region": "region",
                        "timezone": "timezone",
                    },
                    "type": "approximate",
                },
            },
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        )

        # Non-streaming raw responses are fully read and closed eagerly.
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        # Exiting the context manager must close the connection.
        assert cast(Any, response.is_closed) is True

    # --- create(): overload 2 (stream=True) ---

    @parametrize
    def test_method_create_overload_2(self, client: OpenAI) -> None:
        completion_stream = client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        )
        # Streams have no single parsed body; just release the connection.
        completion_stream.response.close()

    @parametrize
    def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
        # Same exhaustive parameter set as overload 1, but with stream=True
        # (and therefore no `stream=False` entry).
        completion_stream = client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                    "name": "name",
                }
            ],
            model="gpt-4o",
            stream=True,
            audio={
                "format": "wav",
                "voice": "ash",
            },
            frequency_penalty=-2,
            function_call="none",
            functions=[
                {
                    "name": "name",
                    "description": "description",
                    "parameters": {"foo": "bar"},
                }
            ],
            logit_bias={"foo": 0},
            logprobs=True,
            max_completion_tokens=0,
            max_tokens=0,
            metadata={"foo": "string"},
            modalities=["text"],
            n=1,
            parallel_tool_calls=True,
            prediction={
                "content": "string",
                "type": "content",
            },
            presence_penalty=-2,
            prompt_cache_key="prompt-cache-key-1234",
            prompt_cache_retention="in-memory",
            reasoning_effort="none",
            response_format={"type": "text"},
            safety_identifier="safety-identifier-1234",
            seed=-9007199254740991,
            service_tier="auto",
            stop="\n",
            store=True,
            stream_options={
                "include_obfuscation": True,
                "include_usage": True,
            },
            temperature=1,
            tool_choice="none",
            tools=[
                {
                    "function": {
                        "name": "name",
                        "description": "description",
                        "parameters": {"foo": "bar"},
                        "strict": True,
                    },
                    "type": "function",
                }
            ],
            top_logprobs=0,
            top_p=1,
            user="user-1234",
            verbosity="low",
            web_search_options={
                "search_context_size": "low",
                "user_location": {
                    "approximate": {
                        "city": "city",
                        "country": "country",
                        "region": "region",
                        "timezone": "timezone",
                    },
                    "type": "approximate",
                },
            },
        )
        completion_stream.response.close()

    @parametrize
    def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        )

        # No `is_closed` assertion here: streaming responses stay open
        # until the parsed stream is closed.
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        stream = response.parse()
        stream.close()

    @parametrize
    def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            stream = response.parse()
            stream.close()

        assert cast(Any, response.is_closed) is True

    # --- retrieve() ---

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        completion = client.chat.completions.retrieve(
            "completion_id",
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.retrieve(
            "completion_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.retrieve(
            "completion_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # An empty path parameter must be rejected client-side.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            client.chat.completions.with_raw_response.retrieve(
                "",
            )

    # --- update() ---

    @parametrize
    def test_method_update(self, client: OpenAI) -> None:
        completion = client.chat.completions.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_raw_response_update(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    def test_streaming_response_update(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_update(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            client.chat.completions.with_raw_response.update(
                completion_id="",
                metadata={"foo": "string"},
            )

    # --- list() ---

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        completion = client.chat.completions.list()
        assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        completion = client.chat.completions.list(
            after="after",
            limit=0,
            metadata={"foo": "string"},
            model="model",
            order="asc",
        )
        assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = response.parse()
            assert_matches_type(SyncCursorPage[ChatCompletion], completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    # --- delete() ---

    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        completion = client.chat.completions.delete(
            "completion_id",
        )
        assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.chat.completions.with_raw_response.delete(
            "completion_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.chat.completions.with_streaming_response.delete(
            "completion_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = response.parse()
            assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            client.chat.completions.with_raw_response.delete(
                "",
            )

    # --- misc guards ---

    @parametrize
    def test_method_create_disallows_pydantic(self, client: OpenAI) -> None:
        # Passing a BaseModel *class* (rather than an instance/dict) as
        # `response_format` must raise a clear TypeError.
        class MyModel(pydantic.BaseModel):
            a: str

        with pytest.raises(TypeError, match=r"You tried to pass a `BaseModel` class"):
            client.chat.completions.create(
                messages=[
                    {
                        "content": "string",
                        "role": "system",
                    }
                ],
                model="gpt-4o",
                response_format=cast(Any, MyModel),
            )
461
462
class TestAsyncCompletions:
    """Async mirror of `TestCompletions` for `AsyncOpenAI`.

    Parametrized over "loose", "strict", and an aiohttp-backed HTTP client.
    Note the parse-call asymmetry visible below: `with_raw_response` results
    are parsed synchronously (`response.parse()`), while
    `with_streaming_response` results are awaited (`await response.parse()`).
    Generated file — edit the OpenAPI spec / generator config instead.
    """

    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    # --- create(): overload 1 (non-streaming) ---

    @parametrize
    async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
        # Exercises every optional request parameter in a single call.
        completion = await async_client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                    "name": "name",
                }
            ],
            model="gpt-4o",
            audio={
                "format": "wav",
                "voice": "ash",
            },
            frequency_penalty=-2,
            function_call="none",
            functions=[
                {
                    "name": "name",
                    "description": "description",
                    "parameters": {"foo": "bar"},
                }
            ],
            logit_bias={"foo": 0},
            logprobs=True,
            max_completion_tokens=0,
            max_tokens=0,
            metadata={"foo": "string"},
            modalities=["text"],
            n=1,
            parallel_tool_calls=True,
            prediction={
                "content": "string",
                "type": "content",
            },
            presence_penalty=-2,
            prompt_cache_key="prompt-cache-key-1234",
            prompt_cache_retention="in-memory",
            reasoning_effort="none",
            response_format={"type": "text"},
            safety_identifier="safety-identifier-1234",
            seed=-9007199254740991,
            service_tier="auto",
            stop="\n",
            store=True,
            stream=False,
            stream_options={
                "include_obfuscation": True,
                "include_usage": True,
            },
            temperature=1,
            tool_choice="none",
            tools=[
                {
                    "function": {
                        "name": "name",
                        "description": "description",
                        "parameters": {"foo": "bar"},
                        "strict": True,
                    },
                    "type": "function",
                }
            ],
            top_logprobs=0,
            top_p=1,
            user="user-1234",
            verbosity="low",
            web_search_options={
                "search_context_size": "low",
                "user_location": {
                    "approximate": {
                        "city": "city",
                        "country": "country",
                        "region": "region",
                        "timezone": "timezone",
                    },
                    "type": "approximate",
                },
            },
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        )

        # Non-streaming raw responses are fully read and closed eagerly.
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = await response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        # Exiting the context manager must close the connection.
        assert cast(Any, response.is_closed) is True

    # --- create(): overload 2 (stream=True) ---

    @parametrize
    async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
        completion_stream = await async_client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        )
        # Streams have no single parsed body; just release the connection.
        await completion_stream.response.aclose()

    @parametrize
    async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
        # Same exhaustive parameter set as overload 1, but with stream=True
        # (and therefore no `stream=False` entry).
        completion_stream = await async_client.chat.completions.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                    "name": "name",
                }
            ],
            model="gpt-4o",
            stream=True,
            audio={
                "format": "wav",
                "voice": "ash",
            },
            frequency_penalty=-2,
            function_call="none",
            functions=[
                {
                    "name": "name",
                    "description": "description",
                    "parameters": {"foo": "bar"},
                }
            ],
            logit_bias={"foo": 0},
            logprobs=True,
            max_completion_tokens=0,
            max_tokens=0,
            metadata={"foo": "string"},
            modalities=["text"],
            n=1,
            parallel_tool_calls=True,
            prediction={
                "content": "string",
                "type": "content",
            },
            presence_penalty=-2,
            prompt_cache_key="prompt-cache-key-1234",
            prompt_cache_retention="in-memory",
            reasoning_effort="none",
            response_format={"type": "text"},
            safety_identifier="safety-identifier-1234",
            seed=-9007199254740991,
            service_tier="auto",
            stop="\n",
            store=True,
            stream_options={
                "include_obfuscation": True,
                "include_usage": True,
            },
            temperature=1,
            tool_choice="none",
            tools=[
                {
                    "function": {
                        "name": "name",
                        "description": "description",
                        "parameters": {"foo": "bar"},
                        "strict": True,
                    },
                    "type": "function",
                }
            ],
            top_logprobs=0,
            top_p=1,
            user="user-1234",
            verbosity="low",
            web_search_options={
                "search_context_size": "low",
                "user_location": {
                    "approximate": {
                        "city": "city",
                        "country": "country",
                        "region": "region",
                        "timezone": "timezone",
                    },
                    "type": "approximate",
                },
            },
        )
        await completion_stream.response.aclose()

    @parametrize
    async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        )

        # No `is_closed` assertion here: streaming responses stay open
        # until the parsed stream is closed.
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        stream = response.parse()
        await stream.close()

    @parametrize
    async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.create(
            messages=[
                {
                    "content": "string",
                    "role": "developer",
                }
            ],
            model="gpt-4o",
            stream=True,
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            stream = await response.parse()
            await stream.close()

        assert cast(Any, response.is_closed) is True

    # --- retrieve() ---

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.retrieve(
            "completion_id",
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.retrieve(
            "completion_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.retrieve(
            "completion_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = await response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # An empty path parameter must be rejected client-side.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            await async_client.chat.completions.with_raw_response.retrieve(
                "",
            )

    # --- update() ---

    @parametrize
    async def test_method_update(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        )
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletion, completion, path=["response"])

    @parametrize
    async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.update(
            completion_id="completion_id",
            metadata={"foo": "string"},
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = await response.parse()
            assert_matches_type(ChatCompletion, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            await async_client.chat.completions.with_raw_response.update(
                completion_id="",
                metadata={"foo": "string"},
            )

    # --- list() ---

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.list()
        assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.list(
            after="after",
            limit=0,
            metadata={"foo": "string"},
            model="model",
            order="asc",
        )
        assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = await response.parse()
            assert_matches_type(AsyncCursorPage[ChatCompletion], completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    # --- delete() ---

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        completion = await async_client.chat.completions.delete(
            "completion_id",
        )
        assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.chat.completions.with_raw_response.delete(
            "completion_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        completion = response.parse()
        assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.chat.completions.with_streaming_response.delete(
            "completion_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            completion = await response.parse()
            assert_matches_type(ChatCompletionDeleted, completion, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
            await async_client.chat.completions.with_raw_response.delete(
                "",
            )

    # --- misc guards ---

    @parametrize
    async def test_method_create_disallows_pydantic(self, async_client: AsyncOpenAI) -> None:
        # Passing a BaseModel *class* (rather than an instance/dict) as
        # `response_format` must raise a clear TypeError.
        class MyModel(pydantic.BaseModel):
            a: str

        with pytest.raises(TypeError, match=r"You tried to pass a `BaseModel` class"):
            await async_client.chat.completions.create(
                messages=[
                    {
                        "content": "string",
                        "role": "system",
                    }
                ],
                model="gpt-4o",
                response_format=cast(Any, MyModel),
            )