from __future__ import annotations

from enum import Enum
from typing import List, Optional
from typing_extensions import Literal, TypeVar

import pytest
from respx import MockRouter
from pydantic import Field, BaseModel
from inline_snapshot import snapshot

import openai
from openai import OpenAI, AsyncOpenAI
from openai._utils import assert_signatures_in_sync
from openai._compat import PYDANTIC_V1

from ..utils import print_obj
from ...conftest import base_url
from ..snapshots import make_snapshot_request, make_async_snapshot_request
from ..schema_types.query import Query

_T = TypeVar("_T")

# all the snapshots in this file are auto-generated from the live API
#
# you can update them with
#
# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`


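# without a `response_format` the completion is parsed generically and `parsed` is `None`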
@pytest.mark.respx(base_url=base_url)
def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvaueLEMLNYbT8YzpJxsmiQ6HSY", "object": "chat.completion", "created": 1727346142, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "I\'m unable to provide real-time weather updates. To get the current weather in San Francisco, I recommend checking a reliable weather website or app like the Weather Channel or a local news station.", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 14, "completion_tokens": 37, "total_tokens": 51, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[NoneType](
    choices=[
        ParsedChoice[NoneType](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[NoneType](
                annotations=None,
                audio=None,
                content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I
recommend checking a reliable weather website or app like the Weather Channel or a local news station.",
                function_call=None,
                parsed=None,
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727346142,
    id='chatcmpl-ABfvaueLEMLNYbT8YzpJxsmiQ6HSY',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_b40fb1c6fb',
    usage=CompletionUsage(
        completion_tokens=37,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=14,
        prompt_tokens_details=None,
        total_tokens=51
    )
)
"""
    )


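# a pydantic `BaseModel` passed as `response_format` is parsed into `message.parsed`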
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvbtVnTu5DeC4EFnRYj8mtfOM99", "object": "chat.completion", "created": 1727346143, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                annotations=None,
                audio=None,
                content='{"city":"San Francisco","temperature":65,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=65.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727346143,
    id='chatcmpl-ABfvbtVnTu5DeC4EFnRYj8mtfOM99',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=79,
        prompt_tokens_details=None,
        total_tokens=93
    )
)
"""
    )


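# optional fields with defaults in the `response_format` model are handled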
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_optional_default(
    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Optional[Literal["c", "f"]] = None

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvcC8grKYsRkSoMp9CCAhbXAd0b", "object": "chat.completion", "created": 1727346144, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 88, "completion_tokens": 14, "total_tokens": 102, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                annotations=None,
                audio=None,
                content='{"city":"San Francisco","temperature":65,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=65.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727346144,
    id='chatcmpl-ABfvcC8grKYsRkSoMp9CCAhbXAd0b',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_b40fb1c6fb',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=88,
        prompt_tokens_details=None,
        total_tokens=102
    )
)
"""
    )


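# enum fields and `Field(description=...)` metadata are supported in the `response_format` model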
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_enum(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class Color(Enum):
        """The detected color"""

        RED = "red"
        BLUE = "blue"
        GREEN = "green"

    class ColorDetection(BaseModel):
        color: Color
        hex_color_code: str = Field(description="The hex color code of the detected color")

    if PYDANTIC_V1:
        ColorDetection.update_forward_refs(**locals())  # type: ignore

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "user", "content": "What color is a Coke can?"},
            ],
            response_format=ColorDetection,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvjIatz0zrZu50gRbMtlp0asZpz", "object": "chat.completion", "created": 1727346151, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"color\\":\\"red\\",\\"hex_color_code\\":\\"#FF0000\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 109, "completion_tokens": 14, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices[0], monkeypatch) == snapshot(
        """\
ParsedChoice[ColorDetection](
    finish_reason='stop',
    index=0,
    logprobs=None,
    message=ParsedChatCompletionMessage[ColorDetection](
        annotations=None,
        audio=None,
        content='{"color":"red","hex_color_code":"#FF0000"}',
        function_call=None,
        parsed=ColorDetection(color=<Color.RED: 'red'>, hex_color_code='#FF0000'),
        refusal=None,
        role='assistant',
        tool_calls=None
    )
)
"""
    )


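# every choice is parsed individually when `n > 1`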
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_multiple_choices(
    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            n=3,
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvp8qzboW92q8ONDF4DPHlI7ckC", "object": "chat.completion", "created": 1727346157, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":64,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 1, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 2, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":63.0,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 44, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[Location](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":64,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=64.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    ),
    ParsedChoice[Location](
        finish_reason='stop',
        index=1,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":65,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=65.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    ),
    ParsedChoice[Location](
        finish_reason='stop',
        index=2,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":63.0,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=63.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )


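# pydantic dataclasses are also accepted as a `response_format`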
@pytest.mark.respx(base_url=base_url)
@pytest.mark.skipif(PYDANTIC_V1, reason="dataclasses only supported in v2")
def test_parse_pydantic_dataclass(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    from pydantic.dataclasses import dataclass

    @dataclass
    class CalendarEvent:
        name: str
        date: str
        participants: List[str]

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "Extract the event information."},
                {"role": "user", "content": "Alice and Bob are going to a science fair on Friday."},
            ],
            response_format=CalendarEvent,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8", "object": "chat.completion", "created": 1727346158, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"name\\":\\"Science Fair\\",\\"date\\":\\"Friday\\",\\"participants\\":[\\"Alice\\",\\"Bob\\"]}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 92, "completion_tokens": 17, "total_tokens": 109, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[CalendarEvent](
    choices=[
        ParsedChoice[CalendarEvent](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[CalendarEvent](
                annotations=None,
                audio=None,
                content='{"name":"Science Fair","date":"Friday","participants":["Alice","Bob"]}',
                function_call=None,
                parsed=CalendarEvent(name='Science Fair', date='Friday', participants=['Alice', 'Bob']),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727346158,
    id='chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_7568d46099',
    usage=CompletionUsage(
        completion_tokens=17,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=92,
        prompt_tokens_details=None,
        total_tokens=109
    )
)
"""
    )


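# tool calls for a model with nested models and enums are parsed into `parsed_arguments`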
@pytest.mark.respx(base_url=base_url)
def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
                },
            ],
            tools=[openai.pydantic_function_tool(Query)],
            response_format=Query,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvtNiaTNUF6OymZUnEFc9lPq9p1", "object": "chat.completion", "created": 1727346161, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_NKpApJybW1MzOjZO2FzwYw0d", "type": "function", "function": {"name": "Query", "arguments": "{\\"name\\":\\"May 2022 Fulfilled Orders Not Delivered on Time\\",\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\",\\"shipped_at\\",\\"ordered_at\\",\\"canceled_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\">=\\",\\"value\\":\\"2022-05-01\\"},{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"<=\\",\\"value\\":\\"2022-05-31\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 512, "completion_tokens": 132, "total_tokens": 644, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices[0], monkeypatch) == snapshot(
        """\
ParsedChoice[Query](
    finish_reason='tool_calls',
    index=0,
    logprobs=None,
    message=ParsedChatCompletionMessage[Query](
        annotations=None,
        audio=None,
        content=None,
        function_call=None,
        parsed=None,
        refusal=None,
        role='assistant',
        tool_calls=[
            ParsedFunctionToolCall(
                function=ParsedFunction(
                    arguments='{"name":"May 2022 Fulfilled Orders Not Delivered on 
Time","table_name":"orders","columns":["id","status","expected_delivery_date","delivered_at","shipped_at","ordered_at","
canceled_at"],"conditions":[{"column":"ordered_at","operator":">=","value":"2022-05-01"},{"column":"ordered_at","operato
r":"<=","value":"2022-05-31"},{"column":"status","operator":"=","value":"fulfilled"},{"column":"delivered_at","operator"
:">","value":{"column_name":"expected_delivery_date"}}],"order_by":"asc"}',
                    name='Query',
                    parsed_arguments=Query(
                        columns=[
                            <Column.id: 'id'>,
                            <Column.status: 'status'>,
                            <Column.expected_delivery_date: 'expected_delivery_date'>,
                            <Column.delivered_at: 'delivered_at'>,
                            <Column.shipped_at: 'shipped_at'>,
                            <Column.ordered_at: 'ordered_at'>,
                            <Column.canceled_at: 'canceled_at'>
                        ],
                        conditions=[
                            Condition(column='ordered_at', operator=<Operator.ge: '>='>, value='2022-05-01'),
                            Condition(column='ordered_at', operator=<Operator.le: '<='>, value='2022-05-31'),
                            Condition(column='status', operator=<Operator.eq: '='>, value='fulfilled'),
                            Condition(
                                column='delivered_at',
                                operator=<Operator.gt: '>'>,
                                value=DynamicValue(column_name='expected_delivery_date')
                            )
                        ],
                        name='May 2022 Fulfilled Orders Not Delivered on Time',
                        order_by=<OrderBy.asc: 'asc'>,
                        table_name=<Table.orders: 'orders'>
                    )
                ),
                id='call_NKpApJybW1MzOjZO2FzwYw0d',
                type='function'
            )
        ]
    )
)
"""
    )


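# a response cut off by `max_tokens` before valid JSON is produced raises `LengthFinishReasonError`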
@pytest.mark.respx(base_url=base_url)
def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    with pytest.raises(openai.LengthFinishReasonError):
        make_snapshot_request(
            lambda c: c.chat.completions.parse(
                model="gpt-4o-2024-08-06",
                messages=[
                    {
                        "role": "user",
                        "content": "What's the weather like in SF?",
                    },
                ],
                max_tokens=1,
                response_format=Location,
            ),
            content_snapshot=snapshot(
                '{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}'
            ),
            path="/chat/completions",
            mock_client=client,
            respx_mock=respx_mock,
        )


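# a refusal leaves `parsed` as `None` and populates `refusal` instead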
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "How do I make anthrax?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvwoKVWPQj2UPlAcAKM7s40GsRx", "object": "chat.completion", "created": 1727346164, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "refusal": "I\'m very sorry, but I can\'t assist with that."}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 12, "total_tokens": 91, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[Location](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal="I'm very sorry, but I can't assist with that.",
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )


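# tools created with `openai.pydantic_function_tool()` get their arguments parsed back into the model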
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class GetWeatherArgs(BaseModel):
        city: str
        country: str
        units: Literal["c", "f"] = "c"

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in Edinburgh?",
                },
            ],
            tools=[
                openai.pydantic_function_tool(GetWeatherArgs),
            ],
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvx6Z4dchiW2nya1N8KMsHFrQRE", "object": "chat.completion", "created": 1727346165, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_Y6qJ7ofLgOrBnMD5WbVAeiRV", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\":\\"Edinburgh\\",\\"country\\":\\"UK\\",\\"units\\":\\"c\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 76, "completion_tokens": 24, "total_tokens": 100, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_e45dabd248"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
                        name='GetWeatherArgs',
                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
                    ),
                    id='call_Y6qJ7ofLgOrBnMD5WbVAeiRV',
                    type='function'
                )
            ]
        )
    )
]
"""
    )


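# multiple pydantic tools can be combined, with optional `name`/`description` overrides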
@pytest.mark.respx(base_url=base_url)
def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class GetWeatherArgs(BaseModel):
        """Get the temperature for the given country/city combo"""

        city: str
        country: str
        units: Literal["c", "f"] = "c"

    class GetStockPrice(BaseModel):
        ticker: str
        exchange: str

    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in Edinburgh?",
                },
                {
                    "role": "user",
                    "content": "What's the price of AAPL?",
                },
            ],
            tools=[
                openai.pydantic_function_tool(GetWeatherArgs),
                openai.pydantic_function_tool(
                    GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
                ),
            ],
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvyvfNWKcl7Ohqos4UFrmMs1v4C", "object": "chat.completion", "created": 1727346166, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_fdNz3vOBKYgOIpMdWotB9MjY", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\": \\"Edinburgh\\", \\"country\\": \\"GB\\", \\"units\\": \\"c\\"}"}}, {"id": "call_h1DWI1POMJLb0KwIyQHWXD4p", "type": "function", "function": {"name": "get_stock_price", "arguments": "{\\"ticker\\": \\"AAPL\\", \\"exchange\\": \\"NASDAQ\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 149, "completion_tokens": 60, "total_tokens": 209, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city": "Edinburgh", "country": "GB", "units": "c"}',
                        name='GetWeatherArgs',
                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='GB', units='c')
                    ),
                    id='call_fdNz3vOBKYgOIpMdWotB9MjY',
                    type='function'
                ),
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
                        name='get_stock_price',
                        parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
                    ),
                    id='call_h1DWI1POMJLb0KwIyQHWXD4p',
                    type='function'
                )
            ]
        )
    )
]
"""
    )


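# plain strict JSON schema tools are parsed into a dict rather than a model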
@pytest.mark.respx(base_url=base_url)
def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    completion = make_snapshot_request(
        lambda c: c.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "city": {"type": "string"},
                                "state": {"type": "string"},
                            },
                            "required": [
                                "city",
                                "state",
                            ],
                            "additionalProperties": False,
                        },
                        "strict": True,
                    },
                }
            ],
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABfvzdvCI6RaIkiEFNjqGXCSYnlzf", "object": "chat.completion", "created": 1727346167, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_CUdUoJpsWWVdxXntucvnol1M", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\":\\"San Francisco\\",\\"state\\":\\"CA\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 48, "completion_tokens": 19, "total_tokens": 67, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(completion.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"San Francisco","state":"CA"}',
                        name='get_weather',
                        parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
                    ),
                    id='call_CUdUoJpsWWVdxXntucvnol1M',
                    type='function'
                )
            ]
        )
    )
]
"""
    )


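# `.parse()` rejects function tools that are not marked `strict`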
def test_parse_non_strict_tools(client: OpenAI) -> None:
    with pytest.raises(
        ValueError, match="`get_weather` is not strict. Only `strict` function tools can be auto-parsed"
    ):
        client.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[],
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "parameters": {},
                    },
                }
            ],
        )


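# `.with_raw_response.parse()` records the helper method in the request headers and still parses the body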
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_raw_response(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    response = make_snapshot_request(
        lambda c: c.chat.completions.with_raw_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT", "object": "chat.completion", "created": 1727389540, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=client,
        respx_mock=respx_mock,
    )
    assert response.http_request.headers.get("x-stainless-helper-method") == "chat.completions.parse"

    completion = response.parse()
    message = completion.choices[0].message
    assert message.parsed is not None
    assert isinstance(message.parsed.city, str)
    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                annotations=None,
                audio=None,
                content='{"city":"San Francisco","temperature":58,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=58.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727389540,
    id='chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=79,
        prompt_tokens_details=None,
        total_tokens=93
    )
)
"""
    )


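# same as the raw response test above, but with the async client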
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_async_parse_pydantic_raw_response(
    async_client: AsyncOpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    response = await make_async_snapshot_request(
        lambda c: c.chat.completions.with_raw_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(
            '{"id": "chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq", "object": "chat.completion", "created": 1727389532, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}'
        ),
        path="/chat/completions",
        mock_client=async_client,
        respx_mock=respx_mock,
    )
    assert response.http_request.headers.get("x-stainless-helper-method") == "chat.completions.parse"

    completion = response.parse()
    message = completion.choices[0].message
    assert message.parsed is not None
    assert isinstance(message.parsed.city, str)
    assert print_obj(completion, monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                annotations=None,
                audio=None,
                content='{"city":"San Francisco","temperature":65,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=65.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727389532,
    id='chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=79,
        prompt_tokens_details=None,
        total_tokens=93
    )
)
"""
    )


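# `.parse()` should mirror the `.create()` signature apart from the excluded params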
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client

    assert_signatures_in_sync(
        checking_client.chat.completions.create,
        checking_client.chat.completions.parse,
        exclude_params={"response_format", "stream"},
    )