   1from __future__ import annotations
   2
   3import os
   4from typing import Any, Generic, Callable, Iterator, cast, overload
   5from typing_extensions import Literal, TypeVar
   6
   7import rich
   8import httpx
   9import pytest
  10from respx import MockRouter
  11from pydantic import BaseModel
  12from inline_snapshot import (
  13    external,
  14    snapshot,
  15    outsource,  # pyright: ignore[reportUnknownVariableType]
  16    get_snapshot_value,
  17)
  18
  19import openai
  20from openai import OpenAI, AsyncOpenAI
  21from openai._utils import consume_sync_iterator, assert_signatures_in_sync
  22from openai._compat import model_copy
  23from openai.types.chat import ChatCompletionChunk
  24from openai.lib.streaming.chat import (
  25    ContentDoneEvent,
  26    ChatCompletionStream,
  27    ChatCompletionStreamEvent,
  28    ChatCompletionStreamState,
  29    ChatCompletionStreamManager,
  30    ParsedChatCompletionSnapshot,
  31)
  32from openai.lib._parsing._completions import ResponseFormatT
  33
  34from ..utils import print_obj
  35from ...conftest import base_url
  36
# NOTE(review): no use of `_T` is visible in this chunk — presumably consumed by
# test helpers further down the file; confirm before removing.
_T = TypeVar("_T")
  38
  39# all the snapshots in this file are auto-generated from the live API
  40#
  41# you can update them with
  42#
  43# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""`
  44
  45
@pytest.mark.respx(base_url=base_url)
def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """Streaming without a `response_format`: the final choices and the
    `content.done` event are typed as `NoneType` and carry `parsed=None`."""
    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
        ),
        content_snapshot=snapshot(external("e2aad469b71d*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I 
recommend checking a reliable weather website or a weather app.",
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )
    assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
        """\
ContentDoneEvent[NoneType](
    content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I recommend 
checking a reliable weather website or a weather app.",
    parsed=None,
    type='content.done'
)
"""
    )
  95
  96
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """Streaming with a pydantic `response_format`: content is parsed into
    `Location` instances, and partial parses are surfaced on delta events."""
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    done_snapshots: list[ParsedChatCompletionSnapshot] = []

    def on_event(stream: ChatCompletionStream[Location], event: ChatCompletionStreamEvent[Location]) -> None:
        # Deep-copy so later chunks can't mutate the captured snapshot.
        if event.type == "content.done":
            done_snapshots.append(model_copy(stream.current_completion_snapshot, deep=True))

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(external("7e5ea4d12e7c*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
        on_event=on_event,
    )

    assert len(done_snapshots) == 1
    assert isinstance(done_snapshots[0].choices[0].message.parsed, Location)

    # Walk the events newest-first: the last `content.delta` should already
    # expose every field of the partially-parsed JSON object.
    for event in reversed(listener.events):
        if event.type == "content.delta":
            data = cast(Any, event.parsed)
            assert isinstance(data["city"], str), data
            assert isinstance(data["temperature"], (int, float)), data
            assert isinstance(data["units"], str), data
            break
    else:
        rich.print(listener.events)
        raise AssertionError("Did not find a `content.delta` event")

    assert print_obj(listener.stream.get_final_completion(), monkeypatch) == snapshot(
        """\
ParsedChatCompletion[Location](
    choices=[
        ParsedChoice[Location](
            finish_reason='stop',
            index=0,
            logprobs=None,
            message=ParsedChatCompletionMessage[Location](
                annotations=None,
                audio=None,
                content='{"city":"San Francisco","temperature":61,"units":"f"}',
                function_call=None,
                parsed=Location(city='San Francisco', temperature=61.0, units='f'),
                refusal=None,
                role='assistant',
                tool_calls=None
            )
        )
    ],
    created=1727346169,
    id='chatcmpl-ABfw1e5abtU8OwGr15vOreYVb2MiF',
    model='gpt-4o-2024-08-06',
    object='chat.completion',
    service_tier=None,
    system_fingerprint='fp_5050236cbd',
    usage=CompletionUsage(
        completion_tokens=14,
        completion_tokens_details=CompletionTokensDetails(
            accepted_prediction_tokens=None,
            audio_tokens=None,
            reasoning_tokens=0,
            rejected_prediction_tokens=None
        ),
        prompt_tokens=79,
        prompt_tokens_details=None,
        total_tokens=93
    )
)
"""
    )
    assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
        """\
ContentDoneEvent[Location](
    content='{"city":"San Francisco","temperature":61,"units":"f"}',
    parsed=Location(city='San Francisco', temperature=61.0, units='f'),
    type='content.done'
)
"""
    )
 191
 192
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_multiple_choices(
    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Streaming with `n=3` and a pydantic `response_format`: each chunk emits
    a paired content event, each of the three choices parses independently,
    and one `content.done` fires per choice."""
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            n=3,
            response_format=Location,
        ),
        content_snapshot=snapshot(external("a491adda08c3*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert [e.type for e in listener.events] == snapshot(
        [
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.delta",
            "chunk",
            "content.done",
            "chunk",
            "content.done",
            "chunk",
            "content.done",
            "chunk",
        ]
    )
    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[Location](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":65,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=65.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    ),
    ParsedChoice[Location](
        finish_reason='stop',
        index=1,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":61,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=61.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    ),
    ParsedChoice[Location](
        finish_reason='stop',
        index=2,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content='{"city":"San Francisco","temperature":59,"units":"f"}',
            function_call=None,
            parsed=Location(city='San Francisco', temperature=59.0, units='f'),
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )
 371
 372
@pytest.mark.respx(base_url=base_url)
def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
    """A stream truncated by `max_tokens=1` cannot yield a complete parse, so
    consuming it raises `openai.LengthFinishReasonError`."""
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    with pytest.raises(openai.LengthFinishReasonError):
        _make_stream_snapshot_request(
            lambda c: c.chat.completions.stream(
                model="gpt-4o-2024-08-06",
                messages=[
                    {
                        "role": "user",
                        "content": "What's the weather like in SF?",
                    },
                ],
                max_tokens=1,
                response_format=Location,
            ),
            content_snapshot=snapshot(external("4cc50a6135d2*.bin")),
            mock_client=client,
            respx_mock=respx_mock,
        )
 397
 398
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """When the model refuses, a `refusal.done` event fires and the final
    choice carries the refusal text with `content=None` and `parsed=None`."""
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "How do I make anthrax?",
                },
            ],
            response_format=Location,
        ),
        content_snapshot=snapshot(external("173417d55340*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(listener.get_event_by_type("refusal.done"), monkeypatch) == snapshot("""\
RefusalDoneEvent(refusal="I'm sorry, I can't assist with that request.", type='refusal.done')
""")

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[Location](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal="I'm sorry, I can't assist with that request.",
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )
 447
 448
@pytest.mark.respx(base_url=base_url)
def test_content_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """With `logprobs=True`, content chunks emit `logprobs.content.delta`
    events (each carrying a growing `snapshot` list) followed by a final
    `logprobs.content.done`, and the final choice exposes `ChoiceLogprobs`."""
    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "Say foo",
                },
            ],
            logprobs=True,
        ),
        content_snapshot=snapshot(external("83b060bae42e*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj([e for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
[
    LogprobsContentDeltaEvent(
        content=[
            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.0025094282, token='Foo', top_logprobs=[])
        ],
        snapshot=[
            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.0025094282, token='Foo', top_logprobs=[])
        ],
        type='logprobs.content.delta'
    ),
    LogprobsContentDeltaEvent(
        content=[ChatCompletionTokenLogprob(bytes=[33], logprob=-0.26638845, token='!', top_logprobs=[])],
        snapshot=[
            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.0025094282, token='Foo', top_logprobs=[]),
            ChatCompletionTokenLogprob(bytes=[33], logprob=-0.26638845, token='!', top_logprobs=[])
        ],
        type='logprobs.content.delta'
    ),
    LogprobsContentDoneEvent(
        content=[
            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.0025094282, token='Foo', top_logprobs=[]),
            ChatCompletionTokenLogprob(bytes=[33], logprob=-0.26638845, token='!', top_logprobs=[])
        ],
        type='logprobs.content.done'
    )
]
""")

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
[
    ParsedChoice[NoneType](
        finish_reason='stop',
        index=0,
        logprobs=ChoiceLogprobs(
            content=[
                ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.0025094282, token='Foo', top_logprobs=[]),
                ChatCompletionTokenLogprob(bytes=[33], logprob=-0.26638845, token='!', top_logprobs=[])
            ],
            refusal=None
        ),
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content='Foo!',
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
""")
 521
 522
@pytest.mark.respx(base_url=base_url)
def test_refusal_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """With `logprobs=True` on a refused request, the stream emits
    `logprobs.refusal.delta` events ending in `logprobs.refusal.done`, and the
    final choice's `ChoiceLogprobs` carries per-token refusal logprobs."""
    class Location(BaseModel):
        city: str
        temperature: float
        units: Literal["c", "f"]

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "How do I make anthrax?",
                },
            ],
            logprobs=True,
            response_format=Location,
        ),
        content_snapshot=snapshot(external("569c877e6942*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj([e.type for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
[
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.delta',
    'logprobs.refusal.done'
]
""")

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
[
    ParsedChoice[Location](
        finish_reason='stop',
        index=0,
        logprobs=ChoiceLogprobs(
            content=None,
            refusal=[
                ChatCompletionTokenLogprob(bytes=[73, 39, 109], logprob=-0.0012038043, token="I'm", top_logprobs=[]),
                ChatCompletionTokenLogprob(
                    bytes=[32, 118, 101, 114, 121],
                    logprob=-0.8438816,
                    token=' very',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(
                    bytes=[32, 115, 111, 114, 114, 121],
                    logprob=-3.4121115e-06,
                    token=' sorry',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(bytes=[44], logprob=-3.3809047e-05, token=',', top_logprobs=[]),
                ChatCompletionTokenLogprob(
                    bytes=[32, 98, 117, 116],
                    logprob=-0.038048144,
                    token=' but',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(bytes=[32, 73], logprob=-0.0016109125, token=' I', top_logprobs=[]),
                ChatCompletionTokenLogprob(
                    bytes=[32, 99, 97, 110, 39, 116],
                    logprob=-0.0073532974,
                    token=" can't",
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(
                    bytes=[32, 97, 115, 115, 105, 115, 116],
                    logprob=-0.0020837625,
                    token=' assist',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(
                    bytes=[32, 119, 105, 116, 104],
                    logprob=-0.00318354,
                    token=' with',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(
                    bytes=[32, 116, 104, 97, 116],
                    logprob=-0.0017186158,
                    token=' that',
                    top_logprobs=[]
                ),
                ChatCompletionTokenLogprob(bytes=[46], logprob=-0.57687104, token='.', top_logprobs=[])
            ]
        ),
        message=ParsedChatCompletionMessage[Location](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal="I'm very sorry, but I can't assist with that.",
            role='assistant',
            tool_calls=None
        )
    )
]
""")
 633
 634
@pytest.mark.respx(base_url=base_url)
def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """A pydantic function tool's streamed arguments are parsed into a
    `GetWeatherArgs` instance. Note the in-flight snapshot is typed
    `ParsedChoice[object]` while the final completion is `[NoneType]`."""
    class GetWeatherArgs(BaseModel):
        city: str
        country: str
        units: Literal["c", "f"] = "c"

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in Edinburgh?",
                },
            ],
            tools=[
                openai.pydantic_function_tool(GetWeatherArgs),
            ],
        ),
        content_snapshot=snapshot(external("c6aa7e397b71*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[object](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[object](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
                        name='GetWeatherArgs',
                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
                    ),
                    id='call_c91SqDXlYFuETYv8mUHzz6pp',
                    index=0,
                    type='function'
                )
            ]
        )
    )
]
"""
    )

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
                        name='GetWeatherArgs',
                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
                    ),
                    id='call_c91SqDXlYFuETYv8mUHzz6pp',
                    index=0,
                    type='function'
                )
            ]
        )
    )
]
"""
    )
 725
 726
@pytest.mark.respx(base_url=base_url)
def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """Two pydantic tools registered at once (one with an overridden name and
    description): each streamed tool call is parsed into its own model type."""
    class GetWeatherArgs(BaseModel):
        """Get the temperature for the given country/city combo"""

        city: str
        country: str
        units: Literal["c", "f"] = "c"

    class GetStockPrice(BaseModel):
        ticker: str
        exchange: str

    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in Edinburgh?",
                },
                {
                    "role": "user",
                    "content": "What's the price of AAPL?",
                },
            ],
            tools=[
                openai.pydantic_function_tool(GetWeatherArgs),
                openai.pydantic_function_tool(
                    GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
                ),
            ],
        ),
        content_snapshot=snapshot(external("f82268f2fefd*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[object](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[object](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city": "Edinburgh", "country": "GB", "units": "c"}',
                        name='GetWeatherArgs',
                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='GB', units='c')
                    ),
                    id='call_JMW1whyEaYG438VE1OIflxA2',
                    index=0,
                    type='function'
                ),
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
                        name='get_stock_price',
                        parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
                    ),
                    id='call_DNYTawLBoN8fj3KN6qU9N1Ou',
                    index=1,
                    type='function'
                )
            ]
        )
    )
]
"""
    )
    completion = listener.stream.get_final_completion()
    assert print_obj(completion.choices[0].message.tool_calls, monkeypatch) == snapshot(
        """\
[
    ParsedFunctionToolCall(
        function=ParsedFunction(
            arguments='{"city": "Edinburgh", "country": "GB", "units": "c"}',
            name='GetWeatherArgs',
            parsed_arguments=GetWeatherArgs(city='Edinburgh', country='GB', units='c')
        ),
        id='call_JMW1whyEaYG438VE1OIflxA2',
        index=0,
        type='function'
    ),
    ParsedFunctionToolCall(
        function=ParsedFunction(
            arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
            name='get_stock_price',
            parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
        ),
        id='call_DNYTawLBoN8fj3KN6qU9N1Ou',
        index=1,
        type='function'
    )
]
"""
    )
 834
 835
@pytest.mark.respx(base_url=base_url)
def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """A plain-dict strict function tool (no pydantic model): streamed
    arguments are parsed into a plain dict rather than a model instance."""
    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "city": {"type": "string"},
                                "state": {"type": "string"},
                            },
                            "required": [
                                "city",
                                "state",
                            ],
                            "additionalProperties": False,
                        },
                        "strict": True,
                    },
                }
            ],
        ),
        content_snapshot=snapshot(external("a247c49c5fcd*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[object](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[object](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"San Francisco","state":"CA"}',
                        name='get_weather',
                        parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
                    ),
                    id='call_CTf1nWJLqSeRgDqaCG27xZ74',
                    index=0,
                    type='function'
                )
            ]
        )
    )
]
"""
    )
 906
 907
@pytest.mark.respx(base_url=base_url)
def test_non_pydantic_response_format(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """Streaming with a plain ``{"type": "json_object"}`` response format.

    No Pydantic model is supplied, so there is nothing for the SDK to parse
    into: the final completion is typed ``ParsedChoice[NoneType]`` with
    ``parsed=None`` and the model's JSON left as the raw ``content`` string.
    """
    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF? Give me any JSON back",
                },
            ],
            # dict-style response_format (not a Pydantic model) — valid, but
            # disables structured-output parsing
            response_format={"type": "json_object"},
        ),
        content_snapshot=snapshot(external("d61558011839*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    # `parsed` stays None; the JSON is only available as text in `content`.
    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content='\\n  {\\n    "location": "San Francisco, CA",\\n    "weather": {\\n      "temperature": "18°C",\\n      
"condition": "Partly Cloudy",\\n      "humidity": "72%",\\n      "windSpeed": "15 km/h",\\n      "windDirection": "NW"\\n   
},\\n    "forecast": [\\n      {\\n        "day": "Monday",\\n        "high": "20°C",\\n        "low": "14°C",\\n        
"condition": "Sunny"\\n      },\\n      {\\n        "day": "Tuesday",\\n        "high": "19°C",\\n        "low": "15°C",\\n   
"condition": "Mostly Cloudy"\\n      },\\n      {\\n        "day": "Wednesday",\\n        "high": "18°C",\\n        "low": 
"14°C",\\n        "condition": "Cloudy"\\n      }\\n    ]\\n  }\\n',
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )
 952
 953
@pytest.mark.respx(base_url=base_url)
def test_allows_non_strict_tools_but_no_parsing(
    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Tools without ``strict: True`` are accepted, but arguments aren't parsed.

    The stream still emits the tool call and its raw ``arguments`` JSON string;
    ``parsed_arguments`` stays ``None`` both on the done event and on the final
    completion, since parsing requires a strict tool schema.
    """
    listener = _make_stream_snapshot_request(
        lambda c: c.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[{"role": "user", "content": "what's the weather in NYC?"}],
            tools=[
                {
                    "type": "function",
                    "function": {
                        # note: no "strict": True here — arguments won't be parsed
                        "name": "get_weather",
                        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
                    },
                }
            ],
        ),
        content_snapshot=snapshot(external("2018feb66ae1*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    # raw argument text is surfaced, but parsed_arguments is None
    assert print_obj(listener.get_event_by_type("tool_calls.function.arguments.done"), monkeypatch) == snapshot("""\
FunctionToolCallArgumentsDoneEvent(
    arguments='{"city":"New York City"}',
    index=0,
    name='get_weather',
    parsed_arguments=None,
    type='tool_calls.function.arguments.done'
)
""")

    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='tool_calls',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content=None,
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=[
                ParsedFunctionToolCall(
                    function=ParsedFunction(
                        arguments='{"city":"New York City"}',
                        name='get_weather',
                        parsed_arguments=None
                    ),
                    id='call_4XzlGBLtUe9dy3GVNV4jhq7h',
                    index=0,
                    type='function'
                )
            ]
        )
    )
]
"""
    )
1019
1020
@pytest.mark.respx(base_url=base_url)
def test_chat_completion_state_helper(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
    """``ChatCompletionStreamState`` can accumulate a raw chunk stream.

    Feeds every chunk from a plain ``.create(stream=True)`` call into the
    state helper and checks that ``get_final_completion()`` reconstructs the
    same completion the higher-level stream helper would produce.
    """
    state = ChatCompletionStreamState()

    def streamer(client: OpenAI) -> Iterator[ChatCompletionChunk]:
        # yield chunks unchanged while mirroring each one into `state`
        stream = client.chat.completions.create(
            model="gpt-4o-2024-08-06",
            messages=[
                {
                    "role": "user",
                    "content": "What's the weather like in SF?",
                },
            ],
            stream=True,
        )
        for chunk in stream:
            state.handle_chunk(chunk)
            yield chunk

    # same recorded response as test_parse_nothing — only the accumulation
    # mechanism differs
    _make_raw_stream_snapshot_request(
        streamer,
        content_snapshot=snapshot(external("e2aad469b71d*.bin")),
        mock_client=client,
        respx_mock=respx_mock,
    )

    assert print_obj(state.get_final_completion().choices, monkeypatch) == snapshot(
        """\
[
    ParsedChoice[NoneType](
        finish_reason='stop',
        index=0,
        logprobs=None,
        message=ParsedChatCompletionMessage[NoneType](
            annotations=None,
            audio=None,
            content="I'm unable to provide real-time weather updates. To get the current weather in San Francisco, I 
recommend checking a reliable weather website or a weather app.",
            function_call=None,
            parsed=None,
            refusal=None,
            role='assistant',
            tool_calls=None
        )
    )
]
"""
    )
1069
1070
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    """`.stream()` must expose the same signature as `.create()` on both clients."""
    if sync:
        checked: OpenAI | AsyncOpenAI = client
    else:
        checked = async_client

    # `response_format` and `stream` are intentionally handled differently by
    # the streaming helper, so they are excluded from the comparison.
    assert_signatures_in_sync(
        checked.chat.completions.create,
        checked.chat.completions.stream,
        exclude_params={"response_format", "stream"},
    )
1080
1081
class StreamListener(Generic[ResponseFormatT]):
    """Iterates a ``ChatCompletionStream`` while recording every event it yields."""

    def __init__(self, stream: ChatCompletionStream[ResponseFormatT]) -> None:
        self.stream = stream
        self.events: list[ChatCompletionStreamEvent[ResponseFormatT]] = []

    def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
        # pass events through unchanged, keeping a copy for later inspection
        for item in self.stream:
            self.events.append(item)
            yield item

    @overload
    def get_event_by_type(self, event_type: Literal["content.done"]) -> ContentDoneEvent[ResponseFormatT] | None: ...

    @overload
    def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None: ...

    def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None:
        """Return the first recorded event whose ``type`` matches, or ``None``."""
        for recorded in self.events:
            if recorded.type == event_type:
                return recorded
        return None
1100
1101
def _make_stream_snapshot_request(
    func: Callable[[OpenAI], ChatCompletionStreamManager[ResponseFormatT]],
    *,
    content_snapshot: Any,
    respx_mock: MockRouter,
    mock_client: OpenAI,
    on_event: Callable[[ChatCompletionStream[ResponseFormatT], ChatCompletionStreamEvent[ResponseFormatT]], Any]
    | None = None,
) -> StreamListener[ResponseFormatT]:
    """Run a streaming request built by ``func`` and return its event listener.

    With ``OPENAI_LIVE=1`` the request goes to the real API and the response
    body is outsourced into the snapshot; otherwise the stored snapshot bytes
    are replayed through respx as a mocked SSE response.
    """
    is_live = os.environ.get("OPENAI_LIVE") == "1"
    if not is_live:
        # replay the recorded response body as a server-sent-event stream
        respx_mock.post("/chat/completions").mock(
            return_value=httpx.Response(
                200,
                content=get_snapshot_value(content_snapshot),
                headers={"content-type": "text/event-stream"},
            )
        )
        client = mock_client
    else:

        def _capture(response: httpx.Response) -> None:
            # update the content snapshot
            assert outsource(response.read()) == content_snapshot

        # disable respx so the request actually reaches the live API
        respx_mock.stop()

        client = OpenAI(http_client=httpx.Client(event_hooks={"response": [_capture]}))

    with func(client) as stream:
        listener = StreamListener(stream)
        for event in listener:
            if on_event:
                on_event(stream, event)

    if is_live:
        client.close()

    return listener
1149
1150
def _make_raw_stream_snapshot_request(
    func: Callable[[OpenAI], Iterator[ChatCompletionChunk]],
    *,
    content_snapshot: Any,
    respx_mock: MockRouter,
    mock_client: OpenAI,
) -> None:
    """Fully consume a raw chunk iterator produced by ``func``.

    With ``OPENAI_LIVE=1`` the real API is hit and the response body is
    outsourced into the snapshot; otherwise the stored snapshot bytes are
    replayed through respx as a mocked SSE response. Chunks are discarded —
    callers observe them via side effects in ``func``.
    """
    is_live = os.environ.get("OPENAI_LIVE") == "1"
    if not is_live:
        # replay the recorded response body as a server-sent-event stream
        respx_mock.post("/chat/completions").mock(
            return_value=httpx.Response(
                200,
                content=get_snapshot_value(content_snapshot),
                headers={"content-type": "text/event-stream"},
            )
        )
        client = mock_client
    else:

        def _capture(response: httpx.Response) -> None:
            # update the content snapshot
            assert outsource(response.read()) == content_snapshot

        # disable respx so the request actually reaches the live API
        respx_mock.stop()

        client = OpenAI(http_client=httpx.Client(event_hooks={"response": [_capture]}))

    consume_sync_iterator(func(client))

    if is_live:
        client.close()