Commit bf1ca86c
Changed files (104)
.github/workflows
.inline-snapshot/external
src/openai/lib
src/openai/resources/beta/chat
src/openai/resources/fine_tuning/jobs
src/openai/types/beta/threads
src/openai/types/beta/vector_stores
src/openai/types/chat
src/openai/types/fine_tuning
src/openai/types/shared
src/openai/types/shared_params
tests/api_resources/beta
tests/api_resources/fine_tuning
tests/lib
.github/workflows/ci.yml
@@ -50,4 +50,3 @@ jobs:
- name: Run tests
run: ./scripts/test
-
.inline-snapshot/external/.gitignore
@@ -0,0 +1,2 @@
+# ignore all snapshots which are not referred to in the source
+*-new.*
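
The `.bin` files added below are recorded server-sent-event (SSE) streams used as inline-snapshot fixtures for the streaming tests: each `data:` line carries one chat.completion.chunk payload, and the stream terminates with `data: [DONE]`. As a minimal sketch of how such a fixture can be replayed as parsed chunks (the helper name and parsing logic are illustrative, not the repository's actual test harness):

    import json
    from pathlib import Path
    from typing import Iterator

    def iter_chunks(path: Path) -> Iterator[dict]:
        # Hypothetical helper: yield each SSE payload in a recorded .bin
        # fixture as a parsed dict, stopping at the [DONE] sentinel.
        for line in path.read_text().splitlines():
            if not line.startswith("data: "):
                continue  # blank separator lines between events
            payload = line[len("data: "):]
            if payload == "[DONE]":
                return
            yield json.loads(payload)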
.inline-snapshot/external/1437bd06a9d5c414e56fd0840b9cda31e5a0af80decdbddd21c056545c6d4616.bin
@@ -0,0 +1,100 @@
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"64"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"68"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"64"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":42,"total_tokens":59}}
+
+data: [DONE]
+
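The fixture above records a completion requested with three parallel choices (n=3): every chunk tags its delta with a choice index, and the per-index content fragments concatenate into one JSON object per choice (temperature 64 for choices 0 and 2, 68 for choice 1). A sketch of the accumulation step, assuming chunks already parsed into dicts as in the sketch above:

    from collections import defaultdict

    def join_choices(chunks) -> dict[int, str]:
        # Concatenate streamed content fragments per choice index.
        texts = defaultdict(str)
        for chunk in chunks:
            for choice in chunk["choices"]:
                delta = choice.get("delta", {})
                texts[choice["index"]] += delta.get("content") or ""
        return dict(texts)

    # For the stream above, choice 0 reassembles to:
    #   {"city":"San Francisco","temperature":64,"units":"f"}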
.inline-snapshot/external/3e0df46f250db854eacb34e3258ab9141d709b770a74dc025fb8770a42aabee9.bin
@@ -0,0 +1,180 @@
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"location"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" CA"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"conditions"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"humidity"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"wind"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"_speed"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"timestamp"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"note"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"Real"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-time"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" data"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" not"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" available"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Please"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" check"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" reliable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" service"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" for"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" most"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" up"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-date"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" information"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" on"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"'s"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" conditions"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":".\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":19,"completion_tokens":86,"total_tokens":105}}
+
+data: [DONE]
+
.inline-snapshot/external/70c7df71ce729e178fc5e54f0cc4861e696495d9a45c19be02cf479e28c31316.bin
@@ -0,0 +1,12 @@
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":{"content":[],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"Foo"},"logprobs":{"content":[{"token":"Foo","logprob":-0.006764991,"bytes":[70,111,111],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"!"},"logprobs":{"content":[{"token":"!","logprob":-0.31380808,"bytes":[33],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":9,"completion_tokens":2,"total_tokens":11}}
+
+data: [DONE]
+
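This shorter fixture exercises streamed logprobs: each content delta carries a logprobs.content list holding the token, its log probability, and its raw bytes. A sketch of flattening those into (token, logprob) pairs, under the same parsed-chunk assumption:

    def token_logprobs(chunks) -> list[tuple[str, float]]:
        # Collect per-token log probabilities from streamed chunks.
        pairs = []
        for chunk in chunks:
            for choice in chunk["choices"]:
                content = (choice.get("logprobs") or {}).get("content") or []
                for item in content:
                    pairs.append((item["token"], item["logprob"]))
        return pairs

    # For the stream above: [("Foo", -0.006764991), ("!", -0.31380808)]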
.inline-snapshot/external/7ae6c1a2631bf7444b8f70b592ec5f581ea9de2524599f06b0d405db8997b826.bin
@@ -0,0 +1,8 @@
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"length"}]}
+
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":1,"total_tokens":18}}
+
+data: [DONE]
+
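This fixture covers the refusal path: the first delta carries a refusal field instead of content, and the stream stops with finish_reason "length" after a single completion token. A hedged sketch of surfacing both signals from a parsed stream:

    def classify_stream(chunks):
        # Return (accumulated_refusal, finish_reason); refusal stays None
        # for ordinary content streams.
        refusal, finish = None, None
        for chunk in chunks:
            for choice in chunk["choices"]:
                fragment = choice.get("delta", {}).get("refusal")
                if fragment is not None:
                    refusal = (refusal or "") + fragment
                if choice.get("finish_reason"):
                    finish = choice["finish_reason"]
        return refusal, finish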
.inline-snapshot/external/a346213bec7a572810bd1ffe290e08ddfad221d6632fdb200a95ca6c996238e2.bin
@@ -0,0 +1,52 @@
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_g4Q1vRbE0CaHGOs5if8mHsBq","type":"function","function":{"name":"GetWeatherArgs","arguments":""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"ci"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"ty\": "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"Edinb"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"urgh"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\", \"c"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"ountry"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\": \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"UK\", "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"units"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\": \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"c\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_gWj3HQxZEHnFvyJLEHIiJKBV","type":"function","function":{"name":"get_stock_price","arguments":""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{\"ti"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"cker\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":": \"AAP"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"L\", "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"\"exch"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"ange\":"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":" \"NA"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"SDAQ\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":149,"completion_tokens":60,"total_tokens":209}}
+
+data: [DONE]
+
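The fixture above streams two tool calls within a single choice: each call's id, type, and function name arrive only on its first chunk, while the JSON arguments arrive as fragments keyed by tool_calls[].index. A sketch of reassembly, again assuming parsed chunk dicts:

    def join_tool_calls(chunks) -> list[dict]:
        # Rebuild streamed tool calls: name from the first fragment,
        # arguments concatenated across fragments sharing an index.
        calls: dict[int, dict] = {}
        for chunk in chunks:
            for choice in chunk["choices"]:
                for tc in choice.get("delta", {}).get("tool_calls") or []:
                    entry = calls.setdefault(tc["index"], {"name": "", "arguments": ""})
                    fn = tc.get("function", {})
                    if fn.get("name"):
                        entry["name"] = fn["name"]
                    entry["arguments"] += fn.get("arguments") or ""
        return [calls[i] for i in sorted(calls)]

    # For the stream above this yields GetWeatherArgs with
    # {"city": "Edinburgh", "country": "UK", "units": "c"} and
    # get_stock_price with {"ticker": "AAPL", "exchange": "NASDAQ"}.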
.inline-snapshot/external/a7097cae6a1f8dea453977a1784b7ca16b9fadc5c4551ea066d305eb1607e1c6.bin
@@ -0,0 +1,28 @@
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_rQe3kzGnTr2epjx8HREg3F2a","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"San"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Francisco"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"state"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"CA"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":48,"completion_tokens":19,"total_tokens":67}}
+
+data: [DONE]
+
.inline-snapshot/external/ae070a447e1ded1ad4819f7608abc40785d712248f65c8595c99879080d0eeb9.bin
@@ -0,0 +1,36 @@
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_Vz6ZXciy6Y0PYfT4d9W7fYB4","type":"function","function":{"name":"GetWeatherArgs","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Ed"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"inburgh"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"country"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"UK"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"units"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"c"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":76,"completion_tokens":24,"total_tokens":100}}
+
+data: [DONE]
+
.inline-snapshot/external/b9d6bee9f9b8ee5bdea06cd6955521e0258cc2cef0528a17fbdadb9cc76695f0.bin
@@ -0,0 +1,72 @@
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"I'm"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" unable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" provide"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" real"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-time"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" updates"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" To"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" get"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" latest"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" information"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" for"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" recommend"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" checking"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" reliable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" website"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" or"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" using"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" app"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":14,"completion_tokens":32,"total_tokens":46}}
+
+data: [DONE]
+
.inline-snapshot/external/cb77dc69b6c8289a6f1e88fa24f05bd963fe093622e5bf9a95a3ebede64714bc.bin
@@ -0,0 +1,30 @@
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":{"content":null,"refusal":[]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":{"content":null,"refusal":[{"token":"I'm","logprob":-0.0010472201,"bytes":[73,39,109],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" very"},"logprobs":{"content":null,"refusal":[{"token":" very","logprob":-0.7292482,"bytes":[32,118,101,114,121],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":{"content":null,"refusal":[{"token":" sorry","logprob":-5.080963e-6,"bytes":[32,115,111,114,114,121],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":{"content":null,"refusal":[{"token":",","logprob":-0.00004048445,"bytes":[44],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":{"content":null,"refusal":[{"token":" but","logprob":-0.038046427,"bytes":[32,98,117,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":{"content":null,"refusal":[{"token":" I","logprob":-0.0019351852,"bytes":[32,73],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":{"content":null,"refusal":[{"token":" can't","logprob":-0.008995773,"bytes":[32,99,97,110,39,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":{"content":null,"refusal":[{"token":" assist","logprob":-0.0033510819,"bytes":[32,97,115,115,105,115,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":{"content":null,"refusal":[{"token":" with","logprob":-0.0036033941,"bytes":[32,119,105,116,104],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":{"content":null,"refusal":[{"token":" that","logprob":-0.0015974608,"bytes":[32,116,104,97,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":{"content":null,"refusal":[{"token":".","logprob":-0.6339823,"bytes":[46],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":12,"total_tokens":29}}
+
+data: [DONE]
+
.inline-snapshot/external/d79326933c1586e731a8235998194b58fc759adc3685170e0a61033241d2eda5.bin
@@ -0,0 +1,32 @@
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" very"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" request"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":13,"total_tokens":30}}
+
+data: [DONE]
+
.inline-snapshot/external/ea9a417d533b9adfece02608f2ca00f3a963d785c6fe78c35d60d038cd7a8ba0.bin
@@ -0,0 +1,36 @@
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"63"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":14,"total_tokens":31}}
+
+data: [DONE]
+
.inline-snapshot/external/fb75060ede89cac360ce8baf1513a82f959a175ec05ce3c07412bbc9fd436234.bin
@@ -0,0 +1,22 @@
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_9rqjEc1DQRADTYGVV45LbZwL","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"New"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" York"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":44,"completion_tokens":16,"total_tokens":60}}
+
+data: [DONE]
+
examples/parsing.py
@@ -0,0 +1,36 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+ explanation: str
+ output: str
+
+
+class MathResponse(BaseModel):
+ steps: List[Step]
+ final_answer: str
+
+
+client = OpenAI()
+
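+# `parse()` accepts the pydantic model as `response_format`: it is sent to the
+# API as a strict `json_schema` response format and the response content is
+# validated back into a `MathResponse` instance on `message.parsed`.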
+completion = client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "solve 8x + 31 = 2"},
+ ],
+ response_format=MathResponse,
+)
+
+message = completion.choices[0].message
+if message.parsed:
+ rich.print(message.parsed.steps)
+
+ print("answer: ", message.parsed.final_answer)
+else:
+ print(message.refusal)
examples/parsing_stream.py
@@ -0,0 +1,42 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+ explanation: str
+ output: str
+
+
+class MathResponse(BaseModel):
+ steps: List[Step]
+ final_answer: str
+
+
+client = OpenAI()
+
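+# `stream()` wraps the raw SSE stream: `content.delta` events expose the text
+# delta plus a partially-parsed snapshot, and `content.done` carries the fully
+# validated `MathResponse` on `event.parsed`.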
+with client.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "solve 8x + 31 = 2"},
+ ],
+ response_format=MathResponse,
+) as stream:
+ for event in stream:
+ if event.type == "content.delta":
+ print(event.delta, end="", flush=True)
+ elif event.type == "content.done":
+ print("\n")
+ if event.parsed is not None:
+ print(f"answer: {event.parsed.final_answer}")
+ elif event.type == "refusal.delta":
+ print(event.delta, end="", flush=True)
+ elif event.type == "refusal.done":
+ print()
+
+print("---------------")
+rich.print(stream.get_final_completion())
examples/parsing_tools.py
@@ -0,0 +1,80 @@
+from enum import Enum
+from typing import List, Union
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class Table(str, Enum):
+ orders = "orders"
+ customers = "customers"
+ products = "products"
+
+
+class Column(str, Enum):
+ id = "id"
+ status = "status"
+ expected_delivery_date = "expected_delivery_date"
+ delivered_at = "delivered_at"
+ shipped_at = "shipped_at"
+ ordered_at = "ordered_at"
+ canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+ eq = "="
+ gt = ">"
+ lt = "<"
+ le = "<="
+ ge = ">="
+ ne = "!="
+
+
+class OrderBy(str, Enum):
+ asc = "asc"
+ desc = "desc"
+
+
+class DynamicValue(BaseModel):
+ column_name: str
+
+
+class Condition(BaseModel):
+ column: str
+ operator: Operator
+ value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+ table_name: Table
+ columns: List[Column]
+ conditions: List[Condition]
+ order_by: OrderBy
+
+
+client = OpenAI()
+
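+# `openai.pydantic_function_tool()` wraps the `Query` model as a strict
+# function tool, so the arguments the model produces are validated into a
+# `Query` instance and exposed on `tool_call.function.parsed_arguments`.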
+completion = client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "system",
+ "content": "You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.",
+ },
+ {
+ "role": "user",
+ "content": "look up all my orders in november of last year that were fulfilled but not delivered on time",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(Query),
+ ],
+)
+
+tool_call = (completion.choices[0].message.tool_calls or [])[0]
+rich.print(tool_call.function)
+assert isinstance(tool_call.function.parsed_arguments, Query)
+print(tool_call.function.parsed_arguments.table_name)
examples/parsing_tools_stream.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class GetWeather(BaseModel):
+ city: str
+ country: str
+
+
+client = OpenAI()
+
+
+with client.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF and New York?",
+ },
+ ],
+ tools=[
+        # because we're using `.stream()` with a pydantic function tool, the
+        # returned tool call arguments will be automatically deserialized
+        # into this `GetWeather` type
+ openai.pydantic_function_tool(GetWeather, name="get_weather"),
+ ],
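+    # with parallel tool calls enabled, each call streams its own
+    # `tool_calls.function.arguments.*` events, distinguished by `index`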
+ parallel_tool_calls=True,
+) as stream:
+ for event in stream:
+ if event.type == "tool_calls.function.arguments.delta" or event.type == "tool_calls.function.arguments.done":
+ rich.get_console().print(event, width=80)
+
+print("----\n")
+rich.print(stream.get_final_completion())
src/openai/lib/_parsing/__init__.py
@@ -0,0 +1,12 @@
+from ._completions import (
+ ResponseFormatT as ResponseFormatT,
+    has_parseable_input as has_parseable_input,
+ maybe_parse_content as maybe_parse_content,
+ validate_input_tools as validate_input_tools,
+ parse_chat_completion as parse_chat_completion,
+ get_input_tool_by_name as get_input_tool_by_name,
+ solve_response_format_t as solve_response_format_t,
+ parse_function_tool_arguments as parse_function_tool_arguments,
+ type_to_response_format_param as type_to_response_format_param,
+)
src/openai/lib/_parsing/_completions.py
@@ -0,0 +1,254 @@
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, Iterable, cast
+from typing_extensions import TypeVar, TypeGuard, assert_never
+
+import pydantic
+
+from .._tools import PydanticFunctionTool
+from ..._types import NOT_GIVEN, NotGiven
+from ..._utils import is_dict, is_given
+from ..._compat import model_parse_json
+from ..._models import construct_type_unchecked
+from .._pydantic import to_strict_json_schema
+from ...types.chat import (
+ ParsedChoice,
+ ChatCompletion,
+ ParsedFunction,
+ ParsedChatCompletion,
+ ChatCompletionMessage,
+ ParsedFunctionToolCall,
+ ChatCompletionToolParam,
+ ParsedChatCompletionMessage,
+ completion_create_params,
+)
+from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ...types.shared_params import FunctionDefinition
+from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+from ...types.chat.chat_completion_message_tool_call import Function
+
+ResponseFormatT = TypeVar(
+ "ResponseFormatT",
+ # if it isn't given then we don't do any parsing
+ default=None,
+)
+_default_response_format: None = None
+
+
+def validate_input_tools(
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> None:
+ if not is_given(tools):
+ return
+
+ for tool in tools:
+ if tool["type"] != "function":
+ raise ValueError(
+                f'Currently only `function` tool types support auto-parsing; received `{tool["type"]}`',
+ )
+
+ strict = tool["function"].get("strict")
+ if strict is not True:
+ raise ValueError(
+ f'`{tool["function"]["name"]}` is not strict. Only `strict` function tools can be auto-parsed'
+ )
+
+
+def parse_chat_completion(
+ *,
+ response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ chat_completion: ChatCompletion | ParsedChatCompletion[object],
+) -> ParsedChatCompletion[ResponseFormatT]:
+ if is_given(input_tools):
+        input_tools = list(input_tools)
+ else:
+ input_tools = []
+
+ choices: list[ParsedChoice[ResponseFormatT]] = []
+ for choice in chat_completion.choices:
+ if choice.finish_reason == "length":
+ raise LengthFinishReasonError()
+
+ if choice.finish_reason == "content_filter":
+ raise ContentFilterFinishReasonError()
+
+ message = choice.message
+
+ tool_calls: list[ParsedFunctionToolCall] = []
+ if message.tool_calls:
+ for tool_call in message.tool_calls:
+ if tool_call.type == "function":
+ tool_call_dict = tool_call.to_dict()
+ tool_calls.append(
+ construct_type_unchecked(
+ value={
+ **tool_call_dict,
+ "function": {
+ **cast(Any, tool_call_dict["function"]),
+ "parsed_arguments": parse_function_tool_arguments(
+ input_tools=input_tools, function=tool_call.function
+ ),
+ },
+ },
+ type_=ParsedFunctionToolCall,
+ )
+ )
+ elif TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(tool_call)
+ else:
+ tool_calls.append(tool_call)
+
+ choices.append(
+ construct_type_unchecked(
+ type_=cast(Any, ParsedChoice)[solve_response_format_t(response_format)],
+ value={
+ **choice.to_dict(),
+ "message": {
+ **message.to_dict(),
+ "parsed": maybe_parse_content(
+ response_format=response_format,
+ message=message,
+ ),
+ "tool_calls": tool_calls,
+ },
+ },
+ )
+ )
+
+ return cast(
+ ParsedChatCompletion[ResponseFormatT],
+ construct_type_unchecked(
+ type_=cast(Any, ParsedChatCompletion)[solve_response_format_t(response_format)],
+ value={
+ **chat_completion.to_dict(),
+ "choices": choices,
+ },
+ ),
+ )
+
+
+def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
+ return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)
+
+
+def parse_function_tool_arguments(
+ *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
+) -> object:
+ input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
+ if not input_tool:
+ return None
+
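+    # pydantic function tools are validated into their model class; plain
+    # strict function tools fall back to `json.loads` on the raw arguments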
+ input_fn = cast(object, input_tool.get("function"))
+ if isinstance(input_fn, PydanticFunctionTool):
+ return model_parse_json(input_fn.model, function.arguments)
+
+ input_fn = cast(FunctionDefinition, input_fn)
+
+ if not input_fn.get("strict"):
+ return None
+
+ return json.loads(function.arguments)
+
+
+def maybe_parse_content(
+ *,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
+) -> ResponseFormatT | None:
+ if has_rich_response_format(response_format) and message.content is not None and not message.refusal:
+ return _parse_content(response_format, message.content)
+
+ return None
+
+
+def solve_response_format_t(
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> type[ResponseFormatT]:
+ """Return the runtime type for the given response format.
+
+    If no response format is given, or if we won't auto-parse the response format,
+ then we default to `None`.
+ """
+ if has_rich_response_format(response_format):
+ return response_format
+
+ return cast("type[ResponseFormatT]", _default_response_format)
+
+
+def has_parseable_input(
+ *,
+ response_format: type | ResponseFormatParam | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> bool:
+ if has_rich_response_format(response_format):
+ return True
+
+ for input_tool in input_tools or []:
+ if is_parseable_tool(input_tool):
+ return True
+
+ return False
+
+
+def has_rich_response_format(
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> TypeGuard[type[ResponseFormatT]]:
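+    """Whether the given response format is a class, e.g. a pydantic model, rather than a raw dict param."""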
+ if not is_given(response_format):
+ return False
+
+ if is_response_format_param(response_format):
+ return False
+
+ return True
+
+
+def is_response_format_param(response_format: object) -> TypeGuard[ResponseFormatParam]:
+ return is_dict(response_format)
+
+
+def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+ input_fn = cast(object, input_tool.get("function"))
+ if isinstance(input_fn, PydanticFunctionTool):
+ return True
+
+ return cast(FunctionDefinition, input_fn).get("strict") or False
+
+
+def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]:
+ return issubclass(typ, pydantic.BaseModel)
+
+
+def _parse_content(response_format: type[ResponseFormatT], content: str) -> ResponseFormatT:
+ if is_basemodel_type(response_format):
+ return cast(ResponseFormatT, model_parse_json(response_format, content))
+
+ raise TypeError(f"Unable to automatically parse response format type {response_format}")
+
+
+def type_to_response_format_param(
+ response_format: type | completion_create_params.ResponseFormat | NotGiven,
+) -> ResponseFormatParam | NotGiven:
+ if not is_given(response_format):
+ return NOT_GIVEN
+
+ if is_response_format_param(response_format):
+ return response_format
+
+ # type checkers don't narrow the negation of a `TypeGuard` as it isn't
+    # a safe default behaviour, but we know that at this point the `response_format`
+ # can only be a `type`
+ response_format = cast(type, response_format)
+
+ if not is_basemodel_type(response_format):
+ raise TypeError(f"Unsupported response_format type - {response_format}")
+
+ return {
+ "type": "json_schema",
+ "json_schema": {
+ "schema": to_strict_json_schema(response_format),
+ "name": response_format.__name__,
+ "strict": True,
+ },
+ }
src/openai/lib/streaming/chat/__init__.py
@@ -0,0 +1,26 @@
+from ._types import (
+ ParsedChoiceSnapshot as ParsedChoiceSnapshot,
+ ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+ ParsedChatCompletionMessageSnapshot as ParsedChatCompletionMessageSnapshot,
+)
+from ._events import (
+ ChunkEvent as ChunkEvent,
+ ContentDoneEvent as ContentDoneEvent,
+ RefusalDoneEvent as RefusalDoneEvent,
+ ContentDeltaEvent as ContentDeltaEvent,
+ RefusalDeltaEvent as RefusalDeltaEvent,
+ LogprobsContentDoneEvent as LogprobsContentDoneEvent,
+ LogprobsRefusalDoneEvent as LogprobsRefusalDoneEvent,
+ ChatCompletionStreamEvent as ChatCompletionStreamEvent,
+ LogprobsContentDeltaEvent as LogprobsContentDeltaEvent,
+ LogprobsRefusalDeltaEvent as LogprobsRefusalDeltaEvent,
+ ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+ FunctionToolCallArgumentsDoneEvent as FunctionToolCallArgumentsDoneEvent,
+ FunctionToolCallArgumentsDeltaEvent as FunctionToolCallArgumentsDeltaEvent,
+)
+from ._completions import (
+ ChatCompletionStream as ChatCompletionStream,
+ AsyncChatCompletionStream as AsyncChatCompletionStream,
+ ChatCompletionStreamManager as ChatCompletionStreamManager,
+ AsyncChatCompletionStreamManager as AsyncChatCompletionStreamManager,
+)
src/openai/lib/streaming/chat/_completions.py
@@ -0,0 +1,724 @@
+from __future__ import annotations
+
+import inspect
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, Callable, Iterable, Awaitable, AsyncIterator, cast
+from typing_extensions import Self, Iterator, assert_never
+
+from jiter import from_json
+
+from ._types import ParsedChoiceSnapshot, ParsedChatCompletionSnapshot, ParsedChatCompletionMessageSnapshot
+from ._events import (
+ ChunkEvent,
+ ContentDoneEvent,
+ RefusalDoneEvent,
+ ContentDeltaEvent,
+ RefusalDeltaEvent,
+ LogprobsContentDoneEvent,
+ LogprobsRefusalDoneEvent,
+ ChatCompletionStreamEvent,
+ LogprobsContentDeltaEvent,
+ LogprobsRefusalDeltaEvent,
+ FunctionToolCallArgumentsDoneEvent,
+ FunctionToolCallArgumentsDeltaEvent,
+)
+from .._deltas import accumulate_delta
+from ...._types import NOT_GIVEN, NotGiven
+from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
+from ...._compat import model_dump
+from ...._models import build, construct_type
+from ..._parsing import (
+ ResponseFormatT,
+ has_parseable_input,
+ maybe_parse_content,
+ parse_chat_completion,
+ get_input_tool_by_name,
+ solve_response_format_t,
+ parse_function_tool_arguments,
+)
+from ...._streaming import Stream, AsyncStream
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
+from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ....types.chat.chat_completion import ChoiceLogprobs
+from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
+from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+
+
+class ChatCompletionStream(Generic[ResponseFormatT]):
+ """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatic parsing of
+    responses & tool calls, and accumulates a `ChatCompletion` object
+ from each individual chunk.
+
+ https://platform.openai.com/docs/api-reference/streaming
+ """
+
+ def __init__(
+ self,
+ *,
+ raw_stream: Stream[ChatCompletionChunk],
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ ) -> None:
+ self._raw_stream = raw_stream
+ self._response = raw_stream.response
+ self._iterator = self.__stream__()
+ self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+ def __next__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+ return self._iterator.__next__()
+
+ def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+ for item in self._iterator:
+ yield item
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.close()
+
+ def close(self) -> None:
+ """
+ Close the response and release the connection.
+
+ Automatically called if the response body is read to completion.
+ """
+ self._response.close()
+
+ def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+ """Waits until the stream has been read to completion and returns
+ the accumulated `ParsedChatCompletion` object.
+
+ If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+ property will be the content deserialised into that class, if there was any content returned
+ by the API.
+ """
+ self.until_done()
+ return self._state.get_final_completion()
+
+ def until_done(self) -> Self:
+ """Blocks until the stream has been consumed."""
+ consume_sync_iterator(self)
+ return self
+
+ @property
+ def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+ return self._state.current_completion_snapshot
+
+ def __stream__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+ for sse_event in self._raw_stream:
+ events_to_fire = self._state.handle_chunk(sse_event)
+ for event in events_to_fire:
+ yield event
+
+
+class ChatCompletionStreamManager(Generic[ResponseFormatT]):
+ """Context manager over a `ChatCompletionStream` that is returned by `.stream()`.
+
+ This context manager ensures the response cannot be leaked if you don't read
+ the stream to completion.
+
+ Usage:
+ ```py
+ with client.beta.chat.completions.stream(...) as stream:
+ for event in stream:
+ ...
+ ```
+ """
+
+ def __init__(
+ self,
+ api_request: Callable[[], Stream[ChatCompletionChunk]],
+ *,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ ) -> None:
+ self.__stream: ChatCompletionStream[ResponseFormatT] | None = None
+ self.__api_request = api_request
+ self.__response_format = response_format
+ self.__input_tools = input_tools
+
+ def __enter__(self) -> ChatCompletionStream[ResponseFormatT]:
+ raw_stream = self.__api_request()
+
+ self.__stream = ChatCompletionStream(
+ raw_stream=raw_stream,
+ response_format=self.__response_format,
+ input_tools=self.__input_tools,
+ )
+
+ return self.__stream
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ if self.__stream is not None:
+ self.__stream.close()
+
+
+class AsyncChatCompletionStream(Generic[ResponseFormatT]):
+ """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatic parsing of
+    responses & tool calls, and accumulates a `ChatCompletion` object
+ from each individual chunk.
+
+ https://platform.openai.com/docs/api-reference/streaming
+ """
+
+ def __init__(
+ self,
+ *,
+ raw_stream: AsyncStream[ChatCompletionChunk],
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ ) -> None:
+ self._raw_stream = raw_stream
+ self._response = raw_stream.response
+ self._iterator = self.__stream__()
+ self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+ async def __anext__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+ return await self._iterator.__anext__()
+
+ async def __aiter__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+ async for item in self._iterator:
+ yield item
+
+ async def __aenter__(self) -> Self:
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ await self.close()
+
+ async def close(self) -> None:
+ """
+ Close the response and release the connection.
+
+ Automatically called if the response body is read to completion.
+ """
+ await self._response.aclose()
+
+ async def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+ """Waits until the stream has been read to completion and returns
+ the accumulated `ParsedChatCompletion` object.
+
+ If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+ property will be the content deserialised into that class, if there was any content returned
+ by the API.
+ """
+ await self.until_done()
+ return self._state.get_final_completion()
+
+ async def until_done(self) -> Self:
+ """Blocks until the stream has been consumed."""
+ await consume_async_iterator(self)
+ return self
+
+ @property
+ def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+ return self._state.current_completion_snapshot
+
+ async def __stream__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+ async for sse_event in self._raw_stream:
+ events_to_fire = self._state.handle_chunk(sse_event)
+ for event in events_to_fire:
+ yield event
+
+
+class AsyncChatCompletionStreamManager(Generic[ResponseFormatT]):
+ """Context manager over a `AsyncChatCompletionStream` that is returned by `.stream()`.
+
+ This context manager ensures the response cannot be leaked if you don't read
+ the stream to completion.
+
+ Usage:
+ ```py
+ async with client.beta.chat.completions.stream(...) as stream:
+        async for event in stream:
+ ...
+ ```
+ """
+
+ def __init__(
+ self,
+ api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
+ *,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ ) -> None:
+ self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
+ self.__api_request = api_request
+ self.__response_format = response_format
+ self.__input_tools = input_tools
+
+ async def __aenter__(self) -> AsyncChatCompletionStream[ResponseFormatT]:
+ raw_stream = await self.__api_request
+
+ self.__stream = AsyncChatCompletionStream(
+ raw_stream=raw_stream,
+ response_format=self.__response_format,
+ input_tools=self.__input_tools,
+ )
+
+ return self.__stream
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ if self.__stream is not None:
+ await self.__stream.close()
+
+
+class ChatCompletionStreamState(Generic[ResponseFormatT]):
+ def __init__(
+ self,
+ *,
+ input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ ) -> None:
+ self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None
+ self.__choice_event_states: list[ChoiceEventState] = []
+
+ self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
+ self._response_format = response_format
+ self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN
+
+ def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+ return parse_chat_completion(
+ chat_completion=self.current_completion_snapshot,
+ response_format=self._rich_response_format,
+ input_tools=self._input_tools,
+ )
+
+ @property
+ def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+ assert self.__current_completion_snapshot is not None
+ return self.__current_completion_snapshot
+
+ def handle_chunk(self, chunk: ChatCompletionChunk) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+ """Accumulate a new chunk into the snapshot and returns a list of events to yield."""
+ self.__current_completion_snapshot = self._accumulate_chunk(chunk)
+
+ return self._build_events(
+ chunk=chunk,
+ completion_snapshot=self.__current_completion_snapshot,
+ )
+
+ def _get_choice_state(self, choice: ChoiceChunk) -> ChoiceEventState:
+ try:
+ return self.__choice_event_states[choice.index]
+ except IndexError:
+ choice_state = ChoiceEventState(input_tools=self._input_tools)
+ self.__choice_event_states.append(choice_state)
+ return choice_state
+
+ def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+ completion_snapshot = self.__current_completion_snapshot
+
+ if completion_snapshot is None:
+ return _convert_initial_chunk_into_snapshot(chunk)
+
+ for choice in chunk.choices:
+ try:
+ choice_snapshot = completion_snapshot.choices[choice.index]
+ previous_tool_calls = choice_snapshot.message.tool_calls or []
+
+ choice_snapshot.message = cast(
+ ParsedChatCompletionMessageSnapshot,
+ construct_type(
+ type_=ParsedChatCompletionMessageSnapshot,
+ value=accumulate_delta(
+ cast(
+ "dict[object, object]",
+ model_dump(
+ choice_snapshot.message,
+ # we don't want to serialise / deserialise our custom properties
+ # as they won't appear in the delta and we don't want to have to
+                            # continuously reparse the content
+ exclude={
+ "parsed": True,
+ "tool_calls": {
+ idx: {"function": {"parsed_arguments": True}}
+ for idx, _ in enumerate(choice_snapshot.message.tool_calls or [])
+ },
+ },
+ ),
+ ),
+ cast("dict[object, object]", choice.delta.to_dict()),
+ ),
+ ),
+ )
+
+ # ensure tools that have already been parsed are added back into the newly
+ # constructed message snapshot
+ for tool_index, prev_tool in enumerate(previous_tool_calls):
+ new_tool = (choice_snapshot.message.tool_calls or [])[tool_index]
+
+ if prev_tool.type == "function":
+ assert new_tool.type == "function"
+ new_tool.function.parsed_arguments = prev_tool.function.parsed_arguments
+ elif TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(prev_tool)
+ except IndexError:
+ choice_snapshot = cast(
+ ParsedChoiceSnapshot,
+ construct_type(
+ type_=ParsedChoiceSnapshot,
+ value={
+ **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+ "message": choice.delta.to_dict(),
+ },
+ ),
+ )
+ completion_snapshot.choices.append(choice_snapshot)
+
+ if choice.finish_reason:
+ choice_snapshot.finish_reason = choice.finish_reason
+
+ if has_parseable_input(response_format=self._response_format, input_tools=self._input_tools):
+ if choice.finish_reason == "length":
+ raise LengthFinishReasonError()
+
+ if choice.finish_reason == "content_filter":
+ raise ContentFilterFinishReasonError()
+
+ if (
+ choice_snapshot.message.content
+ and not choice_snapshot.message.refusal
+ and is_given(self._rich_response_format)
+ ):
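+                # incrementally parse the partial JSON content as it streams,
+                # so consumers can inspect an incomplete-but-valid object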
+ choice_snapshot.message.parsed = from_json(
+ bytes(choice_snapshot.message.content, "utf-8"),
+ partial_mode=True,
+ )
+
+ for tool_call_chunk in choice.delta.tool_calls or []:
+ tool_call_snapshot = (choice_snapshot.message.tool_calls or [])[tool_call_chunk.index]
+
+ if tool_call_snapshot.type == "function":
+ input_tool = get_input_tool_by_name(
+ input_tools=self._input_tools, name=tool_call_snapshot.function.name
+ )
+
+ if (
+ input_tool
+ and input_tool.get("function", {}).get("strict")
+ and tool_call_snapshot.function.arguments
+ ):
+ tool_call_snapshot.function.parsed_arguments = from_json(
+ bytes(tool_call_snapshot.function.arguments, "utf-8"),
+ partial_mode=True,
+ )
+ elif TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(tool_call_snapshot)
+
+ if choice.logprobs is not None:
+ if choice_snapshot.logprobs is None:
+ choice_snapshot.logprobs = build(
+ ChoiceLogprobs,
+ content=choice.logprobs.content,
+ refusal=choice.logprobs.refusal,
+ )
+ else:
+ if choice.logprobs.content:
+ if choice_snapshot.logprobs.content is None:
+ choice_snapshot.logprobs.content = []
+
+ choice_snapshot.logprobs.content.extend(choice.logprobs.content)
+
+ if choice.logprobs.refusal:
+ if choice_snapshot.logprobs.refusal is None:
+ choice_snapshot.logprobs.refusal = []
+
+ choice_snapshot.logprobs.refusal.extend(choice.logprobs.refusal)
+
+ completion_snapshot.usage = chunk.usage
+ completion_snapshot.system_fingerprint = chunk.system_fingerprint
+
+ return completion_snapshot
+
+ def _build_events(
+ self,
+ *,
+ chunk: ChatCompletionChunk,
+ completion_snapshot: ParsedChatCompletionSnapshot,
+ ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+ events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+ events_to_fire.append(
+ build(ChunkEvent, type="chunk", chunk=chunk, snapshot=completion_snapshot),
+ )
+
+ for choice in chunk.choices:
+ choice_state = self._get_choice_state(choice)
+ choice_snapshot = completion_snapshot.choices[choice.index]
+
+ if choice.delta.content is not None and choice_snapshot.message.content is not None:
+ events_to_fire.append(
+ build(
+ ContentDeltaEvent,
+ type="content.delta",
+ delta=choice.delta.content,
+ snapshot=choice_snapshot.message.content,
+ parsed=choice_snapshot.message.parsed,
+ )
+ )
+
+ if choice.delta.refusal is not None and choice_snapshot.message.refusal is not None:
+ events_to_fire.append(
+ build(
+ RefusalDeltaEvent,
+ type="refusal.delta",
+ delta=choice.delta.refusal,
+ snapshot=choice_snapshot.message.refusal,
+ )
+ )
+
+ if choice.delta.tool_calls:
+ tool_calls = choice_snapshot.message.tool_calls
+ assert tool_calls is not None
+
+ for tool_call_delta in choice.delta.tool_calls:
+ tool_call = tool_calls[tool_call_delta.index]
+
+ if tool_call.type == "function":
+ assert tool_call_delta.function is not None
+ events_to_fire.append(
+ build(
+ FunctionToolCallArgumentsDeltaEvent,
+ type="tool_calls.function.arguments.delta",
+ name=tool_call.function.name,
+ index=tool_call_delta.index,
+ arguments=tool_call.function.arguments,
+ parsed_arguments=tool_call.function.parsed_arguments,
+ arguments_delta=tool_call_delta.function.arguments or "",
+ )
+ )
+ elif TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(tool_call)
+
+ if choice.logprobs is not None and choice_snapshot.logprobs is not None:
+ if choice.logprobs.content and choice_snapshot.logprobs.content:
+ events_to_fire.append(
+ build(
+ LogprobsContentDeltaEvent,
+ type="logprobs.content.delta",
+ content=choice.logprobs.content,
+ snapshot=choice_snapshot.logprobs.content,
+ ),
+ )
+
+ if choice.logprobs.refusal and choice_snapshot.logprobs.refusal:
+ events_to_fire.append(
+ build(
+ LogprobsRefusalDeltaEvent,
+ type="logprobs.refusal.delta",
+ refusal=choice.logprobs.refusal,
+ snapshot=choice_snapshot.logprobs.refusal,
+ ),
+ )
+
+ events_to_fire.extend(
+ choice_state.get_done_events(
+ choice_chunk=choice,
+ choice_snapshot=choice_snapshot,
+ response_format=self._response_format,
+ )
+ )
+
+ return events_to_fire
+
+
+class ChoiceEventState:
+ def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None:
+ self._input_tools = input_tools
+
+ self._content_done = False
+ self._refusal_done = False
+ self._logprobs_content_done = False
+ self._logprobs_refusal_done = False
+ self._done_tool_calls: set[int] = set()
+ self.__current_tool_call_index: int | None = None
+
+ def get_done_events(
+ self,
+ *,
+ choice_chunk: ChoiceChunk,
+ choice_snapshot: ParsedChoiceSnapshot,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+ events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+ if choice_snapshot.finish_reason:
+ events_to_fire.extend(
+ self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+ )
+
+ if (
+ self.__current_tool_call_index is not None
+ and self.__current_tool_call_index not in self._done_tool_calls
+ ):
+ self._add_tool_done_event(
+ events_to_fire=events_to_fire,
+ choice_snapshot=choice_snapshot,
+ tool_index=self.__current_tool_call_index,
+ )
+
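+        # a change in tool call index means the previous tool call has
+        # finished streaming, so its `arguments.done` event can be fired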
+ for tool_call in choice_chunk.delta.tool_calls or []:
+ if self.__current_tool_call_index != tool_call.index:
+ events_to_fire.extend(
+ self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+ )
+
+ if self.__current_tool_call_index is not None:
+ self._add_tool_done_event(
+ events_to_fire=events_to_fire,
+ choice_snapshot=choice_snapshot,
+ tool_index=self.__current_tool_call_index,
+ )
+
+ self.__current_tool_call_index = tool_call.index
+
+ return events_to_fire
+
+ def _content_done_events(
+ self,
+ *,
+ choice_snapshot: ParsedChoiceSnapshot,
+ response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+ ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+ events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+ if choice_snapshot.message.content and not self._content_done:
+ self._content_done = True
+
+ parsed = maybe_parse_content(
+ response_format=response_format,
+ message=choice_snapshot.message,
+ )
+
+ # update the parsed content to use the richer `response_format`
+ # type instead of the raw JSON-parsed object, as the content is
+ # now complete and can be fully validated.
+ choice_snapshot.message.parsed = parsed
+
+ events_to_fire.append(
+ build(
+ # we do this dance so that when the `ContentDoneEvent` instance
+ # is printed at runtime the class name will include the solved
+ # type variable, e.g. `ContentDoneEvent[MyModelType]`
+ cast( # pyright: ignore[reportUnnecessaryCast]
+ "type[ContentDoneEvent[ResponseFormatT]]",
+ cast(Any, ContentDoneEvent)[solve_response_format_t(response_format)],
+ ),
+ type="content.done",
+ content=choice_snapshot.message.content,
+ parsed=parsed,
+ ),
+ )
+
+ if choice_snapshot.message.refusal is not None and not self._refusal_done:
+ self._refusal_done = True
+ events_to_fire.append(
+ build(RefusalDoneEvent, type="refusal.done", refusal=choice_snapshot.message.refusal),
+ )
+
+ if (
+ choice_snapshot.logprobs is not None
+ and choice_snapshot.logprobs.content is not None
+ and not self._logprobs_content_done
+ ):
+ self._logprobs_content_done = True
+ events_to_fire.append(
+ build(LogprobsContentDoneEvent, type="logprobs.content.done", content=choice_snapshot.logprobs.content),
+ )
+
+ if (
+ choice_snapshot.logprobs is not None
+ and choice_snapshot.logprobs.refusal is not None
+ and not self._logprobs_refusal_done
+ ):
+ self._logprobs_refusal_done = True
+ events_to_fire.append(
+ build(LogprobsRefusalDoneEvent, type="logprobs.refusal.done", refusal=choice_snapshot.logprobs.refusal),
+ )
+
+ return events_to_fire
+
+ def _add_tool_done_event(
+ self,
+ *,
+ events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]],
+ choice_snapshot: ParsedChoiceSnapshot,
+ tool_index: int,
+ ) -> None:
+ if tool_index in self._done_tool_calls:
+ return
+
+ self._done_tool_calls.add(tool_index)
+
+ assert choice_snapshot.message.tool_calls is not None
+ tool_call_snapshot = choice_snapshot.message.tool_calls[tool_index]
+
+ if tool_call_snapshot.type == "function":
+ parsed_arguments = parse_function_tool_arguments(
+ input_tools=self._input_tools, function=tool_call_snapshot.function
+ )
+
+ # update the parsed content to potentially use a richer type
+ # instead of the raw JSON-parsed object, as the content is now
+ # complete and can be fully validated.
+ tool_call_snapshot.function.parsed_arguments = parsed_arguments
+
+ events_to_fire.append(
+ build(
+ FunctionToolCallArgumentsDoneEvent,
+ type="tool_calls.function.arguments.done",
+ index=tool_index,
+ name=tool_call_snapshot.function.name,
+ arguments=tool_call_snapshot.function.arguments,
+ parsed_arguments=parsed_arguments,
+ )
+ )
+ elif TYPE_CHECKING: # type: ignore[unreachable]
+ assert_never(tool_call_snapshot)
+
+
+def _convert_initial_chunk_into_snapshot(chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+ data = chunk.to_dict()
+ choices = cast("list[object]", data["choices"])
+
+ for choice in chunk.choices:
+ choices[choice.index] = {
+ **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+ "message": choice.delta.to_dict(),
+ }
+
+ return cast(
+ ParsedChatCompletionSnapshot,
+ construct_type(
+ type_=ParsedChatCompletionSnapshot,
+ value={
+ "system_fingerprint": None,
+ **data,
+ "object": "chat.completion",
+ },
+ ),
+ )
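For orientation, here is a minimal sketch of the reshaping `_convert_initial_chunk_into_snapshot` performs on the first chunk, shown with plain dicts rather than the real pydantic models:

```py
# sketch only: the function operates on ChatCompletionChunk models,
# shown here as plain dicts for readability
chunk = {
    "id": "chatcmpl-abc123",
    "object": "chat.completion.chunk",
    "choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}}],
}

# each choice's `delta` is re-keyed as `message`, `object` is re-tagged,
# and a default `system_fingerprint` is filled in
snapshot = {
    "id": "chatcmpl-abc123",
    "object": "chat.completion",
    "system_fingerprint": None,
    "choices": [{"index": 0, "message": {"role": "assistant", "content": ""}}],
}
```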
src/openai/lib/streaming/chat/_events.py
@@ -0,0 +1,123 @@
+from typing import List, Union, Generic, Optional
+from typing_extensions import Literal
+
+from ._types import ParsedChatCompletionSnapshot
+from ...._models import BaseModel, GenericModel
+from ..._parsing import ResponseFormatT
+from ....types.chat import ChatCompletionChunk, ChatCompletionTokenLogprob
+
+
+class ChunkEvent(BaseModel):
+ type: Literal["chunk"]
+
+ chunk: ChatCompletionChunk
+
+ snapshot: ParsedChatCompletionSnapshot
+
+
+class ContentDeltaEvent(BaseModel):
+ """This event is yielded for every chunk with `choice.delta.content` data."""
+
+ type: Literal["content.delta"]
+
+ delta: str
+
+ snapshot: str
+
+ parsed: Optional[object] = None
+
+
+class ContentDoneEvent(GenericModel, Generic[ResponseFormatT]):
+ type: Literal["content.done"]
+
+ content: str
+
+ parsed: Optional[ResponseFormatT] = None
+
+
+class RefusalDeltaEvent(BaseModel):
+ type: Literal["refusal.delta"]
+
+ delta: str
+
+ snapshot: str
+
+
+class RefusalDoneEvent(BaseModel):
+ type: Literal["refusal.done"]
+
+ refusal: str
+
+
+class FunctionToolCallArgumentsDeltaEvent(BaseModel):
+ type: Literal["tool_calls.function.arguments.delta"]
+
+ name: str
+
+ index: int
+
+ arguments: str
+ """Accumulated raw JSON string"""
+
+ parsed_arguments: object
+ """The parsed arguments so far"""
+
+ arguments_delta: str
+ """The JSON string delta"""
+
+
+class FunctionToolCallArgumentsDoneEvent(BaseModel):
+ type: Literal["tool_calls.function.arguments.done"]
+
+ name: str
+
+ index: int
+
+ arguments: str
+ """Accumulated raw JSON string"""
+
+ parsed_arguments: object
+ """The parsed arguments"""
+
+
+class LogprobsContentDeltaEvent(BaseModel):
+ type: Literal["logprobs.content.delta"]
+
+ content: List[ChatCompletionTokenLogprob]
+
+ snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsContentDoneEvent(BaseModel):
+ type: Literal["logprobs.content.done"]
+
+ content: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDeltaEvent(BaseModel):
+ type: Literal["logprobs.refusal.delta"]
+
+ refusal: List[ChatCompletionTokenLogprob]
+
+ snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDoneEvent(BaseModel):
+ type: Literal["logprobs.refusal.done"]
+
+ refusal: List[ChatCompletionTokenLogprob]
+
+
+ChatCompletionStreamEvent = Union[
+ ChunkEvent,
+ ContentDeltaEvent,
+ ContentDoneEvent[ResponseFormatT],
+ RefusalDeltaEvent,
+ RefusalDoneEvent,
+ FunctionToolCallArgumentsDeltaEvent,
+ FunctionToolCallArgumentsDoneEvent,
+ LogprobsContentDeltaEvent,
+ LogprobsContentDoneEvent,
+ LogprobsRefusalDeltaEvent,
+ LogprobsRefusalDoneEvent,
+]
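A minimal sketch of dispatching on this event union, assuming an `OpenAI` client and the `stream()` helper added later in this commit (model and messages are placeholders):

```py
from openai import OpenAI

client = OpenAI()

with client.beta.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Say hi"}],
) as stream:
    for event in stream:
        if event.type == "content.delta":
            # incremental text as it arrives
            print(event.delta, end="")
        elif event.type == "refusal.done":
            print("refused:", event.refusal)
        elif event.type == "content.done":
            # the fully accumulated (and, if applicable, parsed) content
            print("\nfinal:", event.content)
```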
src/openai/lib/streaming/chat/_types.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from ....types.chat import ParsedChoice, ParsedChatCompletion, ParsedChatCompletionMessage
+
+ParsedChatCompletionSnapshot: TypeAlias = ParsedChatCompletion[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletion` object.
+"""
+
+ParsedChatCompletionMessageSnapshot: TypeAlias = ParsedChatCompletionMessage[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletionMessage` object.
+
+If the content has been fully accumulated, the `.parsed` content will be
+the `response_format` instance; otherwise it will be the raw JSON-parsed version.
+"""
+
+ParsedChoiceSnapshot: TypeAlias = ParsedChoice[object]
src/openai/lib/streaming/_deltas.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from ..._utils import is_dict, is_list
+
+
+def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
+ for key, delta_value in delta.items():
+ if key not in acc:
+ acc[key] = delta_value
+ continue
+
+ acc_value = acc[key]
+ if acc_value is None:
+ acc[key] = delta_value
+ continue
+
+ # the `index` property is used in arrays of objects, so it should
+ # not be accumulated like other values, e.g.
+ # [{'foo': 'bar', 'index': 0}]
+ #
+ # the same applies to `type` properties as they're used for
+ # discriminated unions
+ if key == "index" or key == "type":
+ acc[key] = delta_value
+ continue
+
+ if isinstance(acc_value, str) and isinstance(delta_value, str):
+ acc_value += delta_value
+ elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
+ acc_value += delta_value
+ elif is_dict(acc_value) and is_dict(delta_value):
+ acc_value = accumulate_delta(acc_value, delta_value)
+ elif is_list(acc_value) and is_list(delta_value):
+ # for lists of non-dictionary items we'll only ever get new entries
+ # in the array; existing entries will never be changed
+ if all(isinstance(x, (str, int, float)) for x in acc_value):
+ acc_value.extend(delta_value)
+ continue
+
+ for delta_entry in delta_value:
+ if not is_dict(delta_entry):
+ raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")
+
+ try:
+ index = delta_entry["index"]
+ except KeyError as exc:
+ raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc
+
+ if not isinstance(index, int):
+ raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}")
+
+ try:
+ acc_entry = acc_value[index]
+ except IndexError:
+ acc_value.insert(index, delta_entry)
+ else:
+ if not is_dict(acc_entry):
+ raise TypeError("not handled yet")
+
+ acc_value[index] = accumulate_delta(acc_entry, delta_entry)
+
+ acc[key] = acc_value
+
+ return acc
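A minimal sketch of how `accumulate_delta` merges two successive chunk payloads; the dictionaries are hypothetical (modeled on the chunk shape above), and the import path targets this private helper module:

```py
from openai.lib.streaming._deltas import accumulate_delta

acc = {
    "content": "Hello",
    "tool_calls": [{"index": 0, "type": "function", "function": {"arguments": '{"ci'}}],
}
delta = {
    "content": ", world",
    "tool_calls": [{"index": 0, "function": {"arguments": 'ty":'}}],
}

merged = accumulate_delta(acc, delta)  # mutates and returns `acc`
assert merged["content"] == "Hello, world"  # strings are concatenated
# list entries are matched by their `index` key and merged recursively
assert merged["tool_calls"][0]["function"]["arguments"] == '{"city":'
```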
src/openai/lib/__init__.py
@@ -0,0 +1,2 @@
+from ._tools import pydantic_function_tool as pydantic_function_tool
+from ._parsing import ResponseFormatT as ResponseFormatT
src/openai/lib/_pydantic.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import TypeGuard
+
+import pydantic
+
+from .._utils import is_dict as _is_dict, is_list
+from .._compat import model_json_schema
+
+
+def to_strict_json_schema(model: type[pydantic.BaseModel]) -> dict[str, Any]:
+ return _ensure_strict_json_schema(model_json_schema(model), path=())
+
+
+def _ensure_strict_json_schema(
+ json_schema: object,
+ path: tuple[str, ...],
+) -> dict[str, Any]:
+ """Mutates the given JSON schema to ensure it conforms to the `strict` standard
+ that the API expects.
+ """
+ if not is_dict(json_schema):
+ raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")
+
+ typ = json_schema.get("type")
+ if typ == "object" and "additionalProperties" not in json_schema:
+ json_schema["additionalProperties"] = False
+
+ # object types
+ # { 'type': 'object', 'properties': { 'a': {...} } }
+ properties = json_schema.get("properties")
+ if is_dict(properties):
+ json_schema["required"] = [prop for prop in properties.keys()]
+ json_schema["properties"] = {
+ key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key))
+ for key, prop_schema in properties.items()
+ }
+
+ # arrays
+ # { 'type': 'array', 'items': {...} }
+ items = json_schema.get("items")
+ if is_dict(items):
+ json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"))
+
+ # unions
+ any_of = json_schema.get("anyOf")
+ if is_list(any_of):
+ json_schema["anyOf"] = [
+ _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i))) for i, variant in enumerate(any_of)
+ ]
+
+ # intersections
+ all_of = json_schema.get("allOf")
+ if is_list(all_of):
+ json_schema["allOf"] = [
+ _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i))) for i, entry in enumerate(all_of)
+ ]
+
+ defs = json_schema.get("$defs")
+ if is_dict(defs):
+ for def_name, def_schema in defs.items():
+ _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name))
+
+ return json_schema
+
+
+def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
+ # just pretend that we know there are only `str` keys
+ # as that check is not worth the performance cost
+ return _is_dict(obj)
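A short sketch of what `to_strict_json_schema` does to a simple model; the `Weather` model is hypothetical, and the import targets this private module:

```py
import pydantic

from openai.lib._pydantic import to_strict_json_schema


class Weather(pydantic.BaseModel):
    city: str
    unit: str


schema = to_strict_json_schema(Weather)
# every object schema gets `additionalProperties: False`, and all
# properties are marked required, as strict mode expects
assert schema["additionalProperties"] is False
assert schema["required"] == ["city", "unit"]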
src/openai/lib/_tools.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import Any, Dict, cast
+
+import pydantic
+
+from ._pydantic import to_strict_json_schema
+from ..types.chat import ChatCompletionToolParam
+from ..types.shared_params import FunctionDefinition
+
+
+class PydanticFunctionTool(Dict[str, Any]):
+ """Dictionary wrapper so we can pass the given base model
+ throughout the entire request stack without having to special
+ case it.
+ """
+
+ model: type[pydantic.BaseModel]
+
+ def __init__(self, defn: FunctionDefinition, model: type[pydantic.BaseModel]) -> None:
+ super().__init__(defn)
+ self.model = model
+
+ def cast(self) -> FunctionDefinition:
+ return cast(FunctionDefinition, self)
+
+
+def pydantic_function_tool(
+ model: type[pydantic.BaseModel],
+ *,
+ name: str | None = None, # inferred from class name by default
+ description: str | None = None, # inferred from class docstring by default
+) -> ChatCompletionToolParam:
+ if description is None:
+ # note: we intentionally don't use `.getdoc()` to avoid
+ # including pydantic's docstrings
+ description = model.__doc__
+
+ function = PydanticFunctionTool(
+ {
+ "name": name or model.__name__,
+ "strict": True,
+ "parameters": to_strict_json_schema(model),
+ },
+ model,
+ ).cast()
+
+ if description is not None:
+ function["description"] = description
+
+ return {
+ "type": "function",
+ "function": function,
+ }
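A minimal usage sketch for `pydantic_function_tool` (exported as `openai.pydantic_function_tool` via `lib/__init__.py` above); the `GetWeather` model is hypothetical:

```py
import pydantic

from openai import pydantic_function_tool


class GetWeather(pydantic.BaseModel):
    """Look up the current weather for a city."""

    city: str


tool = pydantic_function_tool(GetWeather)
# -> roughly:
# {"type": "function",
#  "function": {"name": "GetWeather", "strict": True,
#               "parameters": {...strict JSON schema...},
#               "description": "Look up the current weather for a city."}}
```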
src/openai/resources/beta/chat/__init__.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import Chat, AsyncChat
+from .completions import Completions, AsyncCompletions
+
+__all__ = [
+ "Completions",
+ "AsyncCompletions",
+ "Chat",
+ "AsyncChat",
+]
src/openai/resources/beta/chat/chat.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ...._compat import cached_property
+from .completions import Completions, AsyncCompletions
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["Chat", "AsyncChat"]
+
+
+class Chat(SyncAPIResource):
+ @cached_property
+ def completions(self) -> Completions:
+ return Completions(self._client)
+
+
+class AsyncChat(AsyncAPIResource):
+ @cached_property
+ def completions(self) -> AsyncCompletions:
+ return AsyncCompletions(self._client)
src/openai/resources/beta/chat/completions.py
@@ -0,0 +1,449 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from functools import partial
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._streaming import Stream
+from ....types.chat import completion_create_params
+from ....lib._parsing import (
+ ResponseFormatT,
+ validate_input_tools as _validate_input_tools,
+ parse_chat_completion as _parse_chat_completion,
+ type_to_response_format_param as _type_to_response_format,
+)
+from ....types.chat_model import ChatModel
+from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.chat.chat_completion_chunk import ChatCompletionChunk
+from ....types.chat.parsed_chat_completion import ParsedChatCompletion
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+ def parse(
+ self,
+ *,
+ messages: Iterable[ChatCompletionMessageParam],
+ model: Union[str, ChatModel],
+ response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ParsedChatCompletion[ResponseFormatT]:
+ """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+ & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+ You can pass a pydantic model to this method and it will automatically convert the model
+ into a JSON schema, send it to the API and parse the response content back into the given model.
+
+ This method will also automatically parse `function` tool calls if:
+ - You use the `openai.pydantic_function_tool()` helper method
+ - You mark your tool schema with `"strict": True`
+
+ Example usage:
+ ```py
+ from typing import List
+
+ from pydantic import BaseModel
+ from openai import OpenAI
+
+ class Step(BaseModel):
+ explanation: str
+ output: str
+
+ class MathResponse(BaseModel):
+ steps: List[Step]
+ final_answer: str
+
+ client = OpenAI()
+ completion = client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "solve 8x + 31 = 2"},
+ ],
+ response_format=MathResponse,
+ )
+
+ message = completion.choices[0].message
+ if message.parsed:
+ print(message.parsed.steps)
+ print("answer: ", message.parsed.final_answer)
+ ```
+ """
+ _validate_input_tools(tools)
+
+ extra_headers = {
+ "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+ **(extra_headers or {}),
+ }
+
+ raw_completion = self._client.chat.completions.create(
+ messages=messages,
+ model=model,
+ response_format=_type_to_response_format(response_format),
+ frequency_penalty=frequency_penalty,
+ function_call=function_call,
+ functions=functions,
+ logit_bias=logit_bias,
+ logprobs=logprobs,
+ max_tokens=max_tokens,
+ n=n,
+ parallel_tool_calls=parallel_tool_calls,
+ presence_penalty=presence_penalty,
+ seed=seed,
+ service_tier=service_tier,
+ stop=stop,
+ stream_options=stream_options,
+ temperature=temperature,
+ tool_choice=tool_choice,
+ tools=tools,
+ top_logprobs=top_logprobs,
+ top_p=top_p,
+ user=user,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return _parse_chat_completion(
+ response_format=response_format,
+ chat_completion=raw_completion,
+ input_tools=tools,
+ )
+
+ def stream(
+ self,
+ *,
+ messages: Iterable[ChatCompletionMessageParam],
+ model: Union[str, ChatModel],
+ response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ChatCompletionStreamManager[ResponseFormatT]:
+ """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+ and automatic accumulation of each delta.
+
+ This also supports all of the parsing utilities that `.parse()` does.
+
+ Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+ ```py
+ with client.beta.chat.completions.stream(
+ model='gpt-4o-2024-08-06',
+ messages=[...],
+ ) as stream:
+ for event in stream:
+ if event.type == 'content.delta':
+ print(event.delta, flush=True, end='')
+ ```
+
+ When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+ When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+ the context manager.
+ """
+ _validate_input_tools(tools)
+
+ extra_headers = {
+ "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+ **(extra_headers or {}),
+ }
+
+ api_request: partial[Stream[ChatCompletionChunk]] = partial(
+ self._client.chat.completions.create,
+ messages=messages,
+ model=model,
+ stream=True,
+ response_format=_type_to_response_format(response_format),
+ frequency_penalty=frequency_penalty,
+ function_call=function_call,
+ functions=functions,
+ logit_bias=logit_bias,
+ logprobs=logprobs,
+ max_tokens=max_tokens,
+ n=n,
+ parallel_tool_calls=parallel_tool_calls,
+ presence_penalty=presence_penalty,
+ seed=seed,
+ service_tier=service_tier,
+ stop=stop,
+ stream_options=stream_options,
+ temperature=temperature,
+ tool_choice=tool_choice,
+ tools=tools,
+ top_logprobs=top_logprobs,
+ top_p=top_p,
+ user=user,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return ChatCompletionStreamManager(
+ api_request,
+ response_format=response_format,
+ input_tools=tools,
+ )
+
+
+class AsyncCompletions(AsyncAPIResource):
+ async def parse(
+ self,
+ *,
+ messages: Iterable[ChatCompletionMessageParam],
+ model: Union[str, ChatModel],
+ response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ParsedChatCompletion[ResponseFormatT]:
+ """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+ & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+ You can pass a pydantic model to this method and it will automatically convert the model
+ into a JSON schema, send it to the API and parse the response content back into the given model.
+
+ This method will also automatically parse `function` tool calls if:
+ - You use the `openai.pydantic_function_tool()` helper method
+ - You mark your tool schema with `"strict": True`
+
+ Example usage:
+ ```py
+ from typing import List
+
+ from pydantic import BaseModel
+ from openai import AsyncOpenAI
+
+ class Step(BaseModel):
+ explanation: str
+ output: str
+
+ class MathResponse(BaseModel):
+ steps: List[Step]
+ final_answer: str
+
+ client = AsyncOpenAI()
+ completion = await client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "solve 8x + 31 = 2"},
+ ],
+ response_format=MathResponse,
+ )
+
+ message = completion.choices[0].message
+ if message.parsed:
+ print(message.parsed.steps)
+ print("answer: ", message.parsed.final_answer)
+ ```
+ """
+ _validate_input_tools(tools)
+
+ extra_headers = {
+ "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+ **(extra_headers or {}),
+ }
+
+ raw_completion = await self._client.chat.completions.create(
+ messages=messages,
+ model=model,
+ response_format=_type_to_response_format(response_format),
+ frequency_penalty=frequency_penalty,
+ function_call=function_call,
+ functions=functions,
+ logit_bias=logit_bias,
+ logprobs=logprobs,
+ max_tokens=max_tokens,
+ n=n,
+ parallel_tool_calls=parallel_tool_calls,
+ presence_penalty=presence_penalty,
+ seed=seed,
+ service_tier=service_tier,
+ stop=stop,
+ stream_options=stream_options,
+ temperature=temperature,
+ tool_choice=tool_choice,
+ tools=tools,
+ top_logprobs=top_logprobs,
+ top_p=top_p,
+ user=user,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return _parse_chat_completion(
+ response_format=response_format,
+ chat_completion=raw_completion,
+ input_tools=tools,
+ )
+
+ def stream(
+ self,
+ *,
+ messages: Iterable[ChatCompletionMessageParam],
+ model: Union[str, ChatModel],
+ response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+ n: Optional[int] | NotGiven = NOT_GIVEN,
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
+ user: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
+ """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+ and automatic accumulation of each delta.
+
+ This also supports all of the parsing utilities that `.parse()` does.
+
+ Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+ ```py
+ async with client.beta.chat.completions.stream(
+ model='gpt-4o-2024-08-06',
+ messages=[...],
+ ) as stream:
+ async for event in stream:
+ if event.type == 'content.delta':
+ print(event.delta, flush=True, end='')
+ ```
+
+ When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an async iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+ When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+ the context manager.
+ """
+ _validate_input_tools(tools)
+
+ extra_headers = {
+ "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+ **(extra_headers or {}),
+ }
+
+ api_request = self._client.chat.completions.create(
+ messages=messages,
+ model=model,
+ stream=True,
+ response_format=_type_to_response_format(response_format),
+ frequency_penalty=frequency_penalty,
+ function_call=function_call,
+ functions=functions,
+ logit_bias=logit_bias,
+ logprobs=logprobs,
+ max_tokens=max_tokens,
+ n=n,
+ parallel_tool_calls=parallel_tool_calls,
+ presence_penalty=presence_penalty,
+ seed=seed,
+ service_tier=service_tier,
+ stop=stop,
+ stream_options=stream_options,
+ temperature=temperature,
+ tool_choice=tool_choice,
+ tools=tools,
+ top_logprobs=top_logprobs,
+ top_p=top_p,
+ user=user,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return AsyncChatCompletionStreamManager(
+ api_request,
+ response_format=response_format,
+ input_tools=tools,
+ )
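A hedged end-to-end sketch tying the helpers in this commit together — `pydantic_function_tool` feeding `stream()` and the tool-call events it yields; the model class, prompt, and model name are placeholders:

```py
from pydantic import BaseModel

from openai import OpenAI, pydantic_function_tool


class GetWeather(BaseModel):
    """Look up the current weather for a city."""

    city: str


client = OpenAI()

with client.beta.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[pydantic_function_tool(GetWeather)],
) as stream:
    for event in stream:
        if event.type == "tool_calls.function.arguments.delta":
            # raw JSON fragment for the arguments as it streams in
            print(event.arguments_delta, end="")
        elif event.type == "tool_calls.function.arguments.done":
            # parsed into the GetWeather model since the tool is strict
            print("\nparsed:", event.parsed_arguments)
```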
src/openai/resources/beta/threads/runs/runs.py
@@ -145,6 +145,11 @@ class Runs(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -275,6 +280,11 @@ class Runs(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -401,6 +411,11 @@ class Runs(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1443,6 +1458,11 @@ class AsyncRuns(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1573,6 +1593,11 @@ class AsyncRuns(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1699,6 +1724,11 @@ class AsyncRuns(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/resources/beta/threads/threads.py
@@ -323,6 +323,11 @@ class Threads(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -452,6 +457,11 @@ class Threads(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -577,6 +587,11 @@ class Threads(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1131,6 +1146,11 @@ class AsyncThreads(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1260,6 +1280,11 @@ class AsyncThreads(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -1385,6 +1410,11 @@ class AsyncThreads(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/resources/beta/assistants.py
@@ -88,6 +88,11 @@ class Assistants(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -233,6 +238,11 @@ class Assistants(SyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -453,6 +463,11 @@ class AsyncAssistants(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
@@ -598,6 +613,11 @@ class AsyncAssistants(AsyncAPIResource):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/resources/beta/beta.py
@@ -11,6 +11,7 @@ from .threads import (
AsyncThreadsWithStreamingResponse,
)
from ..._compat import cached_property
+from .chat.chat import Chat, AsyncChat
from .assistants import (
Assistants,
AsyncAssistants,
@@ -35,6 +36,10 @@ __all__ = ["Beta", "AsyncBeta"]
class Beta(SyncAPIResource):
+ @cached_property
+ def chat(self) -> Chat:
+ return Chat(self._client)
+
@cached_property
def vector_stores(self) -> VectorStores:
return VectorStores(self._client)
@@ -57,6 +62,10 @@ class Beta(SyncAPIResource):
class AsyncBeta(AsyncAPIResource):
+ @cached_property
+ def chat(self) -> AsyncChat:
+ return AsyncChat(self._client)
+
@cached_property
def vector_stores(self) -> AsyncVectorStores:
return AsyncVectorStores(self._client)
src/openai/resources/chat/completions.py
@@ -19,9 +19,7 @@ from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.chat import completion_create_params
-from ..._base_client import (
- make_request_options,
-)
+from ..._base_client import make_request_options
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
@@ -144,6 +142,8 @@ class Completions(SyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -340,6 +340,8 @@ class Completions(SyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -529,6 +531,8 @@ class Completions(SyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -793,6 +797,8 @@ class AsyncCompletions(AsyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -989,6 +995,8 @@ class AsyncCompletions(AsyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -1178,6 +1186,8 @@ class AsyncCompletions(AsyncAPIResource):
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
response_format: An object specifying the format that the model must output. Compatible with
+ [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
src/openai/resources/fine_tuning/jobs/jobs.py
@@ -52,7 +52,7 @@ class Jobs(SyncAPIResource):
def create(
self,
*,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
+ model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
training_file: str,
hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -77,7 +77,7 @@ class Jobs(SyncAPIResource):
Args:
model: The name of the model to fine-tune. You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
@@ -107,7 +107,7 @@ class Jobs(SyncAPIResource):
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+ `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
validation_file: The ID of an uploaded file that contains validation data.
@@ -332,7 +332,7 @@ class AsyncJobs(AsyncAPIResource):
async def create(
self,
*,
- model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
+ model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
training_file: str,
hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -357,7 +357,7 @@ class AsyncJobs(AsyncAPIResource):
Args:
model: The name of the model to fine-tune. You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
@@ -387,7 +387,7 @@ class AsyncJobs(AsyncAPIResource):
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+ `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
validation_file: The ID of an uploaded file that contains validation data.
src/openai/types/beta/threads/__init__.py
@@ -25,11 +25,13 @@ from .run_update_params import RunUpdateParams as RunUpdateParams
from .text_content_block import TextContentBlock as TextContentBlock
from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
from .message_list_params import MessageListParams as MessageListParams
+from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock
from .file_path_annotation import FilePathAnnotation as FilePathAnnotation
from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock
from .message_content_delta import MessageContentDelta as MessageContentDelta
from .message_create_params import MessageCreateParams as MessageCreateParams
from .message_update_params import MessageUpdateParams as MessageUpdateParams
+from .refusal_content_block import RefusalContentBlock as RefusalContentBlock
from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock
from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock
from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation
src/openai/types/beta/threads/message_content.py
@@ -5,11 +5,14 @@ from typing_extensions import Annotated, TypeAlias
from ...._utils import PropertyInfo
from .text_content_block import TextContentBlock
+from .refusal_content_block import RefusalContentBlock
from .image_url_content_block import ImageURLContentBlock
from .image_file_content_block import ImageFileContentBlock
__all__ = ["MessageContent"]
+
MessageContent: TypeAlias = Annotated[
- Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock], PropertyInfo(discriminator="type")
+ Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/beta/threads/message_content_delta.py
@@ -5,11 +5,13 @@ from typing_extensions import Annotated, TypeAlias
from ...._utils import PropertyInfo
from .text_delta_block import TextDeltaBlock
+from .refusal_delta_block import RefusalDeltaBlock
from .image_url_delta_block import ImageURLDeltaBlock
from .image_file_delta_block import ImageFileDeltaBlock
__all__ = ["MessageContentDelta"]
MessageContentDelta: TypeAlias = Annotated[
- Union[ImageFileDeltaBlock, TextDeltaBlock, ImageURLDeltaBlock], PropertyInfo(discriminator="type")
+ Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock],
+ PropertyInfo(discriminator="type"),
]
src/openai/types/beta/threads/refusal_content_block.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalContentBlock"]
+
+
+class RefusalContentBlock(BaseModel):
+ refusal: str
+
+ type: Literal["refusal"]
+ """Always `refusal`."""
src/openai/types/beta/threads/refusal_delta_block.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalDeltaBlock"]
+
+
+class RefusalDeltaBlock(BaseModel):
+ index: int
+ """The index of the refusal part in the message."""
+
+ type: Literal["refusal"]
+ """Always `refusal`."""
+
+ refusal: Optional[str] = None
src/openai/types/beta/threads/run.py
@@ -171,6 +171,11 @@ class Run(BaseModel):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/beta/threads/run_create_params.py
@@ -97,6 +97,11 @@ class RunCreateParamsBase(TypedDict, total=False):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/beta/vector_stores/vector_store_file.py
@@ -17,7 +17,7 @@ __all__ = [
class LastError(BaseModel):
- code: Literal["internal_error", "file_not_found", "parsing_error", "unhandled_mime_type"]
+ code: Literal["server_error", "unsupported_file", "invalid_file"]
"""One of `server_error` or `rate_limit_exceeded`."""
message: str
src/openai/types/beta/__init__.py
@@ -23,7 +23,6 @@ from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
@@ -31,7 +30,6 @@ from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpr
from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
-from .assistant_response_format_param import AssistantResponseFormatParam as AssistantResponseFormatParam
from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
src/openai/types/beta/assistant.py
@@ -89,6 +89,11 @@ class Assistant(BaseModel):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/beta/assistant_create_params.py
@@ -60,6 +60,11 @@ class AssistantCreateParams(TypedDict, total=False):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/beta/assistant_response_format.py
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantResponseFormat"]
-
-
-class AssistantResponseFormat(BaseModel):
- type: Optional[Literal["text", "json_object"]] = None
- """Must be one of `text` or `json_object`."""
src/openai/types/beta/assistant_response_format_option.py
@@ -3,8 +3,12 @@
from typing import Union
from typing_extensions import Literal, TypeAlias
-from .assistant_response_format import AssistantResponseFormat
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from ..shared.response_format_json_schema import ResponseFormatJSONSchema
__all__ = ["AssistantResponseFormatOption"]
-AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat]
+AssistantResponseFormatOption: TypeAlias = Union[
+ Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
src/openai/types/beta/assistant_response_format_option_param.py
@@ -5,8 +5,13 @@ from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias
-from .assistant_response_format_param import AssistantResponseFormatParam
+from ...types import shared_params
__all__ = ["AssistantResponseFormatOptionParam"]
-AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam]
+AssistantResponseFormatOptionParam: TypeAlias = Union[
+ Literal["auto"],
+ shared_params.ResponseFormatText,
+ shared_params.ResponseFormatJSONObject,
+ shared_params.ResponseFormatJSONSchema,
+]
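
With the option param now a union over the shared response-format params, assistants can opt into Structured Outputs. A minimal sketch (the schema name and shape below are hypothetical; assumes a configured client):

from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-4o",
    # "auto" is still accepted; a json_schema object enables Structured Outputs
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "weather_report",  # hypothetical schema name
            "schema": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)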
src/openai/types/beta/assistant_response_format_param.py
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AssistantResponseFormatParam"]
-
-
-class AssistantResponseFormatParam(TypedDict, total=False):
- type: Literal["text", "json_object"]
- """Must be one of `text` or `json_object`."""
src/openai/types/beta/assistant_update_params.py
@@ -49,6 +49,11 @@ class AssistantUpdateParams(TypedDict, total=False):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/beta/file_search_tool.py
@@ -12,8 +12,8 @@ class FileSearch(BaseModel):
max_num_results: Optional[int] = None
"""The maximum number of results the file search tool should output.
- The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should
- be between 1 and 50 inclusive.
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+ should be between 1 and 50 inclusive.
Note that the file search tool may output fewer than `max_num_results` results.
See the
src/openai/types/beta/file_search_tool_param.py
@@ -11,8 +11,8 @@ class FileSearch(TypedDict, total=False):
max_num_results: int
"""The maximum number of results the file search tool should output.
- The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should
- be between 1 and 50 inclusive.
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+ should be between 1 and 50 inclusive.
Note that the file search tool may output fewer than `max_num_results` results.
See the
src/openai/types/beta/thread_create_and_run_params.py
@@ -100,6 +100,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ Outputs which guarantees the model will match your supplied JSON schema. Learn
+ more in the
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
src/openai/types/chat/__init__.py
@@ -5,8 +5,17 @@ from __future__ import annotations
from .chat_completion import ChatCompletion as ChatCompletion
from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .parsed_chat_completion import (
+ ParsedChoice as ParsedChoice,
+ ParsedChatCompletion as ParsedChatCompletion,
+ ParsedChatCompletionMessage as ParsedChatCompletionMessage,
+)
from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .parsed_function_tool_call import (
+ ParsedFunction as ParsedFunction,
+ ParsedFunctionToolCall as ParsedFunctionToolCall,
+)
from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
@@ -37,6 +46,9 @@ from .chat_completion_content_part_image_param import (
from .chat_completion_tool_choice_option_param import (
ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
)
+from .chat_completion_content_part_refusal_param import (
+ ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
+)
from .chat_completion_function_call_option_param import (
ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
)
src/openai/types/chat/chat_completion.py
@@ -15,6 +15,9 @@ class ChoiceLogprobs(BaseModel):
content: Optional[List[ChatCompletionTokenLogprob]] = None
"""A list of message content tokens with log probability information."""
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
class Choice(BaseModel):
finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
src/openai/types/chat/chat_completion_assistant_message_param.py
@@ -2,12 +2,16 @@
from __future__ import annotations
-from typing import Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
+from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
-__all__ = ["ChatCompletionAssistantMessageParam", "FunctionCall"]
+__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"]
+
+ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
class FunctionCall(TypedDict, total=False):
@@ -27,7 +31,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
- content: Optional[str]
+ content: Union[str, Iterable[ContentArrayOfContentPart], None]
"""The contents of the assistant message.
Required unless `tool_calls` or `function_call` is specified.
@@ -47,5 +51,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
role.
"""
+ refusal: Optional[str]
+ """The refusal message by the assistant."""
+
tool_calls: Iterable[ChatCompletionMessageToolCallParam]
"""The tool calls generated by the model, such as function calls."""
src/openai/types/chat/chat_completion_chunk.py
@@ -67,6 +67,9 @@ class ChoiceDelta(BaseModel):
model.
"""
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
role: Optional[Literal["system", "user", "assistant", "tool"]] = None
"""The role of the author of this message."""
@@ -77,6 +80,9 @@ class ChoiceLogprobs(BaseModel):
content: Optional[List[ChatCompletionTokenLogprob]] = None
"""A list of message content tokens with log probability information."""
+ refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+ """A list of message refusal tokens with log probability information."""
+
class Choice(BaseModel):
delta: ChoiceDelta
src/openai/types/chat/chat_completion_content_part_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartRefusalParam"]
+
+
+class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
+ refusal: Required[str]
+ """The refusal message generated by the model."""
+
+ type: Required[Literal["refusal"]]
+ """The type of the content part."""
src/openai/types/chat/chat_completion_message.py
@@ -26,6 +26,9 @@ class ChatCompletionMessage(BaseModel):
content: Optional[str] = None
"""The contents of the message."""
+ refusal: Optional[str] = None
+ """The refusal message generated by the model."""
+
role: Literal["assistant"]
"""The role of the author of this message."""
src/openai/types/chat/chat_completion_system_message_param.py
@@ -2,13 +2,16 @@
from __future__ import annotations
+from typing import Union, Iterable
from typing_extensions import Literal, Required, TypedDict
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
__all__ = ["ChatCompletionSystemMessageParam"]
class ChatCompletionSystemMessageParam(TypedDict, total=False):
- content: Required[str]
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
"""The contents of the system message."""
role: Required[Literal["system"]]
src/openai/types/chat/chat_completion_tool_message_param.py
@@ -2,13 +2,16 @@
from __future__ import annotations
+from typing import Union, Iterable
from typing_extensions import Literal, Required, TypedDict
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
__all__ = ["ChatCompletionToolMessageParam"]
class ChatCompletionToolMessageParam(TypedDict, total=False):
- content: Required[str]
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
"""The contents of the tool message."""
role: Required[Literal["tool"]]
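
System and tool messages get the same widening: `content` may be a string or an iterable of text parts. Illustrative values (the tool-call id is a placeholder):

from openai.types.chat import (
    ChatCompletionSystemMessageParam,
    ChatCompletionToolMessageParam,
)

system: ChatCompletionSystemMessageParam = {
    "role": "system",
    "content": [{"type": "text", "text": "You are a terse assistant."}],
}

tool: ChatCompletionToolMessageParam = {
    "role": "tool",
    "tool_call_id": "call_abc123",  # placeholder id
    "content": [{"type": "text", "text": '{"temperature": 65}'}],
}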
src/openai/types/chat/completion_create_params.py
@@ -121,7 +121,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
response_format: ResponseFormat
"""An object specifying the format that the model must output.
- Compatible with
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+ [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
@@ -250,9 +251,9 @@ class Function(TypedDict, total=False):
"""
-class ResponseFormat(TypedDict, total=False):
- type: Literal["text", "json_object"]
- """Must be one of `text` or `json_object`."""
+ResponseFormat: TypeAlias = Union[
+ shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema
+]
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
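
The same union now backs plain Chat Completions, so a JSON schema can be supplied without the `parse()` helper. A sketch with a hypothetical schema (assumes a configured client):

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What's the weather like in SF?"}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "location",  # hypothetical schema name
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "temperature": {"type": "number"},
                },
                "required": ["city", "temperature"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)
print(completion.choices[0].message.content)  # JSON conforming to the schema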
src/openai/types/chat/parsed_chat_completion.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Generic, TypeVar, Optional
+
+from ..._models import GenericModel
+from .chat_completion import Choice, ChatCompletion
+from .chat_completion_message import ChatCompletionMessage
+from .parsed_function_tool_call import ParsedFunctionToolCall
+
+__all__ = ["ParsedChatCompletion", "ParsedChoice"]
+
+
+ContentType = TypeVar("ContentType")
+
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedChatCompletionMessage(ChatCompletionMessage, GenericModel, Generic[ContentType]):
+ parsed: Optional[ContentType] = None
+ """The auto-parsed message contents"""
+
+ tool_calls: Optional[List[ParsedFunctionToolCall]] = None # type: ignore[assignment]
+ """The tool calls generated by the model, such as function calls."""
+
+
+class ParsedChoice(Choice, GenericModel, Generic[ContentType]):
+ message: ParsedChatCompletionMessage[ContentType]
+ """A chat completion message generated by the model."""
+
+
+class ParsedChatCompletion(ChatCompletion, GenericModel, Generic[ContentType]):
+ choices: List[ParsedChoice[ContentType]] # type: ignore[assignment]
+ """A list of chat completion choices.
+
+ Can be more than one if `n` is greater than 1.
+ """
src/openai/types/chat/parsed_function_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall
+
+__all__ = ["ParsedFunctionToolCall", "ParsedFunction"]
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedFunction(Function):
+ parsed_arguments: Optional[object] = None
+ """
+ The arguments to call the function with.
+
+ If you used `openai.pydantic_function_tool()` then this will be an
+ instance of the given `BaseModel`.
+
+ Otherwise, this will be the parsed JSON arguments.
+ """
+
+
+class ParsedFunctionToolCall(ChatCompletionMessageToolCall):
+ function: ParsedFunction
+ """The function that the model called."""
src/openai/types/fine_tuning/job_create_params.py
@@ -9,11 +9,11 @@ __all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWand
class JobCreateParams(TypedDict, total=False):
- model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]]]
+ model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
"""The name of the model to fine-tune.
You can select one of the
- [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+ [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
"""
training_file: Required[str]
@@ -54,7 +54,7 @@ class JobCreateParams(TypedDict, total=False):
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
- `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+ `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
"""
validation_file: Optional[str]
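
Matching the widened model literal, a fine-tuning job can now target `gpt-4o-mini`; the training-file id below is the same placeholder the tests use:

from openai import OpenAI

client = OpenAI()
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",  # placeholder file id
)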
src/openai/types/__init__.py
@@ -9,6 +9,9 @@ from .shared import (
ErrorObject as ErrorObject,
FunctionDefinition as FunctionDefinition,
FunctionParameters as FunctionParameters,
+ ResponseFormatText as ResponseFormatText,
+ ResponseFormatJSONObject as ResponseFormatJSONObject,
+ ResponseFormatJSONSchema as ResponseFormatJSONSchema,
)
from .upload import Upload as Upload
from .embedding import Embedding as Embedding
src/openai/types/chat_model.py
@@ -6,6 +6,7 @@ __all__ = ["ChatModel"]
ChatModel: TypeAlias = Literal[
"gpt-4o",
+ "gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
src/openai/__init__.py
@@ -26,8 +26,10 @@ from ._exceptions import (
AuthenticationError,
InternalServerError,
PermissionDeniedError,
+ LengthFinishReasonError,
UnprocessableEntityError,
APIResponseValidationError,
+ ContentFilterFinishReasonError,
)
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging
@@ -55,6 +57,8 @@ __all__ = [
"UnprocessableEntityError",
"RateLimitError",
"InternalServerError",
+ "LengthFinishReasonError",
+ "ContentFilterFinishReasonError",
"Timeout",
"RequestOptions",
"Client",
@@ -72,7 +76,7 @@ __all__ = [
"DefaultAsyncHttpxClient",
]
-from .lib import azure as _azure
+from .lib import azure as _azure, pydantic_function_tool as pydantic_function_tool
from .version import VERSION as VERSION
from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI
from .lib._old_api import *
src/openai/_client.py
@@ -151,7 +151,7 @@ class OpenAI(SyncAPIClient):
@property
@override
def qs(self) -> Querystring:
- return Querystring(array_format="comma")
+ return Querystring(array_format="brackets")
@property
@override
@@ -365,7 +365,7 @@ class AsyncOpenAI(AsyncAPIClient):
@property
@override
def qs(self) -> Querystring:
- return Querystring(array_format="comma")
+ return Querystring(array_format="brackets")
@property
@override
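
The query-string change is easiest to see in isolation. A minimal sketch against the internal helper (assuming `openai._qs.Querystring` and its `stringify` method, both private implementation details):

from openai._qs import Querystring

# "comma" joined repeated values under one key: ids=a,b
# "brackets" appends [] to the key per value: ids[]=a&ids[]=b
# (the brackets are percent-encoded in the actual output)
qs = Querystring(array_format="brackets")
print(qs.stringify({"ids": ["a", "b"]}))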
src/openai/_compat.py
@@ -159,6 +159,18 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
return model.parse_obj(data) # pyright: ignore[reportDeprecated]
+def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
+ if PYDANTIC_V2:
+ return model.model_validate_json(data)
+ return model.parse_raw(data) # pyright: ignore[reportDeprecated]
+
+
+def model_json_schema(model: type[_ModelT]) -> dict[str, Any]:
+ if PYDANTIC_V2:
+ return model.model_json_schema()
+ return model.schema() # pyright: ignore[reportDeprecated]
+
+
# generic models
if TYPE_CHECKING:
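
A quick sketch of what the two new helpers normalize across pydantic majors (they live in the internal `openai._compat` module, so treat the import as an implementation detail):

from pydantic import BaseModel

from openai._compat import model_json_schema, model_parse_json


class Location(BaseModel):
    city: str
    temperature: float


# pydantic v2 dispatches to model_validate_json / model_json_schema;
# pydantic v1 falls back to the deprecated parse_raw / schema equivalents
loc = model_parse_json(Location, '{"city": "SF", "temperature": 65}')
schema = model_json_schema(Location)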
src/openai/_exceptions.py
@@ -19,6 +19,8 @@ __all__ = [
"UnprocessableEntityError",
"RateLimitError",
"InternalServerError",
+ "LengthFinishReasonError",
+ "ContentFilterFinishReasonError",
]
@@ -125,3 +127,17 @@ class RateLimitError(APIStatusError):
class InternalServerError(APIStatusError):
pass
+
+
+class LengthFinishReasonError(OpenAIError):
+ def __init__(self) -> None:
+ super().__init__(
+ f"Could not parse response content as the length limit was reached",
+ )
+
+
+class ContentFilterFinishReasonError(OpenAIError):
+ def __init__(self) -> None:
+ super().__init__(
+ f"Could not parse response content as the request was rejected by the content filter",
+ )
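
Both new exceptions are raised by the `parse()` helpers when the completion cannot be parsed; the `length` case is exercised by `test_parse_max_tokens_reached` further down. A minimal sketch:

from pydantic import BaseModel

from openai import OpenAI, LengthFinishReasonError


class Location(BaseModel):
    city: str
    temperature: float


client = OpenAI()
try:
    client.beta.chat.completions.parse(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "What's the weather like in SF?"}],
        max_tokens=1,  # forces finish_reason="length" before valid JSON is emitted
        response_format=Location,
    )
except LengthFinishReasonError:
    pass  # the content was truncated, so there is nothing to parse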
tests/api_resources/beta/threads/test_runs.py
@@ -135,9 +135,9 @@ class TestRuns:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
stream=False,
temperature=1,
tool_choice="none",
@@ -299,9 +299,9 @@ class TestRuns:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
@@ -801,9 +801,9 @@ class TestAsyncRuns:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
stream=False,
temperature=1,
tool_choice="none",
@@ -965,9 +965,9 @@ class TestAsyncRuns:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
temperature=1,
tool_choice="none",
tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
tests/api_resources/beta/test_assistants.py
@@ -24,19 +24,19 @@ class TestAssistants:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
assistant = client.beta.assistants.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
assistant = client.beta.assistants.create(
- model="gpt-4-turbo",
- description="string",
- instructions="string",
+ model="gpt-4o",
+ description="description",
+ instructions="instructions",
metadata={},
- name="string",
- response_format="none",
+ name="name",
+ response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -59,7 +59,7 @@ class TestAssistants:
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.beta.assistants.with_raw_response.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.is_closed is True
@@ -70,7 +70,7 @@ class TestAssistants:
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.beta.assistants.with_streaming_response.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -83,14 +83,14 @@ class TestAssistants:
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
assistant = client.beta.assistants.retrieve(
- "string",
+ "assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.beta.assistants.with_raw_response.retrieve(
- "string",
+ "assistant_id",
)
assert response.is_closed is True
@@ -101,7 +101,7 @@ class TestAssistants:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.beta.assistants.with_streaming_response.retrieve(
- "string",
+ "assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -121,20 +121,20 @@ class TestAssistants:
@parametrize
def test_method_update(self, client: OpenAI) -> None:
assistant = client.beta.assistants.update(
- "string",
+ assistant_id="assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
def test_method_update_with_all_params(self, client: OpenAI) -> None:
assistant = client.beta.assistants.update(
- "string",
- description="string",
- instructions="string",
+ assistant_id="assistant_id",
+ description="description",
+ instructions="instructions",
metadata={},
- model="string",
- name="string",
- response_format="none",
+ model="model",
+ name="name",
+ response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -148,7 +148,7 @@ class TestAssistants:
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
response = client.beta.assistants.with_raw_response.update(
- "string",
+ assistant_id="assistant_id",
)
assert response.is_closed is True
@@ -159,7 +159,7 @@ class TestAssistants:
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
with client.beta.assistants.with_streaming_response.update(
- "string",
+ assistant_id="assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -173,7 +173,7 @@ class TestAssistants:
def test_path_params_update(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
client.beta.assistants.with_raw_response.update(
- "",
+ assistant_id="",
)
@parametrize
@@ -184,8 +184,8 @@ class TestAssistants:
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
assistant = client.beta.assistants.list(
- after="string",
- before="string",
+ after="after",
+ before="before",
limit=0,
order="asc",
)
@@ -214,14 +214,14 @@ class TestAssistants:
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
assistant = client.beta.assistants.delete(
- "string",
+ "assistant_id",
)
assert_matches_type(AssistantDeleted, assistant, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.beta.assistants.with_raw_response.delete(
- "string",
+ "assistant_id",
)
assert response.is_closed is True
@@ -232,7 +232,7 @@ class TestAssistants:
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.beta.assistants.with_streaming_response.delete(
- "string",
+ "assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -256,19 +256,19 @@ class TestAsyncAssistants:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.create(
- model="gpt-4-turbo",
- description="string",
- instructions="string",
+ model="gpt-4o",
+ description="description",
+ instructions="instructions",
metadata={},
- name="string",
- response_format="none",
+ name="name",
+ response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -291,7 +291,7 @@ class TestAsyncAssistants:
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.is_closed is True
@@ -302,7 +302,7 @@ class TestAsyncAssistants:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.create(
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -315,14 +315,14 @@ class TestAsyncAssistants:
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.retrieve(
- "string",
+ "assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.retrieve(
- "string",
+ "assistant_id",
)
assert response.is_closed is True
@@ -333,7 +333,7 @@ class TestAsyncAssistants:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.retrieve(
- "string",
+ "assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -353,20 +353,20 @@ class TestAsyncAssistants:
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.update(
- "string",
+ assistant_id="assistant_id",
)
assert_matches_type(Assistant, assistant, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.update(
- "string",
- description="string",
- instructions="string",
+ assistant_id="assistant_id",
+ description="description",
+ instructions="instructions",
metadata={},
- model="string",
- name="string",
- response_format="none",
+ model="model",
+ name="name",
+ response_format="auto",
temperature=1,
tool_resources={
"code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -380,7 +380,7 @@ class TestAsyncAssistants:
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.update(
- "string",
+ assistant_id="assistant_id",
)
assert response.is_closed is True
@@ -391,7 +391,7 @@ class TestAsyncAssistants:
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.update(
- "string",
+ assistant_id="assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -405,7 +405,7 @@ class TestAsyncAssistants:
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
await async_client.beta.assistants.with_raw_response.update(
- "",
+ assistant_id="",
)
@parametrize
@@ -416,8 +416,8 @@ class TestAsyncAssistants:
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.list(
- after="string",
- before="string",
+ after="after",
+ before="before",
limit=0,
order="asc",
)
@@ -446,14 +446,14 @@ class TestAsyncAssistants:
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
assistant = await async_client.beta.assistants.delete(
- "string",
+ "assistant_id",
)
assert_matches_type(AssistantDeleted, assistant, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.beta.assistants.with_raw_response.delete(
- "string",
+ "assistant_id",
)
assert response.is_closed is True
@@ -464,7 +464,7 @@ class TestAsyncAssistants:
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.beta.assistants.with_streaming_response.delete(
- "string",
+ "assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tests/api_resources/beta/test_threads.py
@@ -302,9 +302,9 @@ class TestThreads:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
stream=False,
temperature=1,
thread={
@@ -473,9 +473,9 @@ class TestThreads:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
temperature=1,
thread={
"messages": [
@@ -912,9 +912,9 @@ class TestAsyncThreads:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
stream=False,
temperature=1,
thread={
@@ -1083,9 +1083,9 @@ class TestAsyncThreads:
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={},
- model="gpt-4-turbo",
+ model="gpt-4o",
parallel_tool_calls=True,
- response_format="none",
+ response_format="auto",
temperature=1,
thread={
"messages": [
tests/api_resources/chat/test_completions.py
@@ -28,7 +28,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert_matches_type(ChatCompletion, completion, path=["response"])
@@ -42,7 +42,7 @@ class TestCompletions:
"name": "string",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
frequency_penalty=-2,
function_call="none",
functions=[
@@ -58,7 +58,7 @@ class TestCompletions:
n=1,
parallel_tool_calls=True,
presence_penalty=-2,
- response_format={"type": "json_object"},
+ response_format={"type": "text"},
seed=-9007199254740991,
service_tier="auto",
stop="string",
@@ -73,6 +73,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -81,6 +82,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -89,6 +91,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
],
@@ -107,7 +110,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.is_closed is True
@@ -124,7 +127,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -143,7 +146,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
)
completion_stream.response.close()
@@ -158,7 +161,7 @@ class TestCompletions:
"name": "string",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
frequency_penalty=-2,
function_call="none",
@@ -175,7 +178,7 @@ class TestCompletions:
n=1,
parallel_tool_calls=True,
presence_penalty=-2,
- response_format={"type": "json_object"},
+ response_format={"type": "text"},
seed=-9007199254740991,
service_tier="auto",
stop="string",
@@ -189,6 +192,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -197,6 +201,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -205,6 +210,7 @@ class TestCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
],
@@ -223,7 +229,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
)
@@ -240,7 +246,7 @@ class TestCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
) as response:
assert not response.is_closed
@@ -264,7 +270,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert_matches_type(ChatCompletion, completion, path=["response"])
@@ -278,7 +284,7 @@ class TestAsyncCompletions:
"name": "string",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
frequency_penalty=-2,
function_call="none",
functions=[
@@ -294,7 +300,7 @@ class TestAsyncCompletions:
n=1,
parallel_tool_calls=True,
presence_penalty=-2,
- response_format={"type": "json_object"},
+ response_format={"type": "text"},
seed=-9007199254740991,
service_tier="auto",
stop="string",
@@ -309,6 +315,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -317,6 +324,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -325,6 +333,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
],
@@ -343,7 +352,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.is_closed is True
@@ -360,7 +369,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -379,7 +388,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
)
await completion_stream.response.aclose()
@@ -394,7 +403,7 @@ class TestAsyncCompletions:
"name": "string",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
frequency_penalty=-2,
function_call="none",
@@ -411,7 +420,7 @@ class TestAsyncCompletions:
n=1,
parallel_tool_calls=True,
presence_penalty=-2,
- response_format={"type": "json_object"},
+ response_format={"type": "text"},
seed=-9007199254740991,
service_tier="auto",
stop="string",
@@ -425,6 +434,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -433,6 +443,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
{
@@ -441,6 +452,7 @@ class TestAsyncCompletions:
"description": "string",
"name": "string",
"parameters": {"foo": "bar"},
+ "strict": True,
},
},
],
@@ -459,7 +471,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
)
@@ -476,7 +488,7 @@ class TestAsyncCompletions:
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
stream=True,
) as response:
assert not response.is_closed
tests/api_resources/fine_tuning/test_jobs.py
@@ -24,7 +24,7 @@ class TestJobs:
@parametrize
def test_method_create(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@@ -32,7 +32,7 @@ class TestJobs:
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
hyperparameters={
"batch_size": "auto",
@@ -77,7 +77,7 @@ class TestJobs:
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
)
@@ -89,7 +89,7 @@ class TestJobs:
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
) as response:
assert not response.is_closed
@@ -263,7 +263,7 @@ class TestAsyncJobs:
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@@ -271,7 +271,7 @@ class TestAsyncJobs:
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
hyperparameters={
"batch_size": "auto",
@@ -316,7 +316,7 @@ class TestAsyncJobs:
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
)
@@ -328,7 +328,7 @@ class TestAsyncJobs:
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.create(
- model="gpt-3.5-turbo",
+ model="gpt-4o-mini",
training_file="file-abc123",
) as response:
assert not response.is_closed
tests/api_resources/test_models.py
@@ -21,14 +21,14 @@ class TestModels:
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
model = client.models.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
)
assert_matches_type(Model, model, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.models.with_raw_response.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
)
assert response.is_closed is True
@@ -39,7 +39,7 @@ class TestModels:
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.models.with_streaming_response.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,14 +84,14 @@ class TestModels:
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
model = client.models.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
)
assert_matches_type(ModelDeleted, model, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.models.with_raw_response.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
)
assert response.is_closed is True
@@ -102,7 +102,7 @@ class TestModels:
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.models.with_streaming_response.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -126,14 +126,14 @@ class TestAsyncModels:
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
model = await async_client.models.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
)
assert_matches_type(Model, model, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.models.with_raw_response.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
)
assert response.is_closed is True
@@ -144,7 +144,7 @@ class TestAsyncModels:
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.models.with_streaming_response.retrieve(
- "gpt-3.5-turbo",
+ "gpt-4o-mini",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,14 +189,14 @@ class TestAsyncModels:
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
model = await async_client.models.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
)
assert_matches_type(ModelDeleted, model, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.models.with_raw_response.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
)
assert response.is_closed is True
@@ -207,7 +207,7 @@ class TestAsyncModels:
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.models.with_streaming_response.delete(
- "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+ "ft:gpt-4o-mini:acemeco:suffix:abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
tests/lib/chat/__init__.py
tests/lib/chat/_utils.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import io
+import inspect
+from typing import Any, Iterable
+from typing_extensions import TypeAlias
+
+import rich
+import pytest
+import pydantic
+
+ReprArgs: TypeAlias = "Iterable[tuple[str | None, Any]]"
+
+
+def print_obj(obj: object, monkeypatch: pytest.MonkeyPatch) -> str:
+ """Pretty print an object to a string"""
+
+ # monkeypatch pydantic model printing so that model fields
+ # are always printed in the same order so we can reliably
+ # use this for snapshot tests
+ original_repr = pydantic.BaseModel.__repr_args__
+
+ def __repr_args__(self: pydantic.BaseModel) -> ReprArgs:
+ return sorted(original_repr(self), key=lambda arg: arg[0] or "")
+
+ with monkeypatch.context() as m:
+ m.setattr(pydantic.BaseModel, "__repr_args__", __repr_args__)
+
+ buf = io.StringIO()
+
+ console = rich.console.Console(file=buf, width=120)
+ console.print(obj)
+
+ string = buf.getvalue()
+
+ # we remove all `fn_name.<locals>.` occurrences
+ # so that we can share the same snapshots between
+ # pydantic v1 and pydantic v2 as their output for
+ # generic models differs, e.g.
+ #
+ # v2: `ParsedChatCompletion[test_parse_pydantic_model.<locals>.Location]`
+ # v1: `ParsedChatCompletion[Location]`
+ return clear_locals(string, stacklevel=2)
+
+
+def get_caller_name(*, stacklevel: int = 1) -> str:
+ frame = inspect.currentframe()
+ assert frame is not None
+
+ for i in range(stacklevel):
+ frame = frame.f_back
+ assert frame is not None, f"no {i}th frame"
+
+ return frame.f_code.co_name
+
+
+def clear_locals(string: str, *, stacklevel: int) -> str:
+ caller = get_caller_name(stacklevel=stacklevel + 1)
+ return string.replace(f"{caller}.<locals>.", "")
tests/lib/chat/test_completions.py
@@ -0,0 +1,633 @@
+from __future__ import annotations
+
+import os
+import json
+from typing import Any, Callable
+from typing_extensions import Literal, TypeVar
+
+import httpx
+import pytest
+from respx import MockRouter
+from pydantic import BaseModel
+from inline_snapshot import snapshot
+
+import openai
+from openai import OpenAI, AsyncOpenAI
+from openai._utils import assert_signatures_in_sync
+
+from ._utils import print_obj
+from ...conftest import base_url
+from ..schema_types.query import Query
+
+_T = TypeVar("_T")
+
+# all the snapshots in this file are auto-generated from the live API
+#
+# you can update them with
+#
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABLlmqdEOYnmmWATUI3dNKlfXa3", "object": "chat.completion", "created": 1722934207, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "I\'m unable to provide real-time weather updates. For the current weather in San Francisco, I recommend checking a reliable weather website or app.", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 14, "completion_tokens": 27, "total_tokens": 41}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion, monkeypatch) == snapshot(
+ """\
+ParsedChatCompletion[NoneType](
+ choices=[
+ ParsedChoice[NoneType](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content="I'm unable to provide real-time weather updates. For the current weather in San Francisco, I
+recommend checking a reliable weather website or app.",
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+ ],
+ created=1722934207,
+ id='chatcmpl-9tABLlmqdEOYnmmWATUI3dNKlfXa3',
+ model='gpt-4o-2024-08-06',
+ object='chat.completion',
+ service_tier=None,
+ system_fingerprint='fp_e1a05a1dce',
+ usage=CompletionUsage(completion_tokens=27, prompt_tokens=14, total_tokens=41)
+)
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABUwdw3Kbe3VPRnMofh9lJkFkLV", "object": "chat.completion", "created": 1722934216, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 14, "total_tokens": 31}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion, monkeypatch) == snapshot(
+ """\
+ParsedChatCompletion[Location](
+ choices=[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":65,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=65.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+ ],
+ created=1722934216,
+ id='chatcmpl-9tABUwdw3Kbe3VPRnMofh9lJkFkLV',
+ model='gpt-4o-2024-08-06',
+ object='chat.completion',
+ service_tier=None,
+ system_fingerprint='fp_e1a05a1dce',
+ usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31)
+)
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_multiple_choices(
+ client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ n=3,
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABVfBu4ZdyQFKe8RgsWsyL7UoIj", "object": "chat.completion", "created": 1722934217, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58.0,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 1, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":61,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 2, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 44, "total_tokens": 61}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":58.0,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=58.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ ),
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=1,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":61,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=61.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ ),
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=2,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":65,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=65.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
+ },
+ ],
+ tools=[openai.pydantic_function_tool(Query)],
+ response_format=Query,
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABVRLORZbby5zZjZhyrUdDU1XhB", "object": "chat.completion", "created": 1722934217, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_VcgQcA1C047fQnXDG0PQXG7O", "type": "function", "function": {"name": "Query", "arguments": "{\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"=\\",\\"value\\":\\"2022-05\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 195, "completion_tokens": 85, "total_tokens": 280}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices[0], monkeypatch) == snapshot(
+ """\
+ParsedChoice[Query](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Query](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"table_name":"orders","columns":["id","status","expected_delivery_date","delivered_at"],
+"conditions":[{"column":"ordered_at","operator":"=","value":"2022-05"},{"column":"status","operator":"=","value":"fulfil
+led"},{"column":"delivered_at","operator":">","value":{"column_name":"expected_delivery_date"}}],"order_by":"asc"}',
+ name='Query',
+ parsed_arguments=Query(
+ columns=[
+ <Column.id: 'id'>,
+ <Column.status: 'status'>,
+ <Column.expected_delivery_date: 'expected_delivery_date'>,
+ <Column.delivered_at: 'delivered_at'>
+ ],
+ conditions=[
+ Condition(column='ordered_at', operator=<Operator.eq: '='>, value='2022-05'),
+ Condition(column='status', operator=<Operator.eq: '='>, value='fulfilled'),
+ Condition(
+ column='delivered_at',
+ operator=<Operator.gt: '>'>,
+ value=DynamicValue(column_name='expected_delivery_date')
+ )
+ ],
+ order_by=<OrderBy.asc: 'asc'>,
+ table_name=<Table.orders: 'orders'>
+ )
+ ),
+ id='call_VcgQcA1C047fQnXDG0PQXG7O',
+ type='function'
+ )
+ ]
+ )
+)
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ with pytest.raises(openai.LengthFinishReasonError):
+ _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ max_tokens=1,
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABXbi3qast6oJvdaqQcK9C7k9fn", "object": "chat.completion", "created": 1722934219, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 17, "completion_tokens": 1, "total_tokens": 18}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "How do I make anthrax?",
+ },
+ ],
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABXJEffhEWxp24MeLxkDJCMtWmx", "object": "chat.completion", "created": 1722934219, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "refusal": "I\'m very sorry, but I can\'t assist with that."}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 12, "total_tokens": 29}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal="I'm very sorry, but I can't assist with that.",
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class GetWeatherArgs(BaseModel):
+ city: str
+ country: str
+ units: Literal["c", "f"] = "c"
+
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in Edinburgh?",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(GetWeatherArgs),
+ ],
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABgtKnF7Gbri4CmpOocmhg0UgBF", "object": "chat.completion", "created": 1722934228, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_9rqjEc1DQRADTYGVV45LbZwL", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\":\\"Edinburgh\\",\\"country\\":\\"UK\\",\\"units\\":\\"c\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 76, "completion_tokens": 24, "total_tokens": 100}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+ ),
+ id='call_9rqjEc1DQRADTYGVV45LbZwL',
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class GetWeatherArgs(BaseModel):
+ """Get the temperature for the given country/city combo"""
+
+ city: str
+ country: str
+ units: Literal["c", "f"] = "c"
+
+ class GetStockPrice(BaseModel):
+ ticker: str
+ exchange: str
+
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in Edinburgh?",
+ },
+ {
+ "role": "user",
+ "content": "What's the price of AAPL?",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(GetWeatherArgs),
+ openai.pydantic_function_tool(
+ GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
+ ),
+ ],
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tABqDpvDTi0Cg8PHtKdNSFoh4UJv", "object": "chat.completion", "created": 1722934238, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_Yeg67XmQbMcohm3NGj0g12ty", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\": \\"Edinburgh\\", \\"country\\": \\"GB\\", \\"units\\": \\"c\\"}"}}, {"id": "call_OGg3UZC2ksjAg7yrLXy8t1MO", "type": "function", "function": {"name": "get_stock_price", "arguments": "{\\"ticker\\": \\"AAPL\\", \\"exchange\\": \\"NASDAQ\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 149, "completion_tokens": 60, "total_tokens": 209}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city": "Edinburgh", "country": "GB", "units": "c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='GB', units='c')
+ ),
+ id='call_Yeg67XmQbMcohm3NGj0g12ty',
+ type='function'
+ ),
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+ name='get_stock_price',
+ parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+ ),
+ id='call_OGg3UZC2ksjAg7yrLXy8t1MO',
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ completion = _make_snapshot_request(
+ lambda c: c.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ tools=[
+ {
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "city": {"type": "string"},
+ "state": {"type": "string"},
+ },
+ "required": [
+ "city",
+ "state",
+ ],
+ "additionalProperties": False,
+ },
+ "strict": True,
+ },
+ }
+ ],
+ ),
+ content_snapshot=snapshot(
+ '{"id": "chatcmpl-9tAC0vDx3MfupXmsduSZavLVaLcrA", "object": "chat.completion", "created": 1722934248, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_iNznvWR4R81mizFFHjgh7o4i", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\":\\"San Francisco\\",\\"state\\":\\"CA\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 48, "completion_tokens": 19, "total_tokens": 67}, "system_fingerprint": "fp_e1a05a1dce"}'
+ ),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(completion.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"San Francisco","state":"CA"}',
+ name='get_weather',
+ parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
+ ),
+ id='call_iNznvWR4R81mizFFHjgh7o4i',
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+def test_parse_non_strict_tools(client: OpenAI) -> None:
+ with pytest.raises(
+ ValueError, match="`get_weather` is not strict. Only `strict` function tools can be auto-parsed"
+ ):
+ client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[],
+ tools=[
+ {
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "parameters": {},
+ },
+ }
+ ],
+ )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+ checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+ assert_signatures_in_sync(
+ checking_client.chat.completions.create,
+ checking_client.beta.chat.completions.parse,
+ exclude_params={"response_format", "stream"},
+ )
+
+
+def _make_snapshot_request(
+ func: Callable[[OpenAI], _T],
+ *,
+ content_snapshot: Any,
+ respx_mock: MockRouter,
+ mock_client: OpenAI,
+) -> _T:
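+    # When `OPENAI_LIVE=1` is set, hit the real API and let `inline-snapshot`
+    # verify/update the stored response; otherwise serve the recorded snapshot
+    # back through the respx mock.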
+ live = os.environ.get("OPENAI_LIVE") == "1"
+ if live:
+
+ def _on_response(response: httpx.Response) -> None:
+ # update the content snapshot
+ assert json.dumps(json.loads(response.read())) == content_snapshot
+
+ respx_mock.stop()
+
+ client = OpenAI(
+ http_client=httpx.Client(
+ event_hooks={
+ "response": [_on_response],
+ }
+ )
+ )
+ else:
+ respx_mock.post("/chat/completions").mock(
+ return_value=httpx.Response(
+ 200,
+ content=content_snapshot._old_value,
+ headers={"content-type": "application/json"},
+ )
+ )
+
+ client = mock_client
+
+ result = func(client)
+
+ if live:
+ client.close()
+
+ return result
tests/lib/chat/test_completions_streaming.py
@@ -0,0 +1,1047 @@
+from __future__ import annotations
+
+import os
+from typing import Any, Generic, Callable, Iterator, cast, overload
+from typing_extensions import Literal, TypeVar
+
+import rich
+import httpx
+import pytest
+from respx import MockRouter
+from pydantic import BaseModel
+from inline_snapshot import external, snapshot, outsource
+
+import openai
+from openai import OpenAI, AsyncOpenAI
+from openai._utils import assert_signatures_in_sync
+from openai._compat import model_copy
+from openai.lib.streaming.chat import (
+ ContentDoneEvent,
+ ChatCompletionStream,
+ ChatCompletionStreamEvent,
+ ChatCompletionStreamManager,
+ ParsedChatCompletionSnapshot,
+)
+from openai.lib._parsing._completions import ResponseFormatT
+
+from ._utils import print_obj
+from ...conftest import base_url
+
+_T = TypeVar("_T")
+
+# all the snapshots in this file are auto-generated from the live API
+#
+# you can update them with
+#
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ ),
+ content_snapshot=snapshot(external("b9d6bee9f9b8*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content="I'm unable to provide real-time weather updates. To get the latest weather information for San
+Francisco, I recommend checking a reliable weather website or using a weather app.",
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+ assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
+ """\
+ContentDoneEvent[NoneType](
+ content="I'm unable to provide real-time weather updates. To get the latest weather information for San Francisco, I
+recommend checking a reliable weather website or using a weather app.",
+ parsed=None,
+ type='content.done'
+)
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ done_snapshots: list[ParsedChatCompletionSnapshot] = []
+
+ def on_event(stream: ChatCompletionStream[Location], event: ChatCompletionStreamEvent[Location]) -> None:
+ if event.type == "content.done":
+ done_snapshots.append(model_copy(stream.current_completion_snapshot, deep=True))
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(external("ea9a417d533b*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ on_event=on_event,
+ )
+
+ assert len(done_snapshots) == 1
+ assert isinstance(done_snapshots[0].choices[0].message.parsed, Location)
+
+ for event in reversed(listener.events):
+ if event.type == "content.delta":
+ data = cast(Any, event.parsed)
+ assert isinstance(data["city"], str), data
+ assert isinstance(data["temperature"], (int, float)), data
+ assert isinstance(data["units"], str), data
+ break
+ else:
+ rich.print(listener.events)
+ raise AssertionError("Did not find a `content.delta` event")
+
+ assert print_obj(listener.stream.get_final_completion(), monkeypatch) == snapshot(
+ """\
+ParsedChatCompletion[Location](
+ choices=[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":63,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=63.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+ ],
+ created=1722934250,
+ id='chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv',
+ model='gpt-4o-so',
+ object='chat.completion',
+ service_tier=None,
+ system_fingerprint='fp_e1a05a1dce',
+ usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31)
+)
+"""
+ )
+ assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
+ """\
+ContentDoneEvent[Location](
+ content='{"city":"San Francisco","temperature":63,"units":"f"}',
+ parsed=Location(city='San Francisco', temperature=63.0, units='f'),
+ type='content.done'
+)
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_multiple_choices(
+ client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ n=3,
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(external("1437bd06a9d5*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert [e.type for e in listener.events] == snapshot(
+ [
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.delta",
+ "chunk",
+ "content.done",
+ "chunk",
+ "content.done",
+ "chunk",
+ "content.done",
+ "chunk",
+ ]
+ )
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":64,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=64.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ ),
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=1,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":68,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=68.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ ),
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=2,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content='{"city":"San Francisco","temperature":64,"units":"f"}',
+ function_call=None,
+ parsed=Location(city='San Francisco', temperature=64.0, units='f'),
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ with pytest.raises(openai.LengthFinishReasonError):
+ _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ max_tokens=1,
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(external("7ae6c1a2631b*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "How do I make anthrax?",
+ },
+ ],
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(external("d79326933c15*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.get_event_by_type("refusal.done"), monkeypatch) == snapshot("""\
+RefusalDoneEvent(refusal="I'm very sorry, but I can't assist with that request.", type='refusal.done')
+""")
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[Location](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal="I'm very sorry, but I can't assist with that request.",
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_content_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "Say foo",
+ },
+ ],
+ logprobs=True,
+ ),
+ content_snapshot=snapshot(external("70c7df71ce72*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj([e for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
+[
+ LogprobsContentDeltaEvent(
+ content=[ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[])],
+ snapshot=[
+ ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[])
+ ],
+ type='logprobs.content.delta'
+ ),
+ LogprobsContentDeltaEvent(
+ content=[ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])],
+ snapshot=[
+ ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+ ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+ ],
+ type='logprobs.content.delta'
+ ),
+ LogprobsContentDoneEvent(
+ content=[
+ ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+ ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+ ],
+ type='logprobs.content.done'
+ )
+]
+""")
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
+[
+ ParsedChoice[NoneType](
+ finish_reason='stop',
+ index=0,
+ logprobs=ChoiceLogprobs(
+ content=[
+ ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+ ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+ ],
+ refusal=None
+ ),
+ message=ParsedChatCompletionMessage[NoneType](
+ content='Foo!',
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+""")
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_refusal_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class Location(BaseModel):
+ city: str
+ temperature: float
+ units: Literal["c", "f"]
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "How do I make anthrax?",
+ },
+ ],
+ logprobs=True,
+ response_format=Location,
+ ),
+ content_snapshot=snapshot(external("cb77dc69b6c8*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj([e.type for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
+[
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.delta',
+ 'logprobs.refusal.done'
+]
+""")
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
+[
+ ParsedChoice[Location](
+ finish_reason='stop',
+ index=0,
+ logprobs=ChoiceLogprobs(
+ content=None,
+ refusal=[
+ ChatCompletionTokenLogprob(bytes=[73, 39, 109], logprob=-0.0010472201, token="I'm", top_logprobs=[]),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 118, 101, 114, 121],
+ logprob=-0.7292482,
+ token=' very',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 115, 111, 114, 114, 121],
+ logprob=-5.080963e-06,
+ token=' sorry',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(bytes=[44], logprob=-4.048445e-05, token=',', top_logprobs=[]),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 98, 117, 116],
+ logprob=-0.038046427,
+ token=' but',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(bytes=[32, 73], logprob=-0.0019351852, token=' I', top_logprobs=[]),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 99, 97, 110, 39, 116],
+ logprob=-0.008995773,
+ token=" can't",
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 97, 115, 115, 105, 115, 116],
+ logprob=-0.0033510819,
+ token=' assist',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 119, 105, 116, 104],
+ logprob=-0.0036033941,
+ token=' with',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(
+ bytes=[32, 116, 104, 97, 116],
+ logprob=-0.0015974608,
+ token=' that',
+ top_logprobs=[]
+ ),
+ ChatCompletionTokenLogprob(bytes=[46], logprob=-0.6339823, token='.', top_logprobs=[])
+ ]
+ ),
+ message=ParsedChatCompletionMessage[Location](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal="I'm very sorry, but I can't assist with that.",
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+""")
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class GetWeatherArgs(BaseModel):
+ city: str
+ country: str
+ units: Literal["c", "f"] = "c"
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in Edinburgh?",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(GetWeatherArgs),
+ ],
+ ),
+ content_snapshot=snapshot(external("ae070a447e1d*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[object](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[object](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+ ),
+ id='call_Vz6ZXciy6Y0PYfT4d9W7fYB4',
+ index=0,
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+ ),
+ id='call_Vz6ZXciy6Y0PYfT4d9W7fYB4',
+ index=0,
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ class GetWeatherArgs(BaseModel):
+ """Get the temperature for the given country/city combo"""
+
+ city: str
+ country: str
+ units: Literal["c", "f"] = "c"
+
+ class GetStockPrice(BaseModel):
+ ticker: str
+ exchange: str
+
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in Edinburgh?",
+ },
+ {
+ "role": "user",
+ "content": "What's the price of AAPL?",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(GetWeatherArgs),
+ openai.pydantic_function_tool(
+ GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
+ ),
+ ],
+ ),
+ content_snapshot=snapshot(external("a346213bec7a*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[object](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[object](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city": "Edinburgh", "country": "UK", "units": "c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+ ),
+ id='call_g4Q1vRbE0CaHGOs5if8mHsBq',
+ index=0,
+ type='function'
+ ),
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+ name='get_stock_price',
+ parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+ ),
+ id='call_gWj3HQxZEHnFvyJLEHIiJKBV',
+ index=1,
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+ completion = listener.stream.get_final_completion()
+ assert print_obj(completion.choices[0].message.tool_calls, monkeypatch) == snapshot(
+ """\
+[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city": "Edinburgh", "country": "UK", "units": "c"}',
+ name='GetWeatherArgs',
+ parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+ ),
+ id='call_g4Q1vRbE0CaHGOs5if8mHsBq',
+ index=0,
+ type='function'
+ ),
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+ name='get_stock_price',
+ parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+ ),
+ id='call_gWj3HQxZEHnFvyJLEHIiJKBV',
+ index=1,
+ type='function'
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF?",
+ },
+ ],
+ tools=[
+ {
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "city": {"type": "string"},
+ "state": {"type": "string"},
+ },
+ "required": [
+ "city",
+ "state",
+ ],
+ "additionalProperties": False,
+ },
+ "strict": True,
+ },
+ }
+ ],
+ ),
+ content_snapshot=snapshot(external("a7097cae6a1f*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[object](
+ finish_reason='tool_calls',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[object](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"San Francisco","state":"CA"}',
+ name='get_weather',
+ parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
+ ),
+ id='call_rQe3kzGnTr2epjx8HREg3F2a',
+ index=0,
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_non_pydantic_response_format(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "user",
+ "content": "What's the weather like in SF? Give me any JSON back",
+ },
+ ],
+ response_format={"type": "json_object"},
+ ),
+ content_snapshot=snapshot(external("3e0df46f250d*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content='{\\n "location": "San Francisco, CA",\\n "temperature": "N/A",\\n "conditions": "N/A",\\n
+"humidity": "N/A",\\n "wind_speed": "N/A",\\n "timestamp": "N/A",\\n "note": "Real-time weather data is not available.
+Please check a reliable weather service for the most up-to-date information on San Francisco\\'s weather conditions."}',
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_allows_non_strict_tools_but_no_parsing(
+ client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+ listener = _make_stream_snapshot_request(
+ lambda c: c.beta.chat.completions.stream(
+ model="gpt-4o-2024-08-06",
+ messages=[{"role": "user", "content": "what's the weather in NYC?"}],
+ tools=[
+ {
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
+ },
+ }
+ ],
+ ),
+ content_snapshot=snapshot(external("fb75060ede89*.bin")),
+ mock_client=client,
+ respx_mock=respx_mock,
+ )
+
+ assert print_obj(listener.get_event_by_type("tool_calls.function.arguments.done"), monkeypatch) == snapshot("""\
+FunctionToolCallArgumentsDoneEvent(
+ arguments='{"city":"New York City"}',
+ index=0,
+ name='get_weather',
+ parsed_arguments=None,
+ type='tool_calls.function.arguments.done'
+)
+""")
+
+ assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+ """\
+[
+ ParsedChoice[NoneType](
+ finish_reason='stop',
+ index=0,
+ logprobs=None,
+ message=ParsedChatCompletionMessage[NoneType](
+ content=None,
+ function_call=None,
+ parsed=None,
+ refusal=None,
+ role='assistant',
+ tool_calls=[
+ ParsedFunctionToolCall(
+ function=ParsedFunction(
+ arguments='{"city":"New York City"}',
+ name='get_weather',
+ parsed_arguments=None
+ ),
+ id='call_9rqjEc1DQRADTYGVV45LbZwL',
+ index=0,
+ type='function'
+ )
+ ]
+ )
+ )
+]
+"""
+ )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_stream_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+ checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+ assert_signatures_in_sync(
+ checking_client.chat.completions.create,
+ checking_client.beta.chat.completions.stream,
+ exclude_params={"response_format", "stream"},
+ )
+
+
+class StreamListener(Generic[ResponseFormatT]):
+ def __init__(self, stream: ChatCompletionStream[ResponseFormatT]) -> None:
+ self.stream = stream
+ self.events: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+ def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+ for event in self.stream:
+ self.events.append(event)
+ yield event
+
+ @overload
+ def get_event_by_type(self, event_type: Literal["content.done"]) -> ContentDoneEvent[ResponseFormatT] | None: ...
+
+ @overload
+ def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None: ...
+
+ def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None:
+ return next((e for e in self.events if e.type == event_type), None)
+
+
+def _make_stream_snapshot_request(
+ func: Callable[[OpenAI], ChatCompletionStreamManager[ResponseFormatT]],
+ *,
+ content_snapshot: Any,
+ respx_mock: MockRouter,
+ mock_client: OpenAI,
+ on_event: Callable[[ChatCompletionStream[ResponseFormatT], ChatCompletionStreamEvent[ResponseFormatT]], Any]
+ | None = None,
+) -> StreamListener[ResponseFormatT]:
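+    # When `OPENAI_LIVE=1` is set, stream from the real API and let
+    # `inline-snapshot` verify/update the external snapshot file; otherwise
+    # replay the recorded SSE content through the respx mock.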
+ live = os.environ.get("OPENAI_LIVE") == "1"
+ if live:
+
+ def _on_response(response: httpx.Response) -> None:
+ # update the content snapshot
+ assert outsource(response.read()) == content_snapshot
+
+ respx_mock.stop()
+
+ client = OpenAI(
+ http_client=httpx.Client(
+ event_hooks={
+ "response": [_on_response],
+ }
+ )
+ )
+ else:
+ respx_mock.post("/chat/completions").mock(
+ return_value=httpx.Response(
+ 200,
+ content=content_snapshot._old_value._load_value(),
+ headers={"content-type": "text/event-stream"},
+ )
+ )
+
+ client = mock_client
+
+ with func(client) as stream:
+ listener = StreamListener(stream)
+
+ for event in listener:
+ if on_event:
+ on_event(stream, event)
+
+ if live:
+ client.close()
+
+ return listener
tests/lib/schema_types/query.py
@@ -0,0 +1,51 @@
+from enum import Enum
+from typing import List, Union
+
+from pydantic import BaseModel
+
+
+class Table(str, Enum):
+ orders = "orders"
+ customers = "customers"
+ products = "products"
+
+
+class Column(str, Enum):
+ id = "id"
+ status = "status"
+ expected_delivery_date = "expected_delivery_date"
+ delivered_at = "delivered_at"
+ shipped_at = "shipped_at"
+ ordered_at = "ordered_at"
+ canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+ eq = "="
+ gt = ">"
+ lt = "<"
+ le = "<="
+ ge = ">="
+ ne = "!="
+
+
+class OrderBy(str, Enum):
+ asc = "asc"
+ desc = "desc"
+
+
+class DynamicValue(BaseModel):
+ column_name: str
+
+
+class Condition(BaseModel):
+ column: str
+ operator: Operator
+ value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+ table_name: Table
+ columns: List[Column]
+ conditions: List[Condition]
+ order_by: OrderBy
tests/lib/__init__.py
tests/lib/test_pydantic.py
@@ -0,0 +1,161 @@
+from __future__ import annotations
+
+from inline_snapshot import snapshot
+
+import openai
+from openai._compat import PYDANTIC_V2
+
+from .schema_types.query import Query
+
+
+def test_most_types() -> None:
+ if PYDANTIC_V2:
+ assert openai.pydantic_function_tool(Query)["function"] == snapshot(
+ {
+ "name": "Query",
+ "strict": True,
+ "parameters": {
+ "$defs": {
+ "Column": {
+ "enum": [
+ "id",
+ "status",
+ "expected_delivery_date",
+ "delivered_at",
+ "shipped_at",
+ "ordered_at",
+ "canceled_at",
+ ],
+ "title": "Column",
+ "type": "string",
+ },
+ "Condition": {
+ "properties": {
+ "column": {"title": "Column", "type": "string"},
+ "operator": {"$ref": "#/$defs/Operator"},
+ "value": {
+ "anyOf": [
+ {"type": "string"},
+ {"type": "integer"},
+ {"$ref": "#/$defs/DynamicValue"},
+ ],
+ "title": "Value",
+ },
+ },
+ "required": ["column", "operator", "value"],
+ "title": "Condition",
+ "type": "object",
+ "additionalProperties": False,
+ },
+ "DynamicValue": {
+ "properties": {"column_name": {"title": "Column Name", "type": "string"}},
+ "required": ["column_name"],
+ "title": "DynamicValue",
+ "type": "object",
+ "additionalProperties": False,
+ },
+ "Operator": {"enum": ["=", ">", "<", "<=", ">=", "!="], "title": "Operator", "type": "string"},
+ "OrderBy": {"enum": ["asc", "desc"], "title": "OrderBy", "type": "string"},
+ "Table": {"enum": ["orders", "customers", "products"], "title": "Table", "type": "string"},
+ },
+ "properties": {
+ "table_name": {"$ref": "#/$defs/Table"},
+ "columns": {
+ "items": {"$ref": "#/$defs/Column"},
+ "title": "Columns",
+ "type": "array",
+ },
+ "conditions": {
+ "items": {"$ref": "#/$defs/Condition"},
+ "title": "Conditions",
+ "type": "array",
+ },
+ "order_by": {"$ref": "#/$defs/OrderBy"},
+ },
+ "required": ["table_name", "columns", "conditions", "order_by"],
+ "title": "Query",
+ "type": "object",
+ "additionalProperties": False,
+ },
+ }
+ )
+ else:
+ assert openai.pydantic_function_tool(Query)["function"] == snapshot(
+ {
+ "name": "Query",
+ "strict": True,
+ "parameters": {
+ "title": "Query",
+ "type": "object",
+ "properties": {
+ "table_name": {"$ref": "#/definitions/Table"},
+ "columns": {"type": "array", "items": {"$ref": "#/definitions/Column"}},
+ "conditions": {
+ "title": "Conditions",
+ "type": "array",
+ "items": {"$ref": "#/definitions/Condition"},
+ },
+ "order_by": {"$ref": "#/definitions/OrderBy"},
+ },
+ "required": ["table_name", "columns", "conditions", "order_by"],
+ "definitions": {
+ "Table": {
+ "title": "Table",
+ "description": "An enumeration.",
+ "enum": ["orders", "customers", "products"],
+ "type": "string",
+ },
+ "Column": {
+ "title": "Column",
+ "description": "An enumeration.",
+ "enum": [
+ "id",
+ "status",
+ "expected_delivery_date",
+ "delivered_at",
+ "shipped_at",
+ "ordered_at",
+ "canceled_at",
+ ],
+ "type": "string",
+ },
+ "Operator": {
+ "title": "Operator",
+ "description": "An enumeration.",
+ "enum": ["=", ">", "<", "<=", ">=", "!="],
+ "type": "string",
+ },
+ "DynamicValue": {
+ "title": "DynamicValue",
+ "type": "object",
+ "properties": {"column_name": {"title": "Column Name", "type": "string"}},
+ "required": ["column_name"],
+ },
+ "Condition": {
+ "title": "Condition",
+ "type": "object",
+ "properties": {
+ "column": {"title": "Column", "type": "string"},
+ "operator": {"$ref": "#/definitions/Operator"},
+ "value": {
+ "title": "Value",
+ "anyOf": [
+ {"type": "string"},
+ {"type": "integer"},
+ {"$ref": "#/definitions/DynamicValue"},
+ ],
+ },
+ },
+ "required": ["column", "operator", "value"],
+ },
+ "OrderBy": {
+ "title": "OrderBy",
+ "description": "An enumeration.",
+ "enum": ["asc", "desc"],
+ "type": "string",
+ },
+ },
+ "additionalProperties": False,
+ },
+ }
+ )
tests/test_client.py
@@ -780,11 +780,11 @@ class TestOpenAI:
response = client.chat.completions.with_raw_response.create(
messages=[
{
- "content": "content",
+ "content": "string",
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.retries_taken == failures_before_success
@@ -811,11 +811,11 @@ class TestOpenAI:
with client.chat.completions.with_streaming_response.create(
messages=[
{
- "content": "content",
+ "content": "string",
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert response.retries_taken == failures_before_success
@@ -1574,11 +1574,11 @@ class TestAsyncOpenAI:
response = await client.chat.completions.with_raw_response.create(
messages=[
{
- "content": "content",
+ "content": "string",
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
)
assert response.retries_taken == failures_before_success
@@ -1606,10 +1606,10 @@ class TestAsyncOpenAI:
async with client.chat.completions.with_streaming_response.create(
messages=[
{
- "content": "content",
+ "content": "string",
"role": "system",
}
],
- model="gpt-4-turbo",
+ model="gpt-4o",
) as response:
assert response.retries_taken == failures_before_success
.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c36d30a94622922f83d56a025cdf0095ff7cb18a5138838c698c8443f21fb3a8.yml
api.md
@@ -1,7 +1,14 @@
# Shared Types
```python
-from openai.types import ErrorObject, FunctionDefinition, FunctionParameters
+from openai.types import (
+ ErrorObject,
+ FunctionDefinition,
+ FunctionParameters,
+ ResponseFormatJSONObject,
+ ResponseFormatJSONSchema,
+ ResponseFormatText,
+)
```
# Completions
@@ -35,6 +42,7 @@ from openai.types.chat import (
ChatCompletionChunk,
ChatCompletionContentPart,
ChatCompletionContentPartImage,
+ ChatCompletionContentPartRefusal,
ChatCompletionContentPartText,
ChatCompletionFunctionCallOption,
ChatCompletionFunctionMessageParam,
@@ -296,7 +304,6 @@ Types:
```python
from openai.types.beta import (
- AssistantResponseFormat,
AssistantResponseFormatOption,
AssistantToolChoice,
AssistantToolChoiceFunction,
@@ -397,6 +404,8 @@ from openai.types.beta.threads import (
MessageDeleted,
MessageDelta,
MessageDeltaEvent,
+ RefusalContentBlock,
+ RefusalDeltaBlock,
Text,
TextContentBlock,
TextContentBlockParam,
helpers.md
@@ -1,6 +1,280 @@
+# Structured Outputs Parsing Helpers
+
+The OpenAI API supports extracting JSON from the model with the `response_format` request param; for more details on the API, see [this guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+The SDK provides a `client.beta.chat.completions.parse()` method, a wrapper over `client.chat.completions.create()` that
+provides richer integrations with Python-specific types and returns a `ParsedChatCompletion` object, a subclass of the standard `ChatCompletion` class.
+
+## Auto-parsing response content with Pydantic models
+
+You can pass a pydantic model to the `.parse()` method and the SDK will automatically convert the model
+into a JSON schema, send it to the API and parse the response content back into the given model.
+
+```py
+from typing import List
+from pydantic import BaseModel
+from openai import OpenAI
+
+class Step(BaseModel):
+ explanation: str
+ output: str
+
+class MathResponse(BaseModel):
+ steps: List[Step]
+ final_answer: str
+
+client = OpenAI()
+completion = client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "solve 8x + 31 = 2"},
+ ],
+ response_format=MathResponse,
+)
+
+message = completion.choices[0].message
+if message.parsed:
+ print(message.parsed.steps)
+ print("answer: ", message.parsed.final_answer)
+else:
+ print(message.refusal)
+```
+
+## Auto-parsing function tool calls
+
+The `.parse()` method will also automatically parse `function` tool calls if:
+- You use the `openai.pydantic_function_tool()` helper method
+- You mark your tool schema with `"strict": True`
+
+For example:
+
+```py
+from enum import Enum
+from typing import List, Union
+from pydantic import BaseModel
+import openai
+
+class Table(str, Enum):
+ orders = "orders"
+ customers = "customers"
+ products = "products"
+
+class Column(str, Enum):
+ id = "id"
+ status = "status"
+ expected_delivery_date = "expected_delivery_date"
+ delivered_at = "delivered_at"
+ shipped_at = "shipped_at"
+ ordered_at = "ordered_at"
+ canceled_at = "canceled_at"
+
+class Operator(str, Enum):
+ eq = "="
+ gt = ">"
+ lt = "<"
+ le = "<="
+ ge = ">="
+ ne = "!="
+
+class OrderBy(str, Enum):
+ asc = "asc"
+ desc = "desc"
+
+class DynamicValue(BaseModel):
+ column_name: str
+
+class Condition(BaseModel):
+ column: str
+ operator: Operator
+ value: Union[str, int, DynamicValue]
+
+class Query(BaseModel):
+ table_name: Table
+ columns: List[Column]
+ conditions: List[Condition]
+ order_by: OrderBy
+
+client = openai.OpenAI()
+completion = client.beta.chat.completions.parse(
+ model="gpt-4o-2024-08-06",
+ messages=[
+ {
+ "role": "system",
+ "content": "You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.",
+ },
+ {
+ "role": "user",
+ "content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
+ },
+ ],
+ tools=[
+ openai.pydantic_function_tool(Query),
+ ],
+)
+
+tool_call = (completion.choices[0].message.tool_calls or [])[0]
+print(tool_call.function)
+assert isinstance(tool_call.function.parsed_arguments, Query)
+print(tool_call.function.parsed_arguments.table_name)
+```
+
+### Differences from `.create()`
+
+The `beta.chat.completions.parse()` method imposes some additional restrictions on its usage that `chat.completions.create()` does not.
+
+- If the completion finishes with `finish_reason` set to `length` or `content_filter`, a `LengthFinishReasonError` / `ContentFilterFinishReasonError` error will be raised, as sketched after this list.
+- Only strict function tools can be passed, e.g. `{'type': 'function', 'function': {..., 'strict': True}}`
+
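+A minimal sketch of handling the `length` case (the `Location` model and prompt here are illustrative):
+
+```py
+import openai
+from pydantic import BaseModel
+from openai import OpenAI
+
+class Location(BaseModel):
+    city: str
+    temperature: float
+
+client = OpenAI()
+
+try:
+    completion = client.beta.chat.completions.parse(
+        model="gpt-4o-2024-08-06",
+        messages=[{"role": "user", "content": "What's the weather like in SF?"}],
+        max_tokens=1,  # deliberately too small, so the JSON output is truncated
+        response_format=Location,
+    )
+except openai.LengthFinishReasonError:
+    # raised instead of returning, since the truncated content cannot be parsed
+    print("truncated response, retry with a higher max_tokens")
+```
+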
# Streaming Helpers
-OpenAI supports streaming responses when interacting with the [Assistant](#assistant-streaming-api) APIs.
+OpenAI supports streaming responses when interacting with the [Chat Completions](#chat-completions-api) & [Assistant](#assistant-streaming-api) APIs.
+
+## Chat Completions API
+
+The SDK provides a `.beta.chat.completions.stream()` method that wraps the `.chat.completions.create(stream=True)` stream, providing a more granular event API and automatic accumulation of each delta.
+
+It also supports all of the aforementioned [parsing helpers](#structured-outputs-parsing-helpers).
+
+Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+```py
+async with client.beta.chat.completions.stream(
+ model='gpt-4o-2024-08-06',
+ messages=[...],
+) as stream:
+ async for event in stream:
+ if event.type == 'content.delta':
+            print(event.delta, flush=True, end='')
+```
+
+When the context manager is entered, a `ChatCompletionStream` / `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator in the sync client and an async iterator in the async client. The full list of events yielded by the iterator is outlined [below](#chat-completions-events).
+
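+For example, the same loop with the synchronous client (a minimal sketch):
+
+```py
+with client.beta.chat.completions.stream(
+    model='gpt-4o-2024-08-06',
+    messages=[...],
+) as stream:
+    for event in stream:
+        if event.type == 'content.delta':
+            print(event.delta, flush=True, end='')
+```
+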
+When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+the context manager.
+
+### Chat Completions Events
+
+These events allow you to track the progress of the chat completion generation, access partial results, and handle different aspects of the stream separately.
+
+Below is a list of the different event types you may encounter, followed by a short sketch that puts them together:
+
+#### ChunkEvent
+
+Emitted for every chunk received from the API.
+
+- `type`: `"chunk"`
+- `chunk`: The raw `ChatCompletionChunk` object received from the API
+- `snapshot`: The current accumulated state of the chat completion
+
+#### ContentDeltaEvent
+
+Emitted for every chunk containing new content.
+
+- `type`: `"content.delta"`
+- `delta`: The new content string received in this chunk
+- `snapshot`: The accumulated content so far
+- `parsed`: The partially parsed content (if applicable)
+
+#### ContentDoneEvent
+
+Emitted when the content generation is complete. May be fired multiple times if there are multiple choices.
+
+- `type`: `"content.done"`
+- `content`: The full generated content
+- `parsed`: The fully parsed content (if applicable)
+
+#### RefusalDeltaEvent
+
+Emitted when a chunk contains part of a content refusal.
+
+- `type`: `"refusal.delta"`
+- `delta`: The new refusal content string received in this chunk
+- `snapshot`: The accumulated refusal content string so far
+
+#### RefusalDoneEvent
+
+Emitted when the refusal content is complete.
+
+- `type`: `"refusal.done"`
+- `refusal`: The full refusal content
+
+#### FunctionToolCallArgumentsDeltaEvent
+
+Emitted when a chunk contains part of a function tool call's arguments.
+
+- `type`: `"tool_calls.function.arguments.delta"`
+- `name`: The name of the function being called
+- `index`: The index of the tool call
+- `arguments`: The accumulated raw JSON string of arguments
+- `parsed_arguments`: The partially parsed arguments object
+- `arguments_delta`: The new JSON string fragment received in this chunk
+
+#### FunctionToolCallArgumentsDoneEvent
+
+Emitted when a function tool call's arguments are complete.
+
+- `type`: `"tool_calls.function.arguments.done"`
+- `name`: The name of the function being called
+- `index`: The index of the tool call
+- `arguments`: The full raw JSON string of arguments
+- `parsed_arguments`: The fully parsed arguments object. If you used `openai.pydantic_function_tool()`, this will be an instance of the given model, as shown in the sketch below.
+
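+For example, a minimal sketch that waits for complete tool call arguments (the `GetWeatherArgs` model and prompt are illustrative):
+
+```py
+import openai
+from pydantic import BaseModel
+
+class GetWeatherArgs(BaseModel):
+    city: str
+    country: str
+
+client = openai.OpenAI()
+
+with client.beta.chat.completions.stream(
+    model='gpt-4o-2024-08-06',
+    messages=[{'role': 'user', 'content': "What's the weather like in Edinburgh?"}],
+    tools=[openai.pydantic_function_tool(GetWeatherArgs)],
+) as stream:
+    for event in stream:
+        if event.type == 'tool_calls.function.arguments.done':
+            # with `pydantic_function_tool()`, `parsed_arguments` is a `GetWeatherArgs` instance
+            print(event.name, event.parsed_arguments)
+```
+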
+#### LogprobsContentDeltaEvent
+
+Emitted when a chunk contains new content [log probabilities](https://cookbook.openai.com/examples/using_logprobs).
+
+- `type`: `"logprobs.content.delta"`
+- `content`: A list of the new log probabilities received in this chunk
+- `snapshot`: A list of the accumulated log probabilities so far
+
+#### LogprobsContentDoneEvent
+
+Emitted when all content [log probabilities](https://cookbook.openai.com/examples/using_logprobs) have been received.
+
+- `type`: `"logprobs.content.done"`
+- `content`: The full list of token log probabilities for the content
+
+#### LogprobsRefusalDeltaEvent
+
+Emitted when a chunk contains new refusal [log probabilities](https://cookbook.openai.com/examples/using_logprobs).
+
+- `type`: `"logprobs.refusal.delta"`
+- `refusal`: A list of the new log probabilities received in this chunk
+- `snapshot`: A list of the accumulated log probabilities so far
+
+#### LogprobsRefusalDoneEvent
+
+Emitted when all refusal [log probabilities](https://cookbook.openai.com/examples/using_logprobs) have been received.
+
+- `type`: `"logprobs.refusal.done"`
+- `refusal`: The full list of token log probabilities for the refusal
+
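+Putting these together, a minimal sketch that dispatches on `event.type` (the `Location` model and prompt are illustrative):
+
+```py
+from pydantic import BaseModel
+from openai import OpenAI
+
+class Location(BaseModel):
+    city: str
+    temperature: float
+
+client = OpenAI()
+
+with client.beta.chat.completions.stream(
+    model='gpt-4o-2024-08-06',
+    messages=[{'role': 'user', 'content': "What's the weather like in SF?"}],
+    response_format=Location,
+) as stream:
+    for event in stream:
+        if event.type == 'content.delta':
+            # `event.parsed` holds the partially parsed `Location` fields so far
+            print(event.delta, flush=True, end='')
+        elif event.type == 'content.done':
+            print()
+            print('parsed:', event.parsed)
+        elif event.type == 'refusal.done':
+            print('refusal:', event.refusal)
+```
+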
+### Chat Completions stream methods
+
+A handful of helper methods are provided on the stream class for additional convenience:
+
+**`.get_final_completion()`**
+
+Returns the accumulated `ParsedChatCompletion` object.
+
+```py
+async with client.beta.chat.completions.stream(...) as stream:
+ ...
+
+completion = await stream.get_final_completion()
+print(completion.choices[0].message)
+```
+
+**`.until_done()`**
+
+If you want to wait for the stream to complete, you can use the `.until_done()` method.
+
+```py
+async with client.beta.chat.completions.stream(...) as stream:
+ await stream.until_done()
+ # stream is now finished
+```
## Assistant Streaming API
pyproject.toml
@@ -10,12 +10,13 @@ authors = [
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.7, <5",
+ "typing-extensions>=4.11, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
"cached-property; python_version < '3.8'",
- "tqdm > 4"
+ "tqdm > 4",
+ "jiter>=0.4.0, <1",
]
requires-python = ">= 3.7.1"
classifiers = [
requirements-dev.lock
@@ -72,6 +72,8 @@ importlib-metadata==7.0.0
iniconfig==2.0.0
# via pytest
inline-snapshot==0.10.2
+jiter==0.5.0
+ # via openai
markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
@@ -169,7 +171,7 @@ types-pytz==2024.1.0.20240417
types-toml==0.10.8.20240310
# via inline-snapshot
types-tqdm==4.66.0.2
-typing-extensions==4.8.0
+typing-extensions==4.12.2
# via azure-core
# via black
# via mypy
requirements.lock
@@ -30,6 +30,8 @@ httpx==0.25.2
idna==3.4
# via anyio
# via httpx
+jiter==0.5.0
+ # via openai
numpy==1.26.4
# via openai
# via pandas
@@ -56,7 +58,7 @@ tqdm==4.66.1
# via openai
types-pytz==2024.1.0.20240417
# via pandas-stubs
-typing-extensions==4.8.0
+typing-extensions==4.12.2
# via openai
# via pydantic
# via pydantic-core