Commit 48188cc8

stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
2025-07-30 19:40:14
release: 1.97.2 (#2494)
tag: v1.97.2
* codegen metadata
* fix(parsing): ignore empty metadata
* chore(internal): refactor stream event processing to be more future proof
* fixup!
* fixup!
* fixup!
* update comment
* chore(project): add settings file for vscode
* flip logic around
* release: 1.97.2

---------

Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Co-authored-by: David Meadows <dmeadows@stainless.com>
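In essence, the refactor flips the stream dispatch: instead of allow-listing known event prefixes (`response.`, `transcript.`, `image_edit.`, `image_generation.`) and treating everything else as an Assistants-style payload, the code now special-cases only the Assistants `thread.*` events (whose payloads carry no "event" key) and parses every other event as a plain payload by default, so newly introduced event types need no further code changes. A minimal sketch of the flipped branch, assuming stand-ins for the real `sse`, `process_data`, `cast_to`, and `response` objects from `src/openai/_streaming.py`:

# Simplified sketch of the flipped dispatch in this commit; not the
# library's exact code. `sse` exposes `.event` and `.json()`, and
# `process_data` is the SDK's internal parser, both passed in here
# rather than imported.
def dispatch_event(sse, cast_to, response, process_data):
    data = sse.json()

    if sse.event and sse.event.startswith("thread."):
        # Assistants `thread.*` payloads carry no "event" key, so the
        # event name is re-wrapped alongside the data before parsing.
        return process_data(
            data={"data": data, "event": sse.event},
            cast_to=cast_to,
            response=response,
        )

    # Default branch: every other event, including any type added in
    # the future, is parsed as a plain payload.
    return process_data(data=data, cast_to=cast_to, response=response)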
1 parent e6c6757
.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.analysis.importFormat": "relative",
+}
src/openai/_streaming.py
@@ -59,14 +59,11 @@ class Stream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None or (
-                sse.event.startswith("response.") or 
-                sse.event.startswith("transcript.") or 
-                sse.event.startswith("image_edit.") or 
-                sse.event.startswith("image_generation.")
-            ):
+            # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
+            if sse.event and sse.event.startswith("thread."):
                 data = sse.json()
-                if is_mapping(data) and data.get("error"):
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
                     message = None
                     error = data.get("error")
                     if is_mapping(error):
@@ -80,12 +77,10 @@ class Stream(Generic[_T]):
                         body=data["error"],
                     )
 
-                yield process_data(data=data, cast_to=cast_to, response=response)
-
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
             else:
                 data = sse.json()
-
-                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                if is_mapping(data) and data.get("error"):
                     message = None
                     error = data.get("error")
                     if is_mapping(error):
@@ -99,7 +94,7 @@ class Stream(Generic[_T]):
                         body=data["error"],
                     )
 
-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+                yield process_data(data=data, cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         for _sse in iterator:
@@ -166,9 +161,11 @@ class AsyncStream(Generic[_T]):
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."):
+            # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
+            if sse.event and sse.event.startswith("thread."):
                 data = sse.json()
-                if is_mapping(data) and data.get("error"):
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
                     message = None
                     error = data.get("error")
                     if is_mapping(error):
@@ -182,12 +179,10 @@ class AsyncStream(Generic[_T]):
                         body=data["error"],
                     )
 
-                yield process_data(data=data, cast_to=cast_to, response=response)
-
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
             else:
                 data = sse.json()
-
-                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                if is_mapping(data) and data.get("error"):
                     message = None
                     error = data.get("error")
                     if is_mapping(error):
@@ -201,7 +196,7 @@ class AsyncStream(Generic[_T]):
                         body=data["error"],
                     )
 
-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
+                yield process_data(data=data, cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         async for _sse in iterator:
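From the consumer's side nothing changes, but events outside the `thread.*` family now flow through the default branch even when the server introduces new event names. A hypothetical illustration of consuming a stream under the new logic (the model name and prompt are placeholders, not part of this commit):

# Hypothetical consumer sketch: any non-"thread." event is parsed as a
# plain payload, so streams keep working when new event types appear.
from openai import OpenAI

client = OpenAI()
stream = client.responses.create(model="gpt-4.1", input="Say hi", stream=True)
for event in stream:
    print(event.type)  # e.g. "response.output_text.delta"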
src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.97.1"  # x-release-please-version
+__version__ = "1.97.2"  # x-release-please-version
.gitignore
@@ -1,5 +1,4 @@
 .prism.log
-.vscode
 _dev
 
 __pycache__
.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.97.1"
+  ".": "1.97.2"
 }
\ No newline at end of file
CHANGELOG.md
@@ -1,5 +1,14 @@
 # Changelog
 
+## 1.97.2 (2025-07-30)
+
+Full Changelog: [v1.97.1...v1.97.2](https://github.com/openai/openai-python/compare/v1.97.1...v1.97.2)
+
+### Chores
+
+* **client:** refactor streaming slightly to better future proof it ([71c0c74](https://github.com/openai/openai-python/commit/71c0c747132221b798e419bc5a37baf67173d34e))
+* **project:** add settings file for vscode ([29c22c9](https://github.com/openai/openai-python/commit/29c22c90fd229983355089f95d0bba9de15efedb))
+
 ## 1.97.1 (2025-07-22)
 
 Full Changelog: [v1.97.0...v1.97.1](https://github.com/openai/openai-python/compare/v1.97.0...v1.97.1)
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.97.1"
+version = "1.97.2"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"