Commit 62b73b9b

Atty Eleti <atty@openai.com>
2023-03-02 01:38:18
Add ChatCompletions and Audio endpoints (#237)
tag: v0.27.0
1 parent 62ebb44
openai/api_resources/__init__.py
@@ -1,3 +1,5 @@
+from openai.api_resources.audio import Audio  # noqa: F401
+from openai.api_resources.chat_completion import ChatCompletion  # noqa: F401
 from openai.api_resources.completion import Completion  # noqa: F401
 from openai.api_resources.customer import Customer  # noqa: F401
 from openai.api_resources.deployment import Deployment  # noqa: F401
openai/api_resources/audio.py
@@ -0,0 +1,205 @@
+from typing import Any, List
+
+import openai
+from openai import api_requestor, util
+from openai.api_resources.abstract import APIResource
+
+
+class Audio(APIResource):
+    OBJECT_NAME = "audio"
+
+    @classmethod
+    def _get_url(cls, action):
+        return cls.class_url() + f"/{action}"
+
+    @classmethod
+    def _prepare_request(
+        cls,
+        file,
+        filename,
+        model,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor = api_requestor.APIRequestor(
+            api_key,
+            api_base=api_base or openai.api_base,
+            api_type=api_type,
+            api_version=api_version,
+            organization=organization,
+        )
+        files: List[Any] = []
+        data = {
+            "model": model,
+            **params,
+        }
+        files.append(("file", (filename, file, "application/octet-stream")))
+        return requestor, files, data
+
+    @classmethod
+    def transcribe(
+        cls,
+        model,
+        file,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, file.name, model, **params)
+        url = cls._get_url("transcriptions")
+        response, _, api_key = requestor.request("post", url, files=files, params=data)
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    def translate(
+        cls,
+        model,
+        file,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, file.name, model, **params)
+        url = cls._get_url("translations")
+        response, _, api_key = requestor.request("post", url, files=files, params=data)
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    def transcribe_raw(
+        cls,
+        model,
+        file,
+        filename,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, filename, model, **params)
+        url = cls._get_url("transcriptions")
+        response, _, api_key = requestor.request("post", url, files=files, params=data)
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    def translate_raw(
+        cls,
+        model,
+        file,
+        filename,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, filename, model, **params)
+        url = cls._get_url("translations")
+        response, _, api_key = requestor.request("post", url, files=files, params=data)
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    async def atranscribe(
+        cls,
+        model,
+        file,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, file.name, model, **params)
+        url = cls._get_url("transcriptions")
+        response, _, api_key = await requestor.arequest(
+            "post", url, files=files, params=data
+        )
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    async def atranslate(
+        cls,
+        model,
+        file,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, file.name, model, **params)
+        url = cls._get_url("translations")
+        response, _, api_key = await requestor.arequest(
+            "post", url, files=files, params=data
+        )
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    async def atranscribe_raw(
+        cls,
+        model,
+        file,
+        filename,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, filename, model, **params)
+        url = cls._get_url("transcriptions")
+        response, _, api_key = await requestor.arequest(
+            "post", url, files=files, params=data
+        )
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
+
+    @classmethod
+    async def atranslate_raw(
+        cls,
+        model,
+        file,
+        filename,
+        api_key=None,
+        api_base=None,
+        api_type=None,
+        api_version=None,
+        organization=None,
+        **params,
+    ):
+        requestor, files, data = cls._prepare_request(file, filename, model, **params)
+        url = cls._get_url("translations")
+        response, _, api_key = await requestor.arequest(
+            "post", url, files=files, params=data
+        )
+        return util.convert_to_openai_object(
+            response, api_key, api_version, organization
+        )
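
The new `Audio` resource exposes paired sync/async helpers. `transcribe`/`translate` read the upload's filename from the file object's `.name`, while the `*_raw` variants take an explicit filename for in-memory buffers that have none. A minimal usage sketch (the file path, key placeholder, and buffer contents are illustrative, not part of this commit):

```python
import io

import openai

openai.api_key = "sk-..."  # supply your API key however you choose

# Files opened from disk carry a .name, so transcribe() can use it directly.
with open("speech.mp3", "rb") as f:
    transcript = openai.Audio.transcribe("whisper-1", f)

# In-memory buffers have no .name; pass the filename explicitly via the *_raw variant.
buf = io.BytesIO(b"...audio bytes...")  # placeholder bytes
translation = openai.Audio.translate_raw("whisper-1", buf, "speech.mp3")
```
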
openai/api_resources/chat_completion.py
@@ -0,0 +1,50 @@
+import time
+
+from openai import util
+from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
+from openai.error import TryAgain
+
+
+class ChatCompletion(EngineAPIResource):
+    engine_required = False
+    OBJECT_NAME = "chat.completions"
+
+    @classmethod
+    def create(cls, *args, **kwargs):
+        """
+        Creates a new chat completion for the provided messages and parameters.
+
+        See https://platform.openai.com/docs/api-reference/chat-completions/create
+        for a list of valid parameters.
+        """
+        start = time.time()
+        timeout = kwargs.pop("timeout", None)
+
+        while True:
+            try:
+                return super().create(*args, **kwargs)
+            except TryAgain as e:
+                if timeout is not None and time.time() > start + timeout:
+                    raise
+
+                util.log_info("Waiting for model to warm up", error=e)
+
+    @classmethod
+    async def acreate(cls, *args, **kwargs):
+        """
+        Creates a new chat completion for the provided messages and parameters.
+
+        See https://platform.openai.com/docs/api-reference/chat-completions/create
+        for a list of valid parameters.
+        """
+        start = time.time()
+        timeout = kwargs.pop("timeout", None)
+
+        while True:
+            try:
+                return await super().acreate(*args, **kwargs)
+            except TryAgain as e:
+                if timeout is not None and time.time() > start + timeout:
+                    raise
+
+                util.log_info("Waiting for model to warm up", error=e)
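
`ChatCompletion` mirrors the existing `Completion` resource: `create`/`acreate` keep retrying on `TryAgain` until an optional `timeout` (in seconds) elapses, so `timeout` here bounds the warm-up loop rather than the HTTP request itself. A minimal sketch of a call (API key placeholder assumed):

```python
import openai

openai.api_key = "sk-..."  # supply your API key however you choose

resp = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    timeout=30,  # popped by create(); caps the TryAgain retry loop, in seconds
)
print(resp.choices[0].message.content)
```
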
openai/api_resources/completion.py
@@ -14,7 +14,7 @@ class Completion(EngineAPIResource):
         """
         Creates a new completion for the provided prompt and parameters.
 
-        See https://beta.openai.com/docs/api-reference/completions/create for a list
+        See https://platform.openai.com/docs/api-reference/completions/create for a list
         of valid parameters.
         """
         start = time.time()
@@ -34,7 +34,7 @@ class Completion(EngineAPIResource):
         """
         Creates a new completion for the provided prompt and parameters.
 
-        See https://beta.openai.com/docs/api-reference/completions/create for a list
+        See https://platform.openai.com/docs/api-reference/completions/create for a list
         of valid parameters.
         """
         start = time.time()
openai/api_resources/embedding.py
@@ -16,7 +16,7 @@ class Embedding(EngineAPIResource):
         """
         Creates a new embedding for the provided input and parameters.
 
-        See https://beta.openai.com/docs/api-reference/embeddings for a list
+        See https://platform.openai.com/docs/api-reference/embeddings for a list
         of valid parameters.
         """
         start = time.time()
@@ -56,7 +56,7 @@ class Embedding(EngineAPIResource):
         """
         Creates a new embedding for the provided input and parameters.
 
-        See https://beta.openai.com/docs/api-reference/embeddings for a list
+        See https://platform.openai.com/docs/api-reference/embeddings for a list
         of valid parameters.
         """
         start = time.time()
openai/tests/asyncio/test_endpoints.py
@@ -7,17 +7,18 @@ from aiohttp import ClientSession
 import openai
 from openai import error
 
-
 pytestmark = [pytest.mark.asyncio]
 
 
 # FILE TESTS
 async def test_file_upload():
     result = await openai.File.acreate(
-        file=io.StringIO(json.dumps({"text": "test file data"})),
-        purpose="search",
+        file=io.StringIO(
+            json.dumps({"prompt": "test file data", "completion": "tada"})
+        ),
+        purpose="fine-tune",
     )
-    assert result.purpose == "search"
+    assert result.purpose == "fine-tune"
     assert "id" in result
 
     result = await openai.File.aretrieve(id=result.id)
openai/tests/test_endpoints.py
@@ -22,6 +22,32 @@ def test_file_upload():
     assert result.status == "uploaded"
 
 
+# CHAT COMPLETION TESTS
+def test_chat_completions():
+    result = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello!"}]
+    )
+    assert len(result.choices) == 1
+
+
+def test_chat_completions_multiple():
+    result = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello!"}], n=5
+    )
+    assert len(result.choices) == 5
+
+
+def test_chat_completions_streaming():
+    result = None
+    events = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Hello!"}],
+        stream=True,
+    )
+    for result in events:
+        assert len(result.choices) == 1
+
+
 # COMPLETION TESTS
 def test_completions():
     result = openai.Completion.create(prompt="This was a test", n=5, engine="ada")
openai/tests/test_long_examples_validator.py
@@ -4,7 +4,12 @@ from tempfile import NamedTemporaryFile
 
 import pytest
 
-from openai.datalib import HAS_PANDAS, HAS_NUMPY, NUMPY_INSTRUCTIONS, PANDAS_INSTRUCTIONS
+from openai.datalib import (
+    HAS_NUMPY,
+    HAS_PANDAS,
+    NUMPY_INSTRUCTIONS,
+    PANDAS_INSTRUCTIONS,
+)
 
 
 @pytest.mark.skipif(not HAS_PANDAS, reason=PANDAS_INSTRUCTIONS)
@@ -29,7 +34,8 @@ def test_long_examples_validator() -> None:
         {"prompt": long_prompt, "completion": long_completion},  # 2 of 2 duplicates
     ]
 
-    with NamedTemporaryFile(suffix="jsonl", mode="w") as training_data:
+    with NamedTemporaryFile(suffix=".jsonl", mode="w") as training_data:
+        print(training_data.name)
         for prompt_completion_row in unprepared_training_data:
             training_data.write(json.dumps(prompt_completion_row) + "\n")
             training_data.flush()
openai/__init__.py
@@ -7,6 +7,8 @@ from contextvars import ContextVar
 from typing import Optional, TYPE_CHECKING
 
 from openai.api_resources import (
+    Audio,
+    ChatCompletion,
     Completion,
     Customer,
     Edit,
@@ -52,6 +54,8 @@ aiosession: ContextVar[Optional["ClientSession"]] = ContextVar(
 
 __all__ = [
     "APIError",
+    "Audio",
+    "ChatCompletion",
     "Completion",
     "Customer",
     "Edit",
@@ -74,7 +78,7 @@ __all__ = [
     "app_info",
     "ca_bundle_path",
     "debug",
-    "enable_elemetry",
+    "enable_telemetry",
     "log",
     "organization",
     "proxy",
openai/api_requestor.py
@@ -476,8 +476,8 @@ class APIRequestor:
                 abs_url = _build_api_url(abs_url, encoded_params)
         elif method in {"post", "put"}:
             if params and files:
-                raise ValueError("At most one of params and files may be specified.")
-            if params:
+                data = params
+            if params and not files:
                 data = json.dumps(params).encode()
                 headers["Content-Type"] = "application/json"
         else:
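
This relaxes the old params-or-files restriction: when both are present, the params are now sent as multipart form fields alongside the file, which is exactly what the Audio endpoints need (a `model` field plus the upload). Roughly the request that results, shown with the third-party `requests` library purely as an illustration (the SDK itself goes through its own requestor; the endpoint path and field names are taken from the diff above):

```python
import requests  # illustration only, not a dependency of this SDK

api_key = "sk-..."  # supply your API key however you choose

with open("speech.mp3", "rb") as f:
    resp = requests.post(
        "https://api.openai.com/v1/audio/transcriptions",
        headers={"Authorization": f"Bearer {api_key}"},
        # form fields travel alongside the file instead of raising ValueError
        data={"model": "whisper-1"},
        files={"file": ("speech.mp3", f, "application/octet-stream")},
    )
print(resp.json())
```
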
openai/cli.py
@@ -108,6 +108,44 @@ class Engine:
         display(engines)
 
 
+class ChatCompletion:
+    @classmethod
+    def create(cls, args):
+        if args.n is not None and args.n > 1 and args.stream:
+            raise ValueError(
+                "Can't stream chat completions with n>1 with the current CLI"
+            )
+
+        messages = [
+            {"role": role, "content": content} for role, content in args.message
+        ]
+
+        resp = openai.ChatCompletion.create(
+            # Required
+            model=args.model,
+            messages=messages,
+            # Optional
+            n=args.n,
+            max_tokens=100,
+            temperature=args.temperature,
+            top_p=args.top_p,
+            stop=args.stop,
+            stream=args.stream,
+        )
+        if not args.stream:
+            resp = [resp]
+
+        for part in resp:
+            choices = part["choices"]
+            for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
+                if len(choices) > 1:
+                    sys.stdout.write("===== Chat Completion {} =====\n".format(c_idx))
+                sys.stdout.write(c["message"]["content"])
+                if len(choices) > 1:
+                    sys.stdout.write("\n")
+                sys.stdout.flush()
+
+
 class Completion:
     @classmethod
     def create(cls, args):
@@ -255,6 +293,43 @@ class Image:
         print(resp)
 
 
+class Audio:
+    @classmethod
+    def transcribe(cls, args):
+        with open(args.file, "rb") as r:
+            file_reader = BufferReader(r.read(), desc="Upload progress")
+
+        resp = openai.Audio.transcribe_raw(
+            # Required
+            model=args.model,
+            file=file_reader,
+            filename=args.file,
+            # Optional
+            response_format=args.response_format,
+            language=args.language,
+            temperature=args.temperature,
+            prompt=args.prompt,
+        )
+        print(resp)
+
+    @classmethod
+    def translate(cls, args):
+        with open(args.file, "rb") as r:
+            file_reader = BufferReader(r.read(), desc="Upload progress")
+        resp = openai.Audio.translate_raw(
+            # Required
+            model=args.model,
+            file=file_reader,
+            filename=args.file,
+            # Optional
+            response_format=args.response_format,
+            language=args.language,
+            temperature=args.temperature,
+            prompt=args.prompt,
+        )
+        print(resp)
+
+
 class FineTune:
     @classmethod
     def list(cls, args):
@@ -505,7 +580,6 @@ class FineTune:
 
     @classmethod
     def prepare_data(cls, args):
-
         sys.stdout.write("Analyzing...\n")
         fname = args.file
         auto_accept = args.quiet
@@ -633,12 +707,68 @@ Mutually exclusive with `top_p`.""",
     )
     sub.set_defaults(func=Engine.generate)
 
+    # Chat Completions
+    sub = subparsers.add_parser("chat_completions.create")
+
+    sub._action_groups.pop()
+    req = sub.add_argument_group("required arguments")
+    opt = sub.add_argument_group("optional arguments")
+
+    req.add_argument(
+        "-m",
+        "--model",
+        help="The model to use.",
+        required=True,
+    )
+    req.add_argument(
+        "-g",
+        "--message",
+        action="append",
+        nargs=2,
+        metavar=("ROLE", "CONTENT"),
+        help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
+        required=True,
+    )
+    opt.add_argument(
+        "-n",
+        "--n",
+        help="How many completions to generate for the conversation.",
+        type=int,
+    )
+    opt.add_argument(
+        "-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int
+    )
+    opt.add_argument(
+        "-t",
+        "--temperature",
+        help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+
+Mutually exclusive with `top_p`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "-P",
+        "--top_p",
+        help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
+
+            Mutually exclusive with `temperature`.""",
+        type=float,
+    )
+    opt.add_argument(
+        "--stop",
+        help="A stop sequence at which to stop generating tokens for the message.",
+    )
+    opt.add_argument(
+        "--stream", help="Stream messages as they're ready.", action="store_true"
+    )
+    sub.set_defaults(func=ChatCompletion.create)
+
     # Completions
     sub = subparsers.add_parser("completions.create")
     sub.add_argument(
         "-e",
         "--engine",
-        help="The engine to use. See https://beta.openai.com/docs/engines for more about what engines are available.",
+        help="The engine to use. See https://platform.openai.com/docs/engines for more about what engines are available.",
     )
     sub.add_argument(
         "-m",
@@ -725,7 +855,7 @@ Mutually exclusive with `top_p`.""",
     sub.add_argument(
         "-p",
         "--purpose",
-        help="Why are you uploading this file? (see https://beta.openai.com/docs/api-reference/ for purposes)",
+        help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
         required=True,
     )
     sub.set_defaults(func=File.create)
@@ -924,6 +1054,30 @@ Mutually exclusive with `top_p`.""",
     sub.add_argument("--response-format", type=str, default="url")
     sub.set_defaults(func=Image.create_variation)
 
+    # Audio
+    # transcriptions
+    sub = subparsers.add_parser("audio.transcribe")
+    # Required
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("--response-format", type=str)
+    sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=Audio.transcribe)
+    # translations
+    sub = subparsers.add_parser("audio.translate")
+    # Required
+    sub.add_argument("-m", "--model", type=str, default="whisper-1")
+    sub.add_argument("-f", "--file", type=str, required=True)
+    # Optional
+    sub.add_argument("--response-format", type=str)
+    sub.add_argument("--language", type=str)
+    sub.add_argument("-t", "--temperature", type=float)
+    sub.add_argument("--prompt", type=str)
+    sub.set_defaults(func=Audio.translate)
+
 
 def wandb_register(parser):
     subparsers = parser.add_subparsers(
openai/validators.py
@@ -242,7 +242,7 @@ def common_prompt_suffix_validator(df):
             immediate_msg += f"\n  WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix"
 
     else:
-        immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
+        immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
 
     if common_suffix == "":
         optional_msg = (
@@ -393,7 +393,7 @@ def common_completion_suffix_validator(df):
             immediate_msg += f"\n  WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending"
 
     else:
-        immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
+        immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
 
     if common_suffix == "":
         optional_msg = (
@@ -428,7 +428,7 @@ def completions_space_start_validator(df):
     immediate_msg = None
 
     if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ":
-        immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
+        immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
         optional_msg = "Add a whitespace character to the beginning of the completion"
         optional_fn = add_space_start
     return Remediation(
@@ -462,7 +462,7 @@ def lower_case_validator(df, column):
     if count_upper * 2 > count_lower:
         return Remediation(
             name="lower_case",
-            immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
+            immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
             optional_msg=f"Lowercase all your data in column/key `{column}`",
             optional_fn=lower_case,
         )
openai/version.py
@@ -1,1 +1,1 @@
-VERSION = "0.26.5"
+VERSION = "0.27.0"
README.md
@@ -45,7 +45,7 @@ pip install openai[datalib]
 
 ## Usage
 
-The library needs to be configured with your account's secret key which is available on the [website](https://beta.openai.com/account/api-keys). Either set it as the `OPENAI_API_KEY` environment variable before using the library:
+The library needs to be configured with your account's secret key which is available on the [website](https://platform.openai.com/account/api-keys). Either set it as the `OPENAI_API_KEY` environment variable before using the library:
 
 ```bash
 export OPENAI_API_KEY='sk-...'
@@ -57,14 +57,14 @@ Or set `openai.api_key` to its value:
 import openai
 openai.api_key = "sk-..."
 
-# list engines
-engines = openai.Engine.list()
+# list models
+models = openai.Model.list()
 
-# print the first engine's id
-print(engines.data[0].id)
+# print the first model's id
+print(models.data[0].id)
 
 # create a completion
-completion = openai.Completion.create(engine="ada", prompt="Hello world")
+completion = openai.Completion.create(model="ada", prompt="Hello world")
 
 # print the completion
 print(completion.choices[0].text)
@@ -127,11 +127,14 @@ which makes it easy to interact with the API from your terminal. Run
 `openai api -h` for usage.
 
 ```sh
-# list engines
-openai api engines.list
+# list models
+openai api models.list
 
 # create a completion
-openai api completions.create -e ada -p "Hello world"
+openai api completions.create -m ada -p "Hello world"
+
+# create a chat completion
+openai api chat_completions.create -m gpt-3.5-turbo -g user "Hello world"
 
 # generate images via DALL·E API
 openai api image.create -p "two dogs playing chess, cartoon" -n 1
@@ -152,6 +155,18 @@ Examples of how to use this Python library to accomplish various tasks can be fo
 
 Prior to July 2022, this OpenAI Python library hosted code examples in its examples folder, but since then all examples have been migrated to the [OpenAI Cookbook](https://github.com/openai/openai-cookbook/).
 
+### Chat
+
+Conversational models such as `gpt-3.5-turbo` can be called using the chat completions endpoint.
+
+```python
+import openai
+openai.api_key = "sk-..."  # supply your API key however you choose
+
+completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world!"}])
+print(completion.choices[0].message.content)
+```
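
The commit also threads `stream=True` through chat completions (see the new CLI `--stream` flag and the streaming test). A sketch of consuming the stream, on the assumption that streamed chunks carry incremental `delta` payloads rather than full `message` objects:

```python
import openai

openai.api_key = "sk-..."  # supply your API key however you choose

stream = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello world!"}],
    stream=True,
)
for chunk in stream:
    # each chunk holds the next slice of the assistant message in `delta`
    print(chunk.choices[0].delta.get("content", ""), end="", flush=True)
print()
```
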
+
 ### Embeddings
 
 In the OpenAI Python library, an embedding represents a text string as a fixed-length vector of floating point numbers. Embeddings are designed to measure the similarity or relevance between text strings.
@@ -169,7 +184,7 @@ text_string = "sample text"
 model_id = "text-similarity-davinci-001"
 
 # compute the embedding of the text
-embedding = openai.Embedding.create(input=text_string, engine=model_id)['data'][0]['embedding']
+embedding = openai.Embedding.create(input=text_string, model=model_id)['data'][0]['embedding']
 ```
 
 An example of how to call the embeddings method is shown in this [get embeddings notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Get_embeddings.ipynb).
@@ -208,7 +223,7 @@ For more information on fine-tuning, read the [fine-tuning guide](https://beta.o
 
 ### Moderation
 
-OpenAI provides a Moderation endpoint that can be used to check whether content complies with the OpenAI [content policy](https://beta.openai.com/docs/usage-policies)
+OpenAI provides a Moderation endpoint that can be used to check whether content complies with the OpenAI [content policy](https://platform.openai.com/docs/usage-policies)
 
 ```python
 import openai
@@ -217,7 +232,7 @@ openai.api_key = "sk-..."  # supply your API key however you choose
 moderation_resp = openai.Moderation.create(input="Here is some perfectly innocuous text that follows all OpenAI content policies.")
 ```
 
-See the [moderation guide](https://beta.openai.com/docs/guides/moderation) for more details.
+See the [moderation guide](https://platform.openai.com/docs/guides/moderation) for more details.
 
 ## Image generation (DALL·E)
 
@@ -229,6 +244,15 @@ image_resp = openai.Image.create(prompt="two dogs playing chess, oil painting",
 
 ```
 
+## Audio transcription (Whisper)
+```python
+import openai
+openai.api_key = "sk-..."  # supply your API key however you choose
+f = open("path/to/file.mp3", "rb")
+transcript = openai.Audio.transcribe("whisper-1", f)
+
+```
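
The snippet above leaves the file handle open; a slightly fuller sketch using a context manager, plus the companion `translate` call (which returns English text), assuming the default JSON response with a `text` field:

```python
import openai

openai.api_key = "sk-..."  # supply your API key however you choose

with open("path/to/file.mp3", "rb") as f:
    transcript = openai.Audio.transcribe("whisper-1", f)
print(transcript["text"])

# translations: same upload, but the result comes back in English
with open("path/to/file.mp3", "rb") as f:
    translation = openai.Audio.translate("whisper-1", f)
print(translation["text"])
```
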
+
 ## Async API
 
 Async support is available in the API by prepending `a` to a network-bound method:
@@ -238,7 +262,7 @@ import openai
 openai.api_key = "sk-..."  # supply your API key however you choose
 
 async def create_completion():
-    completion_resp = await openai.Completion.acreate(prompt="This is a test", engine="davinci")
+    completion_resp = await openai.Completion.acreate(prompt="This is a test", model="davinci")
 
 ```
 
@@ -255,7 +279,7 @@ openai.aiosession.set(ClientSession())
 await openai.aiosession.get().close()
 ```
 
-See the [usage guide](https://beta.openai.com/docs/guides/images) for more details.
+See the [usage guide](https://platform.openai.com/docs/guides/images) for more details.
 
 ## Requirements