Commit 250c33d1
Changed files (4)
openai/api_resources/abstract/engine_api_resource.py
@@ -37,12 +37,6 @@ class EngineAPIResource(APIResource):
organization=None,
**params,
):
- """
- Create a new instance of this model.
-
- Parameters:
- timeout (float): the number of seconds to wait on the promise returned by the API, where 0 means wait forever.
- """
engine = params.pop("engine", None)
timeout = params.pop("timeout", None)
stream = params.get("stream", False)
openai/api_resources/completion.py
@@ -3,16 +3,29 @@ import time
from openai import util
from openai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
-from openai.error import TryAgain
+from openai.error import TryAgain, InvalidRequestError
class Completion(EngineAPIResource, ListableAPIResource, DeletableAPIResource):
- engine_required = True
+ engine_required = False
OBJECT_NAME = "completion"
@classmethod
- def create(cls, *args, timeout=None, **kwargs):
+ def create(cls, *args, **kwargs):
+ """
+ Creates a new completion for the provided prompt and parameters.
+
+ See https://beta.openai.com/docs/api-reference/completions/create for a list
+ of valid parameters.
+ """
start = time.time()
+ timeout = kwargs.get("timeout", None)
+ if kwargs.get("model", None) is None and kwargs.get("engine", None) is None:
+ raise InvalidRequestError(
+ "Must provide an 'engine' or 'model' parameter to create a Completion.",
+ param="engine",
+ )
+
while True:
try:
return super().create(*args, **kwargs)
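
For context on the change above: Completion.create now accepts either an engine or a model and raises InvalidRequestError when neither is supplied. A minimal sketch of the two call styles, assuming placeholder API key, engine, and fine-tuned model names (none of these identifiers come from this commit):

    import openai

    openai.api_key = "sk-..."  # placeholder key

    # Engine-based call (the pre-existing style).
    openai.Completion.create(engine="ada", prompt="Say hello", max_tokens=5)

    # Model-based call, now valid without an engine, e.g. for a fine-tuned model.
    openai.Completion.create(
        model="curie:ft-your-org-2021-06-01", prompt="Say hello", max_tokens=5
    )

    # Passing neither engine nor model raises InvalidRequestError with param="engine".
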
openai/cli.py
@@ -138,8 +138,14 @@ class Completion:
if args.n is not None and args.n > 1 and args.stream:
raise ValueError("Can't stream completions with n>1 with the current CLI")
+ if args.engine and args.model:
+ warnings.warn(
+ "In most cases, you should not be specifying both engine and model."
+ )
+
resp = openai.Completion.create(
engine=args.engine,
+ model=args.model,
n=args.n,
max_tokens=args.max_tokens,
logprobs=args.logprobs,
@@ -253,30 +259,14 @@ class FineTune:
return
sys.stdout.write(
- "Created job: {job_id}\n"
- "Streaming events until the job is complete...\n\n"
- "(Ctrl-C will interrupt the stream, but not cancel the job)\n".format(
+ "Created fine-tune: {job_id}\n"
+ "Streaming events until fine-tuning is complete...\n\n"
+ "(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
job_id=resp["id"]
)
)
cls._stream_events(resp["id"])
- resp = openai.FineTune.retrieve(id=resp["id"])
- status = resp["status"]
- sys.stdout.write("\nJob complete! Status: {status}".format(status=status))
- if status == "succeeded":
- sys.stdout.write(" 🎉")
- sys.stdout.write(
- "\nTry out your fine-tuned model: {model}\n"
- "(Pass this as the model parameter to a completion request)".format(
- model=resp["fine_tuned_model"]
- )
- )
- # TODO(rachel): Print instructions on how to use the model here.
- elif status == "failed":
- sys.stdout.write("\nPlease contact support@openai.com for assistance.")
- sys.stdout.write("\n")
-
@classmethod
def get(cls, args):
resp = openai.FineTune.retrieve(id=args.id)
@@ -296,8 +286,8 @@ class FineTune:
status = openai.FineTune.retrieve(job_id).status
sys.stdout.write(
"\nStream interrupted. Job is still {status}. "
- "To cancel your job, run:\n"
- "`openai api fine_tunes.cancel -i {job_id}`\n".format(
+ "To cancel your job, run:\n\n"
+ "openai api fine_tunes.cancel -i {job_id}\n".format(
status=status, job_id=job_id
)
)
@@ -318,6 +308,22 @@ class FineTune:
sys.stdout.write("\n")
sys.stdout.flush()
+ resp = openai.FineTune.retrieve(id=job_id)
+ status = resp["status"]
+ if status == "succeeded":
+ sys.stdout.write("\nJob complete! Status: succeeded ๐")
+ sys.stdout.write(
+ "\nTry out your fine-tuned model:\n\n"
+ "openai api completions.create -m {model} -p <YOUR_PROMPT>".format(
+ model=resp["fine_tuned_model"]
+ )
+ )
+ elif status == "failed":
+ sys.stdout.write(
+ "\nJob failed. Please contact support@openai.com if you need assistance."
+ )
+ sys.stdout.write("\n")
+
@classmethod
def cancel(cls, args):
resp = openai.FineTune.cancel(id=args.id)
@@ -422,7 +428,16 @@ Mutually exclusive with `top_p`.""",
# Completions
sub = subparsers.add_parser("completions.create")
- sub.add_argument("-e", "--engine", required=True, help="The engine to use")
+ sub.add_argument(
+ "-e",
+ "--engine",
+ help="The engine to use. See https://beta.openai.com/docs/engines for more about what engines are available.",
+ )
+ sub.add_argument(
+ "-m",
+ "--model",
+ help="The model to use. At most one of `engine` or `model` should be specified.",
+ )
sub.add_argument(
"--stream", help="Stream tokens as they're ready.", action="store_true"
)
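
The same choice is now exposed on the command line; a rough illustration with placeholder engine and model names (at most one of -e/-m should be passed, and supplying both triggers the new warning):

    # Engine-based completion; -e is no longer a required flag.
    openai api completions.create -e ada -p "Say hello"

    # Model-based completion, e.g. against a fine-tuned model.
    openai api completions.create -m curie:ft-your-org-2021-06-01 -p "Say hello"
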
openai/version.py
@@ -1,1 +1,1 @@
-VERSION = "0.7.0"
+VERSION = "0.8.0"