-
Notifications
You must be signed in to change notification settings - Fork 3.4k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Support model-based (/engineless) completions in openai cli (#20)
* Support model-based (/engineless) completions in openai cli (#31)
* Engineless completions in the SDK
* Cosmetic improvements to fine tuning CLI
* Remove done TODO
* Raise error if neither engine nor model provided
* Undocument the `timeout` parameter on completions, because it doesn't do anything and causes user confusion.
* Move things around
* Update message
* Some day we should care about versions
* Minor version bump
- Loading branch information
Showing
4 changed files
with
54 additions
and
32 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -138,8 +138,14 @@ def create(cls, args): | |
if args.n is not None and args.n > 1 and args.stream: | ||
raise ValueError("Can't stream completions with n>1 with the current CLI") | ||
|
||
if args.engine and args.model: | ||
warnings.warn( | ||
"In most cases, you should not be specifying both engine and model." | ||
) | ||
|
||
resp = openai.Completion.create( | ||
engine=args.engine, | ||
model=args.model, | ||
n=args.n, | ||
max_tokens=args.max_tokens, | ||
logprobs=args.logprobs, | ||
|
@@ -253,30 +259,14 @@ def create(cls, args): | |
return | ||
|
||
sys.stdout.write( | ||
"Created job: {job_id}\n" | ||
"Streaming events until the job is complete...\n\n" | ||
"(Ctrl-C will interrupt the stream, but not cancel the job)\n".format( | ||
"Created fine-tune: {job_id}\n" | ||
"Streaming events until fine-tuning is complete...\n\n" | ||
"(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format( | ||
job_id=resp["id"] | ||
) | ||
) | ||
cls._stream_events(resp["id"]) | ||
|
||
resp = openai.FineTune.retrieve(id=resp["id"]) | ||
status = resp["status"] | ||
sys.stdout.write("\nJob complete! Status: {status}".format(status=status)) | ||
if status == "succeeded": | ||
sys.stdout.write(" 🎉") | ||
sys.stdout.write( | ||
"\nTry out your fine-tuned model: {model}\n" | ||
"(Pass this as the model parameter to a completion request)".format( | ||
model=resp["fine_tuned_model"] | ||
) | ||
) | ||
# TODO(rachel): Print instructions on how to use the model here. | ||
elif status == "failed": | ||
sys.stdout.write("\nPlease contact [email protected] for assistance.") | ||
sys.stdout.write("\n") | ||
|
||
@classmethod | ||
def get(cls, args): | ||
resp = openai.FineTune.retrieve(id=args.id) | ||
|
@@ -296,8 +286,8 @@ def signal_handler(sig, frame): | |
status = openai.FineTune.retrieve(job_id).status | ||
sys.stdout.write( | ||
"\nStream interrupted. Job is still {status}. " | ||
"To cancel your job, run:\n" | ||
"`openai api fine_tunes.cancel -i {job_id}`\n".format( | ||
"To cancel your job, run:\n\n" | ||
"openai api fine_tunes.cancel -i {job_id}\n".format( | ||
status=status, job_id=job_id | ||
) | ||
) | ||
|
@@ -318,6 +308,22 @@ def signal_handler(sig, frame): | |
sys.stdout.write("\n") | ||
sys.stdout.flush() | ||
|
||
resp = openai.FineTune.retrieve(id=job_id) | ||
status = resp["status"] | ||
if status == "succeeded": | ||
sys.stdout.write("\nJob complete! Status: succeeded 🎉") | ||
sys.stdout.write( | ||
"\nTry out your fine-tuned model:\n\n" | ||
"openai api completions.create -m {model} -p <YOUR_PROMPT>".format( | ||
model=resp["fine_tuned_model"] | ||
) | ||
) | ||
elif status == "failed": | ||
sys.stdout.write( | ||
"\nJob failed. Please contact [email protected] if you need assistance." | ||
) | ||
sys.stdout.write("\n") | ||
|
||
@classmethod | ||
def cancel(cls, args): | ||
resp = openai.FineTune.cancel(id=args.id) | ||
|
@@ -422,7 +428,16 @@ def help(args): | |
|
||
# Completions | ||
sub = subparsers.add_parser("completions.create") | ||
sub.add_argument("-e", "--engine", required=True, help="The engine to use") | ||
sub.add_argument( | ||
"-e", | ||
"--engine", | ||
help="The engine to use. See https://beta.openai.com/docs/engines for more about what engines are available.", | ||
) | ||
sub.add_argument( | ||
"-m", | ||
"--model", | ||
help="The model to use. At most one of `engine` or `model` should be specified.", | ||
) | ||
sub.add_argument( | ||
"--stream", help="Stream tokens as they're ready.", action="store_true" | ||
) | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1 @@ | ||
VERSION = "0.7.0" | ||
VERSION = "0.8.0" |