From 75df4d8a8eb03cf390981969e65a797f10c3a880 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 12 Jan 2024 10:01:49 -0500
Subject: [PATCH] refactor(api): remove deprecated endpoints (#621)
The fine-tunes and edits APIs are no longer
provided by OpenAI.

This is not a breaking change, as attempting to
call these APIs, even on older versions, will
result in an error at runtime.
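For callers migrating off the removed resources, a minimal sketch of the
replacement calls, using the `fineTuning.jobs` and `chat.completions`
surfaces shown in the README and api.md changes below (IDs and prompt text
are placeholders):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  // Previously openai.fineTunes.create({ training_file: ... });
  // fine-tuning jobs now require an explicit base model.
  const job = await openai.fineTuning.jobs.create({
    model: 'gpt-3.5-turbo',
    training_file: 'file-abc123',
  });
  console.log(job.id);

  // Previously openai.edits.create({ model: 'text-davinci-edit-001', ... });
  // the same request is expressed as a chat completion.
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Fix the spelling mistakes:\n\nWhat day of the wek is it?' }],
  });
  console.log(completion.choices[0].message.content);
}

main();
```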
---
.stats.yml | 2 +-
README.md | 4 +-
api.md | 26 --
src/index.ts | 16 -
src/resources/chat/completions.ts | 4 +-
src/resources/completions.ts | 24 +-
src/resources/edits.ts | 109 -------
src/resources/fine-tunes.ts | 423 -------------------------
src/resources/fine-tuning/jobs.ts | 3 +-
src/resources/index.ts | 12 -
tests/api-resources/edits.test.ts | 36 ---
tests/api-resources/fine-tunes.test.ts | 117 -------
12 files changed, 13 insertions(+), 763 deletions(-)
delete mode 100644 src/resources/edits.ts
delete mode 100644 src/resources/fine-tunes.ts
delete mode 100644 tests/api-resources/edits.test.ts
delete mode 100644 tests/api-resources/fine-tunes.test.ts
diff --git a/.stats.yml b/.stats.yml
index 03b0268ff..c550abf3c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 57
+configured_endpoints: 51
diff --git a/README.md b/README.md
index 90c69a922..429d3fec6 100644
--- a/README.md
+++ b/README.md
@@ -272,8 +272,8 @@ a subclass of `APIError` will be thrown:
```ts
async function main() {
- const fineTune = await openai.fineTunes
- .create({ training_file: 'file-XGinujblHPwGLSztz8cPS8XY' })
+ const job = await openai.fineTuning.jobs
+ .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
.catch((err) => {
if (err instanceof OpenAI.APIError) {
console.log(err.status); // 400
diff --git a/api.md b/api.md
index 82ffae114..68d8545cc 100644
--- a/api.md
+++ b/api.md
@@ -48,16 +48,6 @@ Methods:
- client.chat.completions.create({ ...params }) -> ChatCompletion
-# Edits
-
-Types:
-
-- Edit
-
-Methods:
-
-- client.edits.create({ ...params }) -> Edit
-
# Embeddings
Types:
@@ -169,22 +159,6 @@ Methods:
- client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob
- client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage
-# FineTunes
-
-Types:
-
-- FineTune
-- FineTuneEvent
-- FineTuneEventsListResponse
-
-Methods:
-
-- client.fineTunes.create({ ...params }) -> FineTune
-- client.fineTunes.retrieve(fineTuneId) -> FineTune
-- client.fineTunes.list() -> FineTunesPage
-- client.fineTunes.cancel(fineTuneId) -> FineTune
-- client.fineTunes.listEvents(fineTuneId, { ...params }) -> FineTuneEventsListResponse
-
# Beta
## Chat
diff --git a/src/index.ts b/src/index.ts
index 265fb8f1c..f72850cf9 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -142,7 +142,6 @@ export class OpenAI extends Core.APIClient {
completions: API.Completions = new API.Completions(this);
chat: API.Chat = new API.Chat(this);
- edits: API.Edits = new API.Edits(this);
embeddings: API.Embeddings = new API.Embeddings(this);
files: API.Files = new API.Files(this);
images: API.Images = new API.Images(this);
@@ -150,7 +149,6 @@ export class OpenAI extends Core.APIClient {
moderations: API.Moderations = new API.Moderations(this);
models: API.Models = new API.Models(this);
fineTuning: API.FineTuning = new API.FineTuning(this);
- fineTunes: API.FineTunes = new API.FineTunes(this);
beta: API.Beta = new API.Beta(this);
protected override defaultQuery(): Core.DefaultQuery | undefined {
@@ -251,10 +249,6 @@ export namespace OpenAI {
export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming;
export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming;
- export import Edits = API.Edits;
- export import Edit = API.Edit;
- export import EditCreateParams = API.EditCreateParams;
-
export import Embeddings = API.Embeddings;
export import CreateEmbeddingResponse = API.CreateEmbeddingResponse;
export import Embedding = API.Embedding;
@@ -289,16 +283,6 @@ export namespace OpenAI {
export import FineTuning = API.FineTuning;
- export import FineTunes = API.FineTunes;
- export import FineTune = API.FineTune;
- export import FineTuneEvent = API.FineTuneEvent;
- export import FineTuneEventsListResponse = API.FineTuneEventsListResponse;
- export import FineTunesPage = API.FineTunesPage;
- export import FineTuneCreateParams = API.FineTuneCreateParams;
- export import FineTuneListEventsParams = API.FineTuneListEventsParams;
- export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming;
- export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming;
-
export import Beta = API.Beta;
export import FunctionDefinition = API.FunctionDefinition;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index fce37ca53..6882e5f44 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -594,7 +594,7 @@ export interface ChatCompletionTool {
* will not call a function and instead generates a message. `auto` means the model
* can pick between generating a message or calling a function. Specifying a
* particular function via
- * `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
* call that function.
*
* `none` is the default when no functions are present. `auto` is the default if
@@ -807,7 +807,7 @@ export interface ChatCompletionCreateParamsBase {
* will not call a function and instead generates a message. `auto` means the model
* can pick between generating a message or calling a function. Specifying a
* particular function via
- * `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
* call that function.
*
* `none` is the default when no functions are present. `auto` is the default if
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 00769fdbb..f3e262f5f 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -131,18 +131,7 @@ export interface CompletionCreateParamsBase {
* [Model overview](https://platform.openai.com/docs/models/overview) for
* descriptions of them.
*/
- model:
- | (string & {})
- | 'babbage-002'
- | 'davinci-002'
- | 'gpt-3.5-turbo-instruct'
- | 'text-davinci-003'
- | 'text-davinci-002'
- | 'text-davinci-001'
- | 'code-davinci-002'
- | 'text-curie-001'
- | 'text-babbage-001'
- | 'text-ada-001';
+ model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002';
/**
* The prompt(s) to generate completions for, encoded as a string, array of
@@ -186,12 +175,11 @@ export interface CompletionCreateParamsBase {
*
* Accepts a JSON object that maps tokens (specified by their token ID in the GPT
* tokenizer) to an associated bias value from -100 to 100. You can use this
- * [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- * convert text to token IDs. Mathematically, the bias is added to the logits
- * generated by the model prior to sampling. The exact effect will vary per model,
- * but values between -1 and 1 should decrease or increase likelihood of selection;
- * values like -100 or 100 should result in a ban or exclusive selection of the
- * relevant token.
+ * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ * Mathematically, the bias is added to the logits generated by the model prior to
+ * sampling. The exact effect will vary per model, but values between -1 and 1
+ * should decrease or increase likelihood of selection; values like -100 or 100
+ * should result in a ban or exclusive selection of the relevant token.
*
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
* from being generated.
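The `logit_bias` description above keeps the `{"50256": -100}` example; a
minimal sketch of that request against the narrowed model list (prompt text
is illustrative):

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  // Suppress <|endoftext|> (token 50256) for a legacy completions request.
  const completion = await openai.completions.create({
    model: 'gpt-3.5-turbo-instruct',
    prompt: 'Say this is a test.',
    logit_bias: { '50256': -100 },
  });
  console.log(completion.choices[0].text);
}

main();
```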
diff --git a/src/resources/edits.ts b/src/resources/edits.ts
deleted file mode 100644
index 0b7b4802b..000000000
--- a/src/resources/edits.ts
+++ /dev/null
@@ -1,109 +0,0 @@
-// File generated from our OpenAPI spec by Stainless.
-
-import * as Core from 'openai/core';
-import { APIResource } from 'openai/resource';
-import * as EditsAPI from 'openai/resources/edits';
-import * as CompletionsAPI from 'openai/resources/completions';
-
-export class Edits extends APIResource {
- /**
- * Creates a new edit for the provided input, instruction, and parameters.
- *
- * @deprecated The Edits API is deprecated; please use Chat Completions instead.
- *
- * https://openai.com/blog/gpt-4-api-general-availability#deprecation-of-the-edits-api
- */
- create(body: EditCreateParams, options?: Core.RequestOptions): Core.APIPromise<Edit> {
- return this._client.post('/edits', { body, ...options });
- }
-}
-
-export interface Edit {
- /**
- * A list of edit choices. Can be more than one if `n` is greater than 1.
- */
- choices: Array<Edit.Choice>;
-
- /**
- * The Unix timestamp (in seconds) of when the edit was created.
- */
- created: number;
-
- /**
- * The object type, which is always `edit`.
- */
- object: 'edit';
-
- /**
- * Usage statistics for the completion request.
- */
- usage: CompletionsAPI.CompletionUsage;
-}
-
-export namespace Edit {
- export interface Choice {
- /**
- * The reason the model stopped generating tokens. This will be `stop` if the model
- * hit a natural stop point or a provided stop sequence, `length` if the maximum
- * number of tokens specified in the request was reached, or `content_filter` if
- * content was omitted due to a flag from our content filters.
- */
- finish_reason: 'stop' | 'length';
-
- /**
- * The index of the choice in the list of choices.
- */
- index: number;
-
- /**
- * The edited result.
- */
- text: string;
- }
-}
-
-export interface EditCreateParams {
- /**
- * The instruction that tells the model how to edit the prompt.
- */
- instruction: string;
-
- /**
- * ID of the model to use. You can use the `text-davinci-edit-001` or
- * `code-davinci-edit-001` model with this endpoint.
- */
- model: (string & {}) | 'text-davinci-edit-001' | 'code-davinci-edit-001';
-
- /**
- * The input text to use as a starting point for the edit.
- */
- input?: string | null;
-
- /**
- * How many edits to generate for the input and instruction.
- */
- n?: number | null;
-
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- * make the output more random, while lower values like 0.2 will make it more
- * focused and deterministic.
- *
- * We generally recommend altering this or `top_p` but not both.
- */
- temperature?: number | null;
-
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the
- * model considers the results of the tokens with top_p probability mass. So 0.1
- * means only the tokens comprising the top 10% probability mass are considered.
- *
- * We generally recommend altering this or `temperature` but not both.
- */
- top_p?: number | null;
-}
-
-export namespace Edits {
- export import Edit = EditsAPI.Edit;
- export import EditCreateParams = EditsAPI.EditCreateParams;
-}
diff --git a/src/resources/fine-tunes.ts b/src/resources/fine-tunes.ts
deleted file mode 100644
index 8e8193720..000000000
--- a/src/resources/fine-tunes.ts
+++ /dev/null
@@ -1,423 +0,0 @@
-// File generated from our OpenAPI spec by Stainless.
-
-import * as Core from 'openai/core';
-import { APIPromise } from 'openai/core';
-import { APIResource } from 'openai/resource';
-import * as FineTunesAPI from 'openai/resources/fine-tunes';
-import * as FilesAPI from 'openai/resources/files';
-import { Page } from 'openai/pagination';
-import { Stream } from 'openai/streaming';
-
-export class FineTunes extends APIResource {
- /**
- * Creates a job that fine-tunes a specified model from a given dataset.
- *
- * Response includes details of the enqueued job including job status and the name
- * of the fine-tuned models once complete.
- *
- * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
- */
- create(body: FineTuneCreateParams, options?: Core.RequestOptions): Core.APIPromise<FineTune> {
- return this._client.post('/fine-tunes', { body, ...options });
- }
-
- /**
- * Gets info about the fine-tune job.
- *
- * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
- */
- retrieve(fineTuneId: string, options?: Core.RequestOptions): Core.APIPromise<FineTune> {
- return this._client.get(`/fine-tunes/${fineTuneId}`, options);
- }
-
- /**
- * List your organization's fine-tuning jobs
- */
- list(options?: Core.RequestOptions): Core.PagePromise<FineTunesPage, FineTune> {
- return this._client.getAPIList('/fine-tunes', FineTunesPage, options);
- }
-
- /**
- * Immediately cancel a fine-tune job.
- */
- cancel(fineTuneId: string, options?: Core.RequestOptions): Core.APIPromise<FineTune> {
- return this._client.post(`/fine-tunes/${fineTuneId}/cancel`, options);
- }
-
- /**
- * Get fine-grained status updates for a fine-tune job.
- */
- listEvents(
- fineTuneId: string,
- query?: FineTuneListEventsParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<FineTuneEventsListResponse>;
- listEvents(
- fineTuneId: string,
- query: FineTuneListEventsParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<FineTuneEvent>>;
- listEvents(
- fineTuneId: string,
- query?: FineTuneListEventsParamsBase | undefined,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<FineTuneEvent> | FineTuneEventsListResponse>;
- listEvents(
- fineTuneId: string,
- query?: FineTuneListEventsParams | undefined,
- options?: Core.RequestOptions,
- ): APIPromise<FineTuneEventsListResponse> | APIPromise<Stream<FineTuneEvent>> {
- return this._client.get(`/fine-tunes/${fineTuneId}/events`, {
- query,
- timeout: 86400000,
- ...options,
- stream: query?.stream ?? false,
- }) as APIPromise<FineTuneEventsListResponse> | APIPromise<Stream<FineTuneEvent>>;
- }
-}
-
-/**
- * Note: no pagination actually occurs yet; this is for forwards-compatibility.
- */
-export class FineTunesPage extends Page<FineTune> {}
-
-/**
- * The `FineTune` object represents a legacy fine-tune job that has been created
- * through the API.
- */
-export interface FineTune {
- /**
- * The object identifier, which can be referenced in the API endpoints.
- */
- id: string;
-
- /**
- * The Unix timestamp (in seconds) for when the fine-tuning job was created.
- */
- created_at: number;
-
- /**
- * The name of the fine-tuned model that is being created.
- */
- fine_tuned_model: string | null;
-
- /**
- * The hyperparameters used for the fine-tuning job. See the
- * [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/hyperparameters)
- * for more details.
- */
- hyperparams: FineTune.Hyperparams;
-
- /**
- * The base model that is being fine-tuned.
- */
- model: string;
-
- /**
- * The object type, which is always "fine-tune".
- */
- object: 'fine-tune';
-
- /**
- * The organization that owns the fine-tuning job.
- */
- organization_id: string;
-
- /**
- * The compiled results files for the fine-tuning job.
- */
- result_files: Array<FilesAPI.FileObject>;
-
- /**
- * The current status of the fine-tuning job, which can be either `created`,
- * `running`, `succeeded`, `failed`, or `cancelled`.
- */
- status: string;
-
- /**
- * The list of files used for training.
- */
- training_files: Array<FilesAPI.FileObject>;
-
- /**
- * The Unix timestamp (in seconds) for when the fine-tuning job was last updated.
- */
- updated_at: number;
-
- /**
- * The list of files used for validation.
- */
- validation_files: Array<FilesAPI.FileObject>;
-
- /**
- * The list of events that have been observed in the lifecycle of the FineTune job.
- */
- events?: Array<FineTuneEvent>;
-}
-
-export namespace FineTune {
- /**
- * The hyperparameters used for the fine-tuning job. See the
- * [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/hyperparameters)
- * for more details.
- */
- export interface Hyperparams {
- /**
- * The batch size to use for training. The batch size is the number of training
- * examples used to train a single forward and backward pass.
- */
- batch_size: number;
-
- /**
- * The learning rate multiplier to use for training.
- */
- learning_rate_multiplier: number;
-
- /**
- * The number of epochs to train the model for. An epoch refers to one full cycle
- * through the training dataset.
- */
- n_epochs: number;
-
- /**
- * The weight to use for loss on the prompt tokens.
- */
- prompt_loss_weight: number;
-
- /**
- * The number of classes to use for computing classification metrics.
- */
- classification_n_classes?: number;
-
- /**
- * The positive class to use for computing classification metrics.
- */
- classification_positive_class?: string;
-
- /**
- * The classification metrics to compute using the validation dataset at the end of
- * every epoch.
- */
- compute_classification_metrics?: boolean;
- }
-}
-
-/**
- * Fine-tune event object
- */
-export interface FineTuneEvent {
- created_at: number;
-
- level: string;
-
- message: string;
-
- object: 'fine-tune-event';
-}
-
-export interface FineTuneEventsListResponse {
- data: Array<FineTuneEvent>;
-
- object: 'list';
-}
-
-export interface FineTuneCreateParams {
- /**
- * The ID of an uploaded file that contains training data.
- *
- * See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
- * for how to upload a file.
- *
- * Your dataset must be formatted as a JSONL file, where each training example is a
- * JSON object with the keys "prompt" and "completion". Additionally, you must
- * upload your file with the purpose `fine-tune`.
- *
- * See the
- * [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- * for more details.
- */
- training_file: string;
-
- /**
- * The batch size to use for training. The batch size is the number of training
- * examples used to train a single forward and backward pass.
- *
- * By default, the batch size will be dynamically configured to be ~0.2% of the
- * number of examples in the training set, capped at 256 - in general, we've found
- * that larger batch sizes tend to work better for larger datasets.
- */
- batch_size?: number | null;
-
- /**
- * If this is provided, we calculate F-beta scores at the specified beta values.
- * The F-beta score is a generalization of F-1 score. This is only used for binary
- * classification.
- *
- * With a beta of 1 (i.e. the F-1 score), precision and recall are given the same
- * weight. A larger beta score puts more weight on recall and less on precision. A
- * smaller beta score puts more weight on precision and less on recall.
- */
- classification_betas?: Array<number> | null;
-
- /**
- * The number of classes in a classification task.
- *
- * This parameter is required for multiclass classification.
- */
- classification_n_classes?: number | null;
-
- /**
- * The positive class in binary classification.
- *
- * This parameter is needed to generate precision, recall, and F1 metrics when
- * doing binary classification.
- */
- classification_positive_class?: string | null;
-
- /**
- * If set, we calculate classification-specific metrics such as accuracy and F-1
- * score using the validation set at the end of every epoch. These metrics can be
- * viewed in the
- * [results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
- *
- * In order to compute classification metrics, you must provide a
- * `validation_file`. Additionally, you must specify `classification_n_classes` for
- * multiclass classification or `classification_positive_class` for binary
- * classification.
- */
- compute_classification_metrics?: boolean | null;
-
- /**
- * The hyperparameters used for the fine-tuning job.
- */
- hyperparameters?: FineTuneCreateParams.Hyperparameters;
-
- /**
- * The learning rate multiplier to use for training. The fine-tuning learning rate
- * is the original learning rate used for pretraining multiplied by this value.
- *
- * By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on the
- * final `batch_size` (larger learning rates tend to perform better with larger
- * batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to
- * see what produces the best results.
- */
- learning_rate_multiplier?: number | null;
-
- /**
- * The name of the base model to fine-tune. You can select one of "ada", "babbage",
- * "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before
- * 2023-08-22. To learn more about these models, see the
- * [Models](https://platform.openai.com/docs/models) documentation.
- */
- model?: (string & {}) | 'ada' | 'babbage' | 'curie' | 'davinci' | null;
-
- /**
- * The weight to use for loss on the prompt tokens. This controls how much the
- * model tries to learn to generate the prompt (as compared to the completion which
- * always has a weight of 1.0), and can add a stabilizing effect to training when
- * completions are short.
- *
- * If prompts are extremely long (relative to completions), it may make sense to
- * reduce this weight so as to avoid over-prioritizing learning the prompt.
- */
- prompt_loss_weight?: number | null;
-
- /**
- * A string of up to 40 characters that will be added to your fine-tuned model
- * name.
- *
- * For example, a `suffix` of "custom-model-name" would produce a model name like
- * `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
- */
- suffix?: string | null;
-
- /**
- * The ID of an uploaded file that contains validation data.
- *
- * If you provide this file, the data is used to generate validation metrics
- * periodically during fine-tuning. These metrics can be viewed in the
- * [fine-tuning results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
- * Your train and validation data should be mutually exclusive.
- *
- * Your dataset must be formatted as a JSONL file, where each validation example is
- * a JSON object with the keys "prompt" and "completion". Additionally, you must
- * upload your file with the purpose `fine-tune`.
- *
- * See the
- * [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- * for more details.
- */
- validation_file?: string | null;
-}
-
-export namespace FineTuneCreateParams {
- /**
- * The hyperparameters used for the fine-tuning job.
- */
- export interface Hyperparameters {
- /**
- * The number of epochs to train the model for. An epoch refers to one full cycle
- * through the training dataset.
- */
- n_epochs?: 'auto' | number;
- }
-}
-
-export type FineTuneListEventsParams =
- | FineTuneListEventsParamsNonStreaming
- | FineTuneListEventsParamsStreaming;
-
-export interface FineTuneListEventsParamsBase {
- /**
- * Whether to stream events for the fine-tune job. If set to true, events will be
- * sent as data-only
- * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- * as they become available. The stream will terminate with a `data: [DONE]`
- * message when the job is finished (succeeded, cancelled, or failed).
- *
- * If set to false, only events generated so far will be returned.
- */
- stream?: boolean;
-}
-
-export namespace FineTuneListEventsParams {
- export type FineTuneListEventsParamsNonStreaming = FineTunesAPI.FineTuneListEventsParamsNonStreaming;
- export type FineTuneListEventsParamsStreaming = FineTunesAPI.FineTuneListEventsParamsStreaming;
-}
-
-export interface FineTuneListEventsParamsNonStreaming extends FineTuneListEventsParamsBase {
- /**
- * Whether to stream events for the fine-tune job. If set to true, events will be
- * sent as data-only
- * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- * as they become available. The stream will terminate with a `data: [DONE]`
- * message when the job is finished (succeeded, cancelled, or failed).
- *
- * If set to false, only events generated so far will be returned.
- */
- stream?: false;
-}
-
-export interface FineTuneListEventsParamsStreaming extends FineTuneListEventsParamsBase {
- /**
- * Whether to stream events for the fine-tune job. If set to true, events will be
- * sent as data-only
- * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- * as they become available. The stream will terminate with a `data: [DONE]`
- * message when the job is finished (succeeded, cancelled, or failed).
- *
- * If set to false, only events generated so far will be returned.
- */
- stream: true;
-}
-
-export namespace FineTunes {
- export import FineTune = FineTunesAPI.FineTune;
- export import FineTuneEvent = FineTunesAPI.FineTuneEvent;
- export import FineTuneEventsListResponse = FineTunesAPI.FineTuneEventsListResponse;
- export import FineTunesPage = FineTunesAPI.FineTunesPage;
- export import FineTuneCreateParams = FineTunesAPI.FineTuneCreateParams;
- export import FineTuneListEventsParams = FineTunesAPI.FineTuneListEventsParams;
- export import FineTuneListEventsParamsNonStreaming = FineTunesAPI.FineTuneListEventsParamsNonStreaming;
- export import FineTuneListEventsParamsStreaming = FineTunesAPI.FineTuneListEventsParamsStreaming;
-}
diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs.ts
index 9e013b27a..7bc216d7c 100644
--- a/src/resources/fine-tuning/jobs.ts
+++ b/src/resources/fine-tuning/jobs.ts
@@ -8,7 +8,8 @@ import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Jobs extends APIResource {
/**
- * Creates a job that fine-tunes a specified model from a given dataset.
+ * Creates a fine-tuning job which begins the process of creating a new model from
+ * a given dataset.
*
* Response includes details of the enqueued job including job status and the name
* of the fine-tuned models once complete.
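The job lifecycle operations listed in api.md (retrieve, listEvents, cancel)
cover what the removed resource offered; a sketch, with the job ID as a
placeholder:

```ts
import OpenAI from 'openai';

async function monitorJob(openai: OpenAI, jobId: string) {
  const job = await openai.fineTuning.jobs.retrieve(jobId); // -> FineTuningJob
  console.log(job.status);

  // Events come back as a cursor page; for-await iteration paginates lazily.
  for await (const event of openai.fineTuning.jobs.listEvents(jobId, { limit: 10 })) {
    console.log(event.message);
  }

  if (job.status === 'running') {
    await openai.fineTuning.jobs.cancel(jobId); // -> FineTuningJob
  }
}
```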
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 3bc17fdc2..16ce85123 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -14,7 +14,6 @@ export {
Completions,
} from './completions';
export { CreateEmbeddingResponse, Embedding, EmbeddingCreateParams, Embeddings } from './embeddings';
-export { Edit, EditCreateParams, Edits } from './edits';
export {
FileContent,
FileDeleted,
@@ -24,17 +23,6 @@ export {
FileObjectsPage,
Files,
} from './files';
-export {
- FineTune,
- FineTuneEvent,
- FineTuneEventsListResponse,
- FineTuneCreateParams,
- FineTuneListEventsParams,
- FineTuneListEventsParamsNonStreaming,
- FineTuneListEventsParamsStreaming,
- FineTunesPage,
- FineTunes,
-} from './fine-tunes';
export { FineTuning } from './fine-tuning/fine-tuning';
export {
Image,
diff --git a/tests/api-resources/edits.test.ts b/tests/api-resources/edits.test.ts
deleted file mode 100644
index add95f051..000000000
--- a/tests/api-resources/edits.test.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-// File generated from our OpenAPI spec by Stainless.
-
-import OpenAI from 'openai';
-import { Response } from 'node-fetch';
-
-const openai = new OpenAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource edits', () => {
- test('create: only required params', async () => {
- const responsePromise = openai.edits.create({
- instruction: 'Fix the spelling mistakes.',
- model: 'text-davinci-edit-001',
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await openai.edits.create({
- instruction: 'Fix the spelling mistakes.',
- model: 'text-davinci-edit-001',
- input: 'What day of the wek is it?',
- n: 1,
- temperature: 1,
- top_p: 1,
- });
- });
-});
diff --git a/tests/api-resources/fine-tunes.test.ts b/tests/api-resources/fine-tunes.test.ts
deleted file mode 100644
index c82898ff2..000000000
--- a/tests/api-resources/fine-tunes.test.ts
+++ /dev/null
@@ -1,117 +0,0 @@
-// File generated from our OpenAPI spec by Stainless.
-
-import OpenAI from 'openai';
-import { Response } from 'node-fetch';
-
-const openai = new OpenAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource fineTunes', () => {
- test('create: only required params', async () => {
- const responsePromise = openai.fineTunes.create({ training_file: 'file-abc123' });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await openai.fineTunes.create({
- training_file: 'file-abc123',
- batch_size: 0,
- classification_betas: [0.6, 1, 1.5, 2],
- classification_n_classes: 0,
- classification_positive_class: 'string',
- compute_classification_metrics: true,
- hyperparameters: { n_epochs: 'auto' },
- learning_rate_multiplier: 0,
- model: 'curie',
- prompt_loss_weight: 0,
- suffix: 'x',
- validation_file: 'file-abc123',
- });
- });
-
- test('retrieve', async () => {
- const responsePromise = openai.fineTunes.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('retrieve: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.fineTunes.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-
- test('list', async () => {
- const responsePromise = openai.fineTunes.list();
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(openai.fineTunes.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
- OpenAI.NotFoundError,
- );
- });
-
- test('cancel', async () => {
- const responsePromise = openai.fineTunes.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('cancel: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.fineTunes.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-
- // Prism chokes on this
- test.skip('listEvents', async () => {
- const responsePromise = openai.fineTunes.listEvents('ft-AF1WoRqd3aJAHsqc9NY7iL8F');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- // Prism chokes on this
- test.skip('listEvents: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- openai.fineTunes.listEvents(
- 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
- { stream: false },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(OpenAI.NotFoundError);
- });
-});
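Request-shape coverage for the replacement resource follows the same pattern
as the deleted tests; a hypothetical sketch (the file path, model, and IDs
are illustrative and not part of this patch):

```ts
// e.g. tests/api-resources/fine-tuning/jobs.test.ts (illustrative)
import OpenAI from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
  apiKey: 'My API Key',
  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource fineTuning.jobs', () => {
  test('create: only required params', async () => {
    const responsePromise = openai.fineTuning.jobs.create({
      model: 'gpt-3.5-turbo',
      training_file: 'file-abc123',
    });
    const rawResponse = await responsePromise.asResponse();
    expect(rawResponse).toBeInstanceOf(Response);
    const response = await responsePromise;
    expect(response).not.toBeInstanceOf(Response);
    const dataAndResponse = await responsePromise.withResponse();
    expect(dataAndResponse.data).toBe(response);
    expect(dataAndResponse.response).toBe(rawResponse);
  });
});
```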