From fb92f5e6e3b6e7969b3d91f4ccdaef87e5fea0a4 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 6 Nov 2023 19:24:32 +0000
Subject: [PATCH] feat(api): releases from DevDay; assistants, multimodality,
tools, dall-e-3, tts, and more (#433)
---
.stats.yml | 2 +-
README.md | 20 +-
api.md | 125 +++-
build-deno | 2 +-
.../node-ts-cjs-auto/tests/test.ts | 8 +-
examples/audio.ts | 35 ++
examples/tool-call-helpers.ts | 126 +++++
helpers.md | 41 +-
src/index.ts | 14 +
src/lib/AbstractChatCompletionRunner.ts | 264 +++++++--
src/lib/ChatCompletionRunFunctions.test.ts | 42 +-
src/lib/ChatCompletionRunner.ts | 31 +-
src/lib/ChatCompletionStream.ts | 130 ++++-
src/lib/ChatCompletionStreamingRunner.ts | 24 +-
src/lib/RunnableFunction.ts | 12 +
src/lib/chatCompletionUtils.ts | 28 +
src/pagination.ts | 4 +-
src/resources/audio/audio.ts | 4 +
src/resources/audio/index.ts | 1 +
src/resources/audio/speech.ts | 49 ++
src/resources/audio/transcriptions.ts | 4 +-
src/resources/audio/translations.ts | 4 +-
src/resources/beta/assistants/assistants.ts | 470 +++++++++++++++
src/resources/beta/assistants/files.ts | 154 +++++
src/resources/beta/assistants/index.ts | 19 +
src/resources/beta/beta.ts | 17 +
src/resources/beta/chat/completions.ts | 43 +-
src/resources/beta/index.ts | 17 +
src/resources/beta/threads/index.ts | 31 +
src/resources/beta/threads/messages/files.ts | 105 ++++
src/resources/beta/threads/messages/index.ts | 14 +
.../beta/threads/messages/messages.ts | 343 +++++++++++
src/resources/beta/threads/runs/index.ts | 23 +
src/resources/beta/threads/runs/runs.ts | 535 ++++++++++++++++++
src/resources/beta/threads/runs/steps.ts | 365 ++++++++++++
src/resources/beta/threads/threads.ts | 339 +++++++++++
src/resources/chat/chat.ts | 13 +
src/resources/chat/completions.ts | 445 +++++++++++++--
src/resources/chat/index.ts | 13 +
src/resources/completions.ts | 22 +-
src/resources/edits.ts | 2 +-
src/resources/embeddings.ts | 4 +-
src/resources/files.ts | 80 ++-
src/resources/fine-tunes.ts | 6 +-
src/resources/fine-tuning/jobs.ts | 18 +-
src/resources/images.ts | 51 +-
src/resources/index.ts | 10 +-
src/resources/models.ts | 2 +-
tests/api-resources/audio/speech.test.ts | 20 +
.../beta/assistants/assistants.test.ts | 109 ++++
.../beta/assistants/files.test.ts | 95 ++++
.../beta/chat/completions.test.ts | 10 +
.../beta/threads/messages/files.test.ts | 68 +++
.../beta/threads/messages/messages.test.ts | 89 +++
.../beta/threads/runs/runs.test.ts | 131 +++++
.../beta/threads/runs/steps.test.ts | 61 ++
.../beta/threads/threads.test.ts | 98 ++++
tests/api-resources/chat/completions.test.ts | 17 +-
tests/api-resources/completions.test.ts | 1 +
tests/api-resources/files.test.ts | 11 +-
tests/api-resources/fine-tuning/jobs.test.ts | 2 +-
tests/api-resources/images.test.ts | 5 +
62 files changed, 4585 insertions(+), 243 deletions(-)
create mode 100755 examples/audio.ts
create mode 100755 examples/tool-call-helpers.ts
create mode 100644 src/lib/chatCompletionUtils.ts
create mode 100644 src/resources/audio/speech.ts
create mode 100644 src/resources/beta/assistants/assistants.ts
create mode 100644 src/resources/beta/assistants/files.ts
create mode 100644 src/resources/beta/assistants/index.ts
create mode 100644 src/resources/beta/threads/index.ts
create mode 100644 src/resources/beta/threads/messages/files.ts
create mode 100644 src/resources/beta/threads/messages/index.ts
create mode 100644 src/resources/beta/threads/messages/messages.ts
create mode 100644 src/resources/beta/threads/runs/index.ts
create mode 100644 src/resources/beta/threads/runs/runs.ts
create mode 100644 src/resources/beta/threads/runs/steps.ts
create mode 100644 src/resources/beta/threads/threads.ts
create mode 100644 tests/api-resources/audio/speech.test.ts
create mode 100644 tests/api-resources/beta/assistants/assistants.test.ts
create mode 100644 tests/api-resources/beta/assistants/files.test.ts
create mode 100644 tests/api-resources/beta/chat/completions.test.ts
create mode 100644 tests/api-resources/beta/threads/messages/files.test.ts
create mode 100644 tests/api-resources/beta/threads/messages/messages.test.ts
create mode 100644 tests/api-resources/beta/threads/runs/runs.test.ts
create mode 100644 tests/api-resources/beta/threads/runs/steps.test.ts
create mode 100644 tests/api-resources/beta/threads/threads.test.ts
diff --git a/.stats.yml b/.stats.yml
index f21eb8fef..03b0268ff 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 28
+configured_endpoints: 57
diff --git a/README.md b/README.md
index 99dfc86f4..2cbf9ce86 100644
--- a/README.md
+++ b/README.md
@@ -62,8 +62,8 @@ async function main() {
messages: [{ role: 'user', content: 'Say this is a test' }],
stream: true,
});
- for await (const part of stream) {
- process.stdout.write(part.choices[0]?.delta?.content || '');
+ for await (const chunk of stream) {
+ process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
}
@@ -121,8 +121,8 @@ async function main() {
});
// or, equivalently:
- for await (const part of stream) {
- process.stdout.write(part.choices[0]?.delta?.content || '');
+ for await (const chunk of stream) {
+ process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
const chatCompletion = await stream.finalChatCompletion();
@@ -143,14 +143,18 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call
### Automated function calls
-We provide a `openai.beta.chat.completions.runFunctions({…})` convenience helper for using function calls
-with the `/chat/completions` endpoint which automatically calls the JavaScript functions you provide
+We provide `openai.beta.chat.completions.runFunctions({…})` and `openai.beta.chat.completions.runTools({…})`
+convenience helpers for using function calls with the `/chat/completions` endpoint
+which automatically call the JavaScript functions you provide
and sends their results back to the `/chat/completions` endpoint,
looping as long as the model requests function calls.
-If you pass a `parse` function, it will automatically parse the `arguments` for you and returns any parsing errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide as a string.
+If you pass a `parse` function, it will automatically parse the `arguments` for you
+and return any parsing errors to the model to attempt auto-recovery.
+Otherwise, the args will be passed to the function you provide as a string.
-If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that function (and only loops to auto-recover parsing errors).
+If you pass `function_call: {name: …}` or `tool_call: {function: {name: …}}` instead of `auto`,
+it returns immediately after calling that function (and only loops to auto-recover parsing errors).
```ts
import OpenAI from 'openai';
diff --git a/api.md b/api.md
index 00ba41ec2..5bcb1e18b 100644
--- a/api.md
+++ b/api.md
@@ -17,10 +17,23 @@ Methods:
Types:
- ChatCompletion
+- ChatCompletionAssistantMessageParam
- ChatCompletionChunk
+- ChatCompletionContentPart
+- ChatCompletionContentPartImage
+- ChatCompletionContentPartText
+- ChatCompletionFunctionCallOption
+- ChatCompletionFunctionMessageParam
- ChatCompletionMessage
- ChatCompletionMessageParam
+- ChatCompletionMessageToolCall
+- ChatCompletionNamedToolChoice
- ChatCompletionRole
+- ChatCompletionSystemMessageParam
+- ChatCompletionTool
+- ChatCompletionToolChoiceOption
+- ChatCompletionToolMessageParam
+- ChatCompletionUserMessageParam
- CreateChatCompletionRequestMessage
Methods:
@@ -60,7 +73,7 @@ Methods:
- client.files.create({ ...params }) -> FileObject
- client.files.retrieve(fileId) -> FileObject
-- client.files.list() -> FileObjectsPage
+- client.files.list({ ...params }) -> FileObjectsPage
- client.files.del(fileId) -> FileDeleted
- client.files.retrieveContent(fileId) -> string
- client.files.waitForProcessing(id, { pollInterval = 5000, maxWait = 30 _ 60 _ 1000 }) -> Promise<FileObject>
@@ -100,6 +113,12 @@ Methods:
- client.audio.translations.create({ ...params }) -> Translation
+## Speech
+
+Methods:
+
+- client.audio.speech.create({ ...params }) -> Response
+
# Moderations
Types:
@@ -166,4 +185,108 @@ Methods:
Methods:
- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
- client.beta.chat.completions.stream(body, options?) -> ChatCompletionStream
+
+## Assistants
+
+Types:
+
+- Assistant
+- AsssitantDeleted
+
+Methods:
+
+- client.beta.assistants.create({ ...params }) -> Assistant
+- client.beta.assistants.retrieve(assistantId) -> Assistant
+- client.beta.assistants.update(assistantId, { ...params }) -> Assistant
+- client.beta.assistants.list({ ...params }) -> AssistantsPage
+- client.beta.assistants.del(assistantId) -> AsssitantDeleted
+
+### Files
+
+Types:
+
+- AssistantFile
+- FileDeleteResponse
+
+Methods:
+
+- client.beta.assistants.files.create(assistantId, { ...params }) -> AssistantFile
+- client.beta.assistants.files.retrieve(assistantId, fileId) -> AssistantFile
+- client.beta.assistants.files.list(assistantId, { ...params }) -> AssistantFilesPage
+- client.beta.assistants.files.del(assistantId, fileId) -> FileDeleteResponse
+
+## Threads
+
+Types:
+
+- Thread
+- ThreadDeleted
+
+Methods:
+
+- client.beta.threads.create({ ...params }) -> Thread
+- client.beta.threads.retrieve(threadId) -> Thread
+- client.beta.threads.update(threadId, { ...params }) -> Thread
+- client.beta.threads.del(threadId) -> ThreadDeleted
+- client.beta.threads.createAndRun({ ...params }) -> Run
+
+### Runs
+
+Types:
+
+- RequiredActionFunctionToolCall
+- Run
+
+Methods:
+
+- client.beta.threads.runs.create(threadId, { ...params }) -> Run
+- client.beta.threads.runs.retrieve(threadId, runId) -> Run
+- client.beta.threads.runs.update(threadId, runId, { ...params }) -> Run
+- client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage
+- client.beta.threads.runs.cancel(threadId, runId) -> Run
+- client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run
+
+#### Steps
+
+Types:
+
+- CodeToolCall
+- FunctionToolCall
+- MessageCreationStepDetails
+- RetrievalToolCall
+- RunStep
+- ToolCallsStepDetails
+
+Methods:
+
+- client.beta.threads.runs.steps.retrieve(threadId, runId, stepId) -> RunStep
+- client.beta.threads.runs.steps.list(threadId, runId, { ...params }) -> RunStepsPage
+
+### Messages
+
+Types:
+
+- MessageContentImageFile
+- MessageContentText
+- ThreadMessage
+- ThreadMessageDeleted
+
+Methods:
+
+- client.beta.threads.messages.create(threadId, { ...params }) -> ThreadMessage
+- client.beta.threads.messages.retrieve(threadId, messageId) -> ThreadMessage
+- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> ThreadMessage
+- client.beta.threads.messages.list(threadId, { ...params }) -> ThreadMessagesPage
+
+#### Files
+
+Types:
+
+- MessageFile
+
+Methods:
+
+- client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile
+- client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage
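
For orientation, here is a minimal sketch of how the new Assistants/Threads/Runs surface listed above fits together. The method names and signatures come from the api.md listing; the specific request parameters (`model`, `instructions`, `assistant_id`, the polling on `run.status`) are assumptions about the beta API rather than anything this patch documents.

```ts
import OpenAI from 'openai';

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // Create an assistant (these parameters are assumptions, not taken from this patch).
  const assistant = await openai.beta.assistants.create({
    model: 'gpt-4-1106-preview',
    instructions: 'You are a helpful math tutor.',
  });

  // Start a thread and add a user message to it.
  const thread = await openai.beta.threads.create();
  await openai.beta.threads.messages.create(thread.id, {
    role: 'user',
    content: 'What is 12 * 7?',
  });

  // Kick off a run of the assistant against the thread, then poll until it settles.
  let run = await openai.beta.threads.runs.create(thread.id, { assistant_id: assistant.id });
  while (run.status === 'queued' || run.status === 'in_progress') {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    run = await openai.beta.threads.runs.retrieve(thread.id, run.id);
  }

  // List the messages the assistant produced on the thread.
  const messages = await openai.beta.threads.messages.list(thread.id);
  console.log(messages.data);
}

main();
```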
diff --git a/build-deno b/build-deno
index b02b01ae5..d7f6dc298 100755
--- a/build-deno
+++ b/build-deno
@@ -37,7 +37,7 @@ done
for file in LICENSE CHANGELOG.md; do
if [ -e "${file}" ]; then cp "${file}" deno; fi
done
-npm exec ts-node -- scripts/denoify.ts
+npm exec ts-node -T -- scripts/denoify.ts
deno fmt deno
deno check deno/mod.ts
if [ -e deno_tests ]; then
diff --git a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts
index b7ab308cb..bc0cbbd8d 100644
--- a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts
+++ b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts
@@ -71,10 +71,10 @@ it(`streaming works`, async function () {
it(`ChatCompletionStream works`, async function () {
const chunks: OpenAI.Chat.ChatCompletionChunk[] = [];
const contents: [string, string][] = [];
- const messages: OpenAI.Chat.ChatCompletionMessage[] = [];
+ const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
const chatCompletions: OpenAI.Chat.ChatCompletion[] = [];
let finalContent: string | undefined;
- let finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined;
+ let finalMessage: OpenAI.Chat.ChatCompletionMessageParam | undefined;
let finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
const stream = client.beta.chat.completions
@@ -113,10 +113,10 @@ it(`ChatCompletionStream works`, async function () {
it(`aborting ChatCompletionStream works`, async function () {
const chunks: OpenAI.Chat.ChatCompletionChunk[] = [];
const contents: [string, string][] = [];
- const messages: OpenAI.Chat.ChatCompletionMessage[] = [];
+ const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
const chatCompletions: OpenAI.Chat.ChatCompletion[] = [];
let finalContent: string | undefined;
- let finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined;
+ let finalMessage: OpenAI.Chat.ChatCompletionMessageParam | undefined;
let finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
let emittedError: any;
let caughtError: any;
diff --git a/examples/audio.ts b/examples/audio.ts
new file mode 100755
index 000000000..e4ab930fd
--- /dev/null
+++ b/examples/audio.ts
@@ -0,0 +1,35 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI, { toFile } from 'openai';
+import fs from 'fs/promises';
+import path from 'path';
+
+// gets API Key from environment variable OPENAI_API_KEY
+const openai = new OpenAI();
+
+const speechFile = path.resolve(__dirname, './speech.mp3');
+
+async function main() {
+ const mp3 = await openai.audio.speech.create({
+ model: 'tts-1',
+ voice: 'alloy',
+ input: 'the quick brown fox jumped over the lazy dogs',
+ });
+
+ const buffer = Buffer.from(await mp3.arrayBuffer());
+ await fs.writeFile(speechFile, buffer);
+
+ const transcription = await openai.audio.transcriptions.create({
+ file: await toFile(buffer, 'speech.mp3'),
+ model: 'whisper-1',
+ });
+ console.log(transcription.text);
+
+ const translation = await openai.audio.translations.create({
+ file: await toFile(buffer, 'speech.mp3'),
+ model: 'whisper-1',
+ });
+ console.log(translation.text);
+}
+
+main();
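
As a small variant of the example above, the transcription endpoint can also be fed a file from disk rather than an in-memory `Buffer`. This sketch assumes the `speech.mp3` written by the script already exists on disk and that a readable stream is an accepted `file` value, as in the SDK's other upload examples.

```ts
import OpenAI from 'openai';
import fs from 'fs';

const openai = new OpenAI();

async function transcribeFromDisk() {
  // Read the audio from disk instead of buffering it; model and file name mirror the example above.
  const transcription = await openai.audio.transcriptions.create({
    file: fs.createReadStream('speech.mp3'),
    model: 'whisper-1',
  });
  console.log(transcription.text);
}

transcribeFromDisk();
```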
diff --git a/examples/tool-call-helpers.ts b/examples/tool-call-helpers.ts
new file mode 100755
index 000000000..e750b2ab9
--- /dev/null
+++ b/examples/tool-call-helpers.ts
@@ -0,0 +1,126 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI from 'openai';
+import { RunnableToolFunction } from 'openai/lib/RunnableFunction';
+
+// gets API Key from environment variable OPENAI_API_KEY
+const openai = new OpenAI();
+
+const tools: RunnableToolFunction[] = [
+ {
+ type: 'function',
+ function: {
+ name: 'list',
+ description: 'list queries books by genre, and returns a list of names of books',
+ parameters: {
+ type: 'object',
+ properties: {
+ genre: { type: 'string', enum: ['mystery', 'nonfiction', 'memoir', 'romance', 'historical'] },
+ },
+ },
+ function: list,
+ parse: JSON.parse,
+ },
+ } as RunnableToolFunction<{ genre: string }>,
+ {
+ type: 'function',
+ function: {
+ name: 'search',
+ description: 'search queries books by their name and returns a list of book names and their ids',
+ parameters: {
+ type: 'object',
+ properties: {
+ name: { type: 'string' },
+ },
+ },
+ function: search,
+ parse: JSON.parse,
+ },
+ } as RunnableToolFunction<{ name: string }>,
+ {
+ type: 'function',
+ function: {
+ name: 'get',
+ description:
+ "get returns a book's detailed information based on the id of the book. Note that this does not accept names, and only IDs, which you can get by using search.",
+ parameters: {
+ type: 'object',
+ properties: {
+ id: { type: 'string' },
+ },
+ },
+ function: get,
+ parse: JSON.parse,
+ },
+ } as RunnableToolFunction<{ id: string }>,
+];
+
+async function main() {
+ const runner = await openai.beta.chat.completions
+ .runTools({
+ model: 'gpt-4-1106-preview',
+ stream: true,
+ tools,
+ messages: [
+ {
+ role: 'system',
+ content:
+ 'Please use our book database, which you can access using functions to answer the following questions.',
+ },
+ {
+ role: 'user',
+ content:
+ 'I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?',
+ },
+ ],
+ })
+ .on('message', (msg) => console.log('msg', msg))
+ .on('functionCall', (functionCall) => console.log('functionCall', functionCall))
+ .on('functionCallResult', (functionCallResult) => console.log('functionCallResult', functionCallResult))
+ .on('content', (diff) => process.stdout.write(diff));
+
+ const result = await runner.finalChatCompletion();
+ console.log();
+ console.log('messages');
+ console.log(runner.messages);
+
+ console.log();
+ console.log('final chat completion');
+ console.dir(result, { depth: null });
+}
+
+const db = [
+ {
+ id: 'a1',
+ name: 'To Kill a Mockingbird',
+ genre: 'historical',
+ description: `Compassionate, dramatic, and deeply moving, "To Kill A Mockingbird" takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. Today it is regarded as a masterpiece of American literature.`,
+ },
+ {
+ id: 'a2',
+ name: 'All the Light We Cannot See',
+ genre: 'historical',
+ description: `In a mining town in Germany, Werner Pfennig, an orphan, grows up with his younger sister, enchanted by a crude radio they find that brings them news and stories from places they have never seen or imagined. Werner becomes an expert at building and fixing these crucial new instruments and is enlisted to use his talent to track down the resistance. Deftly interweaving the lives of Marie-Laure and Werner, Doerr illuminates the ways, against all odds, people try to be good to one another.`,
+ },
+ {
+ id: 'a3',
+ name: 'Where the Crawdads Sing',
+ genre: 'historical',
+ description: `For years, rumors of the “Marsh Girl” haunted Barkley Cove, a quiet fishing village. Kya Clark is barefoot and wild; unfit for polite society. So in late 1969, when the popular Chase Andrews is found dead, locals immediately suspect her.
+But Kya is not what they say. A born naturalist with just one day of school, she takes life's lessons from the land, learning the real ways of the world from the dishonest signals of fireflies. But while she has the skills to live in solitude forever, the time comes when she yearns to be touched and loved. Drawn to two young men from town, who are each intrigued by her wild beauty, Kya opens herself to a new and startling world—until the unthinkable happens.`,
+ },
+];
+
+async function list({ genre }: { genre: string }) {
+ return db.filter((item) => item.genre === genre).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function search({ name }: { name: string }) {
+ return db.filter((item) => item.name.includes(name)).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function get({ id }: { id: string }) {
+ return db.find((item) => item.id === id)!;
+}
+
+main();
diff --git a/helpers.md b/helpers.md
index 1ae25ef82..4a987b347 100644
--- a/helpers.md
+++ b/helpers.md
@@ -7,7 +7,7 @@ openai.chat.completions.stream({ stream?: false, … }, options?): ChatCompletio
```
`openai.chat.completions.stream()` returns a `ChatCompletionStreamingRunner`, which emits events, has an async
-iterator, and exposes a helper methods to accumulate chunks into a convenient shape and make it easy to reason
+iterator, and exposes helper methods to accumulate chunks into a convenient shape and make it easy to reason
about the conversation.
Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` which returns an async
@@ -23,10 +23,14 @@ See an example of streaming helpers in action in [`examples/stream.ts`](examples
```ts
openai.chat.completions.runFunctions({ stream: false, … }, options?): ChatCompletionRunner
openai.chat.completions.runFunctions({ stream: true, … }, options?): ChatCompletionStreamingRunner
+
+openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner
+openai.chat.completions.runTools({ stream: true, … }, options?): ChatCompletionStreamingRunner
```
-`openai.chat.completions.runFunctions()` returns either a Runner for automating function calls with chat
-completions. The runner automatically calls the JavaScript functions you provide and sends their results back
+`openai.chat.completions.runFunctions()` and `openai.chat.completions.runTools()` return a Runner
+for automating function calls with chat completions.
+The runner automatically calls the JavaScript functions you provide and sends their results back
to the API, looping as long as the model requests function calls.
If you pass a `parse` function, it will automatically parse the `arguments` for you and returns any parsing
@@ -36,7 +40,7 @@ as a string.
```ts
client.chat.completions.runFunctions({
model: 'gpt-3.5-turbo',
- messages: [{ role: 'user', content: 'How's the weather this week?' }],
+ messages: [{ role: 'user', content: 'How is the weather this week?' }],
functions: [{
function: getWeather as (args: { location: string, time: Date}) => any,
parse: parseFunction as (args: strings) => { location: string, time: Date }.
@@ -51,13 +55,34 @@ client.chat.completions.runFunctions({
});
```
+```ts
+client.chat.completions.runTools({
+ model: 'gpt-3.5-turbo',
+ messages: [{ role: 'user', content: 'How is the weather this week?' }],
+ tools: [{
+ type: 'function',
+ function: {
+ function: getWeather as (args: { location: string, time: Date}) => any,
+ parse: parseFunction as (args: string) => { location: string, time: Date },
+ parameters: {
+ type: 'object',
+ properties: {
+ location: { type: 'string' },
+ time: { type: 'string', format: 'date-time' },
+ },
+ },
+ }
+ }],
+});
+```
+
If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that
function (and only loops to auto-recover parsing errors).
-By default, we run the loop up to five chat completions from the API. You can change this behavior by
+By default, we run the loop up to 10 chat completions from the API. You can change this behavior by
adjusting `maxChatCompletions` in the request options object. Note that `max_tokens` is the limit per
-chat completion request, not for the entire run functions call run.
+chat completion request, not for the entire run.
See an example of automated function calls in action in
[`examples/function-call-helpers.ts`](examples/function-call-helpers.ts).
@@ -80,7 +105,7 @@ fields and is built up from the chunks.
The event fired when a chat completion is returned or done being streamed by the API.
-#### `.on('message', (message: ChatCompletionMessage | ChatCompletionMessageParam) => …)`
+#### `.on('message', (message: ChatCompletionMessageParam) => …)`
The event fired when a new message is either sent or received from the API. Does not fire for the messages
sent as the parameter to either `.runFunctions()` or `.stream()`
@@ -113,7 +138,7 @@ The event fired for the final chat completion. If the function call runner excee
The event fired for the `content` of the last `role: "assistant"` message. Not fired if there is no `assistant`
message.
-#### `.on('finalMessage', (message: ChatCompletionMessage | ChatCompletionMessageParam) => …)`
+#### `.on('finalMessage', (message: ChatCompletionMessage) => …)`
The event fired for the last message.
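
Putting the events documented above together, a hedged sketch of wiring handlers onto a `runTools()` runner might look like the following. `getWeather` is a stand-in implementation, since helpers.md leaves it abstract; the event names are the ones documented above.

```ts
import OpenAI from 'openai';
import { RunnableToolFunction } from 'openai/lib/RunnableFunction';

const openai = new OpenAI();

// A stand-in weather lookup; helpers.md leaves getWeather/parseFunction abstract.
async function getWeather({ location }: { location: string }) {
  return { location, forecast: 'sunny', high: 22 };
}

async function main() {
  const runner = openai.beta.chat.completions
    .runTools({
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'How is the weather this week in Toronto?' }],
      tools: [
        {
          type: 'function',
          function: {
            name: 'getWeather',
            description: 'Gets the weather forecast for a location.',
            parameters: { type: 'object', properties: { location: { type: 'string' } } },
            function: getWeather,
            parse: JSON.parse,
          },
        } as RunnableToolFunction<{ location: string }>,
      ],
    })
    // Event names as documented in helpers.md above.
    .on('message', (message) => console.log('message', message))
    .on('functionCall', (functionCall) => console.log('functionCall', functionCall))
    .on('functionCallResult', (result) => console.log('functionCallResult', result))
    .on('finalMessage', (message) => console.log('finalMessage', message));

  const completion = await runner.finalChatCompletion();
  console.dir(completion, { depth: null });
}

main();
```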
diff --git a/src/index.ts b/src/index.ts
index 35587dda3..e213a00b6 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -226,10 +226,23 @@ export namespace OpenAI {
export import Chat = API.Chat;
export import ChatCompletion = API.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = API.ChatCompletionChunk;
+ export import ChatCompletionContentPart = API.ChatCompletionContentPart;
+ export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage;
+ export import ChatCompletionContentPartText = API.ChatCompletionContentPartText;
+ export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption;
+ export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam;
export import ChatCompletionMessage = API.ChatCompletionMessage;
export import ChatCompletionMessageParam = API.ChatCompletionMessageParam;
+ export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall;
+ export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice;
export import ChatCompletionRole = API.ChatCompletionRole;
+ export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTool = API.ChatCompletionTool;
+ export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption;
+ export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam;
+ export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam;
export import ChatCompletionCreateParams = API.ChatCompletionCreateParams;
export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming;
export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming;
@@ -249,6 +262,7 @@ export namespace OpenAI {
export import FileObject = API.FileObject;
export import FileObjectsPage = API.FileObjectsPage;
export import FileCreateParams = API.FileCreateParams;
+ export import FileListParams = API.FileListParams;
export import Images = API.Images;
export import Image = API.Image;
diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts
index c8ee555a3..c8be63ab3 100644
--- a/src/lib/AbstractChatCompletionRunner.ts
+++ b/src/lib/AbstractChatCompletionRunner.ts
@@ -6,6 +6,8 @@ import {
type ChatCompletionMessage,
type ChatCompletionMessageParam,
type ChatCompletionCreateParams,
+ type ChatCompletionAssistantMessageParam,
+ type ChatCompletionTool,
} from 'openai/resources/chat/completions';
import { APIUserAbortError, OpenAIError } from 'openai/error';
import {
@@ -13,8 +15,18 @@ import {
isRunnableFunctionWithParse,
type BaseFunctionsArgs,
} from './RunnableFunction';
-import { ChatCompletionFunctionRunnerParams } from './ChatCompletionRunner';
-import { ChatCompletionStreamingFunctionRunnerParams } from './ChatCompletionStreamingRunner';
+import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from './ChatCompletionRunner';
+import {
+ ChatCompletionStreamingFunctionRunnerParams,
+ ChatCompletionStreamingToolRunnerParams,
+} from './ChatCompletionStreamingRunner';
+import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatCompletionUtils';
+
+const DEFAULT_MAX_CHAT_COMPLETIONS = 10;
+export interface RunnerOptions extends Core.RequestOptions {
+ /** How many requests to make before canceling. Default 10. */
+ maxChatCompletions?: number;
+}
export abstract class AbstractChatCompletionRunner<
Events extends CustomEvents = AbstractChatCompletionRunnerEvents,
@@ -32,7 +44,7 @@ export abstract class AbstractChatCompletionRunner<
#listeners: { [Event in keyof Events]?: ListenersForEvent } = {};
protected _chatCompletions: ChatCompletion[] = [];
- messages: (ChatCompletionMessage | ChatCompletionMessageParam)[] = [];
+ messages: ChatCompletionMessageParam[] = [];
#ended = false;
#errored = false;
@@ -73,18 +85,25 @@ export abstract class AbstractChatCompletionRunner<
this._chatCompletions.push(chatCompletion);
this._emit('chatCompletion', chatCompletion);
const message = chatCompletion.choices[0]?.message;
- if (message) this._addMessage(message);
+ if (message) this._addMessage(message as ChatCompletionMessageParam);
return chatCompletion;
}
- protected _addMessage(message: ChatCompletionMessage | ChatCompletionMessageParam, emit = true) {
+ protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
this.messages.push(message);
if (emit) {
this._emit('message', message);
- if (message.role === 'function' && message.content) {
- this._emit('functionCallResult', message.content);
- } else if (message.function_call) {
+ if ((isFunctionMessage(message) || isToolMessage(message)) && message.content) {
+ // Note, this assumes that {role: 'tool', content: …} is always the result of a tool call of type=function.
+ this._emit('functionCallResult', message.content as string);
+ } else if (isAssistantMessage(message) && message.function_call) {
this._emit('functionCall', message.function_call);
+ } else if (isAssistantMessage(message) && message.tool_calls) {
+ for (const tool_call of message.tool_calls) {
+ if (tool_call.type === 'function') {
+ this._emit('functionCall', tool_call.function);
+ }
+ }
}
}
}
@@ -194,11 +213,7 @@ export abstract class AbstractChatCompletionRunner<
}
#getFinalContent(): string | null {
- for (let i = this.messages.length - 1; i >= 0; i--) {
- const message = this.messages[i];
- if (message?.role === 'assistant') return message.content;
- }
- return null;
+ return this.#getFinalMessage().content;
}
/**
@@ -210,21 +225,32 @@ export abstract class AbstractChatCompletionRunner<
return this.#getFinalContent();
}
+ #getFinalMessage(): ChatCompletionAssistantMessageParam {
+ let i = this.messages.length;
+ while (i-- > 0) {
+ const message = this.messages[i];
+ if (isAssistantMessage(message)) {
+ return message;
+ }
+ }
+ throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');
+ }
+
/**
- * @returns a promise that resolves with the the final ChatCompletionMessage, or rejects
- * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+ * @returns a promise that resolves with the final assistant ChatCompletionMessage response,
+ * or rejects if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
*/
async finalMessage(): Promise {
await this.done();
- const message = this.messages[this.messages.length - 1];
- if (!message) throw new OpenAIError('stream ended without producing a ChatCompletionMessage');
- return message;
+ return this.#getFinalMessage();
}
#getFinalFunctionCall(): ChatCompletionMessage.FunctionCall | undefined {
for (let i = this.messages.length - 1; i >= 0; i--) {
const message = this.messages[i];
- if (message?.function_call) return message.function_call;
+ if (isAssistantMessage(message) && message?.function_call) {
+ return message.function_call;
+ }
}
}
@@ -240,7 +266,9 @@ export abstract class AbstractChatCompletionRunner<
#getFinalFunctionCallResult(): string | undefined {
for (let i = this.messages.length - 1; i >= 0; i--) {
const message = this.messages[i];
- if (message?.role === 'function' && message.content != null) return message.content;
+ if (isFunctionMessage(message) && message.content != null) {
+ return message.content as string;
+ }
}
}
@@ -281,13 +309,18 @@ export abstract class AbstractChatCompletionRunner<
}
if (error instanceof APIUserAbortError) {
this.#aborted = true;
- this._emit('abort', error);
+ return this._emit('abort', error);
}
- const openAIError: OpenAIError =
- error instanceof OpenAIError ? error : (
- new OpenAIError(error instanceof Error ? error.message : String(error))
- );
- this._emit('error', openAIError);
+ if (error instanceof OpenAIError) {
+ return this._emit('error', error);
+ }
+ if (error instanceof Error) {
+ const openAIError: OpenAIError = new OpenAIError(error.message);
+ // @ts-ignore
+ openAIError.cause = error;
+ return this._emit('error', openAIError);
+ }
+ return this._emit('error', new OpenAIError(String(error)));
};
protected _emit(event: Event, ...args: EventParameters) {
@@ -305,6 +338,17 @@ export abstract class AbstractChatCompletionRunner<
listeners.forEach(({ listener }: any) => listener(...args));
}
+ if (event === 'abort') {
+ const error = args[0] as APIUserAbortError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ return;
+ }
+
if (event === 'error') {
// NOTE: _emit('error', error) should only be called from #handleError().
@@ -343,6 +387,14 @@ export abstract class AbstractChatCompletionRunner<
}
}
+ #validateParams(params: ChatCompletionCreateParams): void {
+ if (params.n != null && params.n > 1) {
+ throw new OpenAIError(
+ 'ChatCompletion convenience helpers only support n=1 at this time. To use n>1, please use chat.completions.create() directly.',
+ );
+ }
+ }
+
protected async _createChatCompletion(
completions: Completions,
params: ChatCompletionCreateParams,
@@ -353,6 +405,8 @@ export abstract class AbstractChatCompletionRunner<
if (signal.aborted) this.controller.abort();
signal.addEventListener('abort', () => this.controller.abort());
}
+ this.#validateParams(params);
+
const chatCompletion = await completions.create(
{ ...params, stream: false },
{ ...options, signal: this.controller.signal },
@@ -377,10 +431,12 @@ export abstract class AbstractChatCompletionRunner<
params:
| ChatCompletionFunctionRunnerParams
| ChatCompletionStreamingFunctionRunnerParams,
- options?: Core.RequestOptions & { maxChatCompletions?: number },
+ options?: RunnerOptions,
) {
+ const role = 'function' as const;
const { function_call = 'auto', stream, ...restParams } = params;
- const isSingleFunctionCall = typeof function_call !== 'string' && function_call?.name;
+ const singleFunctionToCall = typeof function_call !== 'string' && function_call?.name;
+ const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
const functionsByName: Record<string, RunnableFunction<any>> = {};
for (const f of params.functions) {
@@ -399,7 +455,7 @@ export abstract class AbstractChatCompletionRunner<
this._addMessage(message, false);
}
- for (let i = 0; i < (options?.maxChatCompletions ?? 5); ++i) {
+ for (let i = 0; i < maxChatCompletions; ++i) {
const chatCompletion: ChatCompletion = await this._createChatCompletion(
completions,
{
@@ -417,37 +473,147 @@ export abstract class AbstractChatCompletionRunner<
if (!message.function_call) return;
const { name, arguments: args } = message.function_call;
const fn = functionsByName[name];
- if (!fn || (typeof function_call !== 'string' && name !== function_call?.name)) {
- this._addMessage({
- role: 'function',
- name,
- content: `Invalid function_call: ${JSON.stringify(name)}. Available options are: ${functions
- .map((f) => JSON.stringify(f.name))
- .join(', ')}. Please try again`,
- });
- if (isSingleFunctionCall) return;
+ if (!fn) {
+ const content = `Invalid function_call: ${JSON.stringify(name)}. Available options are: ${functions
+ .map((f) => JSON.stringify(f.name))
+ .join(', ')}. Please try again`;
+
+ this._addMessage({ role, name, content });
+ continue;
+ } else if (singleFunctionToCall && singleFunctionToCall !== name) {
+ const content = `Invalid function_call: ${JSON.stringify(name)}. ${JSON.stringify(
+ singleFunctionToCall,
+ )} requested. Please try again`;
+
+ this._addMessage({ role, name, content });
continue;
}
+
let parsed;
try {
parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;
} catch (error) {
this._addMessage({
- role: 'function',
+ role,
name,
content: error instanceof Error ? error.message : String(error),
});
continue;
}
- const rawContent = await (fn.function as any)(parsed as any, this);
- const content =
- typeof rawContent === 'string' ? rawContent
- : rawContent === undefined ? 'undefined'
- : JSON.stringify(rawContent);
- this._addMessage({ role: 'function', name, content });
-
- if (isSingleFunctionCall) return;
+
+ // @ts-expect-error it can't rule out `never` type.
+ const rawContent = await fn.function(parsed, this);
+ const content = this.#stringifyFunctionCallResult(rawContent);
+
+ this._addMessage({ role, name, content });
+
+ if (singleFunctionToCall) return;
+ }
+ }
+
+ protected async _runTools<FunctionsArgs extends BaseFunctionsArgs>(
+ completions: Completions,
+ params:
+ | ChatCompletionToolRunnerParams<FunctionsArgs>
+ | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
+ options?: RunnerOptions,
+ ) {
+ const role = 'tool' as const;
+ const { tool_choice = 'auto', stream, ...restParams } = params;
+ const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+ const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
+
+ const functionsByName: Record<string, RunnableFunction<any>> = {};
+ for (const f of params.tools) {
+ if (f.type === 'function') {
+ functionsByName[f.function.name || f.function.function.name] = f.function;
+ }
}
+
+ const tools: ChatCompletionTool[] =
+ 'tools' in params ?
+ params.tools.map((t) =>
+ t.type === 'function' ?
+ {
+ type: 'function',
+ function: {
+ name: t.function.name || t.function.function.name,
+ parameters: t.function.parameters as Record<string, unknown>,
+ description: t.function.description,
+ },
+ }
+ : (t as unknown as ChatCompletionTool),
+ )
+ : (undefined as any);
+
+ for (const message of params.messages) {
+ this._addMessage(message, false);
+ }
+
+ for (let i = 0; i < maxChatCompletions; ++i) {
+ const chatCompletion: ChatCompletion = await this._createChatCompletion(
+ completions,
+ {
+ ...restParams,
+ tool_choice,
+ tools,
+ messages: [...this.messages],
+ },
+ options,
+ );
+ const message = chatCompletion.choices[0]?.message;
+ if (!message) {
+ throw new OpenAIError(`missing message in ChatCompletion response`);
+ }
+ if (!message.tool_calls) return;
+
+ for (const tool_call of message.tool_calls) {
+ if (tool_call.type !== 'function') continue;
+ const tool_call_id = tool_call.id;
+ const { name, arguments: args } = tool_call.function;
+ const fn = functionsByName[name];
+
+ if (!fn) {
+ const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${tools
+ .map((f) => JSON.stringify(f.function.name))
+ .join(', ')}. Please try again`;
+
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ } else if (singleFunctionToCall && singleFunctionToCall !== name) {
+ const content = `Invalid tool_call: ${JSON.stringify(name)}. ${JSON.stringify(
+ singleFunctionToCall,
+ )} requested. Please try again`;
+
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ }
+
+ let parsed;
+ try {
+ parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;
+ } catch (error) {
+ const content = error instanceof Error ? error.message : String(error);
+ this._addMessage({ role, tool_call_id, content });
+ continue;
+ }
+
+ // @ts-expect-error it can't rule out `never` type.
+ const rawContent = await fn.function(parsed, this);
+ const content = this.#stringifyFunctionCallResult(rawContent);
+ this._addMessage({ role, tool_call_id, content });
+
+ if (singleFunctionToCall) return;
+ }
+ }
+ }
+
+ #stringifyFunctionCallResult(rawContent: unknown): string {
+ return (
+ typeof rawContent === 'string' ? rawContent
+ : rawContent === undefined ? 'undefined'
+ : JSON.stringify(rawContent)
+ );
}
}
@@ -473,10 +639,10 @@ type EventParameters, Event extends keyof Event
export interface AbstractChatCompletionRunnerEvents {
connect: () => void;
functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
- message: (message: ChatCompletionMessage | ChatCompletionMessageParam) => void;
+ message: (message: ChatCompletionMessageParam) => void;
chatCompletion: (completion: ChatCompletion) => void;
finalContent: (contentSnapshot: string) => void;
- finalMessage: (message: ChatCompletionMessage | ChatCompletionMessageParam) => void;
+ finalMessage: (message: ChatCompletionMessageParam) => void;
finalChatCompletion: (completion: ChatCompletion) => void;
finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
functionCallResult: (content: string) => void;
diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/src/lib/ChatCompletionRunFunctions.test.ts
index 677f9513e..71a99b366 100644
--- a/src/lib/ChatCompletionRunFunctions.test.ts
+++ b/src/lib/ChatCompletionRunFunctions.test.ts
@@ -8,9 +8,11 @@ import {
ChatCompletionStreamingRunner,
type ChatCompletionStreamingFunctionRunnerParams,
} from 'openai/resources/beta/chat/completions';
+import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';
import { type RequestInfo, type RequestInit } from 'openai/_shims/index';
import { Response } from 'node-fetch';
+import { isAssistantMessage } from './chatCompletionUtils';
type Fetch = (req: string | RequestInfo, init?: RequestInit) => Promise;
@@ -181,12 +183,12 @@ function* functionCallDeltas(
class RunnerListener {
readonly contents: string[] = [];
- readonly messages: OpenAI.Chat.ChatCompletionMessage[] = [];
+ readonly messages: ChatCompletionMessageParam[] = [];
readonly chatCompletions: OpenAI.Chat.ChatCompletion[] = [];
readonly functionCalls: OpenAI.Chat.ChatCompletionMessage.FunctionCall[] = [];
readonly functionCallResults: string[] = [];
finalContent: string | null = null;
- finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined;
+ finalMessage: ChatCompletionMessageParam | undefined;
finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
finalFunctionCall: OpenAI.Chat.ChatCompletionMessage.FunctionCall | undefined;
finalFunctionCallResult: string | undefined;
@@ -256,8 +258,8 @@ class RunnerListener {
if (error) return;
const expectedContents = this.messages
- .filter((m) => m.role === 'assistant')
- .map((m) => m.content)
+ .filter(isAssistantMessage)
+ .map((m) => m.content as string)
.filter(Boolean);
expect(this.contents).toEqual(expectedContents);
expect(this.finalMessage).toEqual(this.messages[this.messages.length - 1]);
@@ -298,13 +300,13 @@ class RunnerListener {
class StreamingRunnerListener {
readonly eventChunks: OpenAI.Chat.ChatCompletionChunk[] = [];
readonly eventContents: [string, string][] = [];
- readonly eventMessages: OpenAI.Chat.ChatCompletionMessage[] = [];
+ readonly eventMessages: ChatCompletionMessageParam[] = [];
readonly eventChatCompletions: OpenAI.Chat.ChatCompletion[] = [];
readonly eventFunctionCalls: OpenAI.Chat.ChatCompletionMessage.FunctionCall[] = [];
readonly eventFunctionCallResults: string[] = [];
finalContent: string | null = null;
- finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined;
+ finalMessage: ChatCompletionMessageParam | undefined;
finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
finalFunctionCall: OpenAI.Chat.ChatCompletionMessage.FunctionCall | undefined;
finalFunctionCallResult: string | undefined;
@@ -1326,7 +1328,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1387,7 +1389,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
});
@@ -1459,7 +1461,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}),
handleRequest(async function* (request): AsyncIterable {
@@ -1488,7 +1490,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1553,7 +1555,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1583,7 +1585,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1626,7 +1628,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1696,7 +1698,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}),
runner.done(),
@@ -1751,7 +1753,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}),
handleRequest(async function* (request): AsyncIterable {
@@ -1789,7 +1791,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}),
handleRequest(async function* (request): AsyncIterable {
@@ -1828,7 +1830,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1897,7 +1899,7 @@ describe('resource completions', () => {
],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}),
runner.done(),
@@ -1940,7 +1942,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
@@ -1973,7 +1975,7 @@ describe('resource completions', () => {
choices: [choice],
created: Math.floor(Date.now() / 1000),
model: 'gpt-3.5-turbo',
- object: 'chat.completion',
+ object: 'chat.completion.chunk',
};
}
}),
diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts
index e2caf32eb..4a7ca18a6 100644
--- a/src/lib/ChatCompletionRunner.ts
+++ b/src/lib/ChatCompletionRunner.ts
@@ -1,15 +1,15 @@
-import * as Core from 'openai/core';
import {
type Completions,
- type ChatCompletionMessage,
type ChatCompletionMessageParam,
type ChatCompletionCreateParamsNonStreaming,
} from 'openai/resources/chat/completions';
-import { type RunnableFunctions, type BaseFunctionsArgs } from './RunnableFunction';
+import { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction';
import {
AbstractChatCompletionRunner,
AbstractChatCompletionRunnerEvents,
+ RunnerOptions,
} from './AbstractChatCompletionRunner';
+import { isAssistantMessage } from './chatCompletionUtils';
export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {
content: (content: string) => void;
@@ -22,21 +22,38 @@ export type ChatCompletionFunctionRunnerParams;
};
+export type ChatCompletionToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<
+ ChatCompletionCreateParamsNonStreaming,
+ 'tools'
+> & {
+ tools: RunnableTools<FunctionsArgs>;
+};
+
export class ChatCompletionRunner extends AbstractChatCompletionRunner {
static runFunctions(
completions: Completions,
params: ChatCompletionFunctionRunnerParams,
- options?: Core.RequestOptions & { maxChatCompletions?: number },
+ options?: RunnerOptions,
): ChatCompletionRunner {
const runner = new ChatCompletionRunner();
runner._run(() => runner._runFunctions(completions, params, options));
return runner;
}
- override _addMessage(message: ChatCompletionMessage | ChatCompletionMessageParam) {
+ static runTools<FunctionsArgs extends BaseFunctionsArgs>(
+ completions: Completions,
+ params: ChatCompletionToolRunnerParams<FunctionsArgs>,
+ options?: RunnerOptions,
+ ): ChatCompletionRunner {
+ const runner = new ChatCompletionRunner();
+ runner._run(() => runner._runTools(completions, params, options));
+ return runner;
+ }
+
+ override _addMessage(message: ChatCompletionMessageParam) {
super._addMessage(message);
- if (message.role === 'assistant' && message.content) {
- this._emit('content', message.content);
+ if (isAssistantMessage(message) && message.content) {
+ this._emit('content', message.content as string);
}
}
}
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
index 4e68f660f..1f14c8e33 100644
--- a/src/lib/ChatCompletionStream.ts
+++ b/src/lib/ChatCompletionStream.ts
@@ -5,7 +5,7 @@ import {
type ChatCompletion,
type ChatCompletionChunk,
type ChatCompletionCreateParams,
- ChatCompletionCreateParamsBase,
+ type ChatCompletionCreateParamsBase,
} from 'openai/resources/chat/completions';
import {
AbstractChatCompletionRunner,
@@ -64,7 +64,7 @@ export class ChatCompletionStream
if (this.ended) return;
const completion = this.#accumulateChatCompletion(chunk);
this._emit('chunk', chunk, completion);
- const delta = chunk.choices[0]?.delta.content;
+ const delta = chunk.choices[0]?.delta?.content;
const snapshot = completion.choices[0]?.message;
if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) {
this._emit('content', delta, snapshot.content);
@@ -137,31 +137,53 @@ export class ChatCompletionStream
#accumulateChatCompletion(chunk: ChatCompletionChunk): ChatCompletionSnapshot {
let snapshot = this.#currentChatCompletionSnapshot;
+ const { choices, ...rest } = chunk;
if (!snapshot) {
- const { choices, ...rest } = chunk;
- this.#currentChatCompletionSnapshot = snapshot = {
+ snapshot = this.#currentChatCompletionSnapshot = {
...rest,
choices: [],
};
+ } else {
+ Object.assign(snapshot, rest);
}
- for (const { delta, finish_reason, index } of chunk.choices) {
+
+ for (const { delta, finish_reason, index, ...other } of chunk.choices) {
let choice = snapshot.choices[index];
- if (!choice) snapshot.choices[index] = choice = { finish_reason, index, message: delta };
- else {
- if (finish_reason) choice.finish_reason = finish_reason;
- const { content, function_call, role } = delta;
- if (content) choice.message.content = (choice.message.content || '') + content;
- if (role) choice.message.role = role;
- if (function_call) {
- if (!choice.message.function_call) choice.message.function_call = function_call;
- else {
- if (function_call.arguments)
- choice.message.function_call.arguments =
- (choice.message.function_call.arguments || '') + function_call.arguments;
- if (function_call.name) choice.message.function_call.name = function_call.name;
+ if (!choice) {
+ snapshot.choices[index] = { finish_reason, index, message: delta, ...other };
+ continue;
+ }
+
+ if (finish_reason) choice.finish_reason = finish_reason;
+ Object.assign(choice, other);
+
+ if (!delta) continue; // Shouldn't happen; just in case.
+ const { content, function_call, role, tool_calls } = delta;
+
+ if (content) choice.message.content = (choice.message.content || '') + content;
+ if (role) choice.message.role = role;
+ if (function_call) {
+ if (!choice.message.function_call) {
+ choice.message.function_call = function_call;
+ } else {
+ if (function_call.name) choice.message.function_call.name = function_call.name;
+ if (function_call.arguments) {
+ choice.message.function_call.arguments ??= '';
+ choice.message.function_call.arguments += function_call.arguments;
}
}
}
+ if (tool_calls) {
+ if (!choice.message.tool_calls) choice.message.tool_calls = [];
+ for (const { index, id, type, function: fn } of tool_calls) {
+ const tool_call = (choice.message.tool_calls[index] ??= {});
+ if (id) tool_call.id = id;
+ if (type) tool_call.type = type;
+ if (fn) tool_call.function ??= { arguments: '' };
+ if (fn?.name) tool_call.function!.name = fn.name;
+ if (fn?.arguments) tool_call.function!.arguments += fn.arguments;
+ }
+ }
}
return snapshot;
}
@@ -216,7 +238,8 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
id,
choices: choices.map(({ message, finish_reason, index }): ChatCompletion.Choice => {
if (!finish_reason) throw new OpenAIError(`missing finish_reason for choice ${index}`);
- const { content = null, function_call, role } = message;
+ const { content = null, function_call, tool_calls } = message;
+ const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.
if (!role) throw new OpenAIError(`missing role for choice ${index}`);
if (function_call) {
const { arguments: args, name } = function_call;
@@ -224,6 +247,34 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
if (!name) throw new OpenAIError(`missing function_call.name for choice ${index}`);
return { message: { content, function_call: { arguments: args, name }, role }, finish_reason, index };
}
+ if (tool_calls) {
+ return {
+ index,
+ finish_reason,
+ message: {
+ role,
+ content,
+ tool_calls: tool_calls.map((tool_call, i) => {
+ const { function: fn, type, id } = tool_call;
+ const { arguments: args, name } = fn || {};
+ if (id == null)
+ throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].id\n${str(snapshot)}`);
+ if (type == null)
+ throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].type\n${str(snapshot)}`);
+ if (name == null)
+ throw new OpenAIError(
+ `missing choices[${index}].tool_calls[${i}].function.name\n${str(snapshot)}`,
+ );
+ if (args == null)
+ throw new OpenAIError(
+ `missing choices[${index}].tool_calls[${i}].function.arguments\n${str(snapshot)}`,
+ );
+
+ return { id, type, function: { name, arguments: args } };
+ }),
+ },
+ };
+ }
return { message: { content: content, role }, finish_reason, index };
}),
created,
@@ -232,6 +283,10 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
};
}
+function str(x: unknown) {
+ return JSON.stringify(x);
+}
+
/**
* Represents a streamed chunk of a chat completion response returned by model,
* based on the provided input.
@@ -273,7 +328,7 @@ export namespace ChatCompletionSnapshot {
* content was omitted due to a flag from our content filters, or `function_call`
* if the model called a function.
*/
- finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter' | null;
+ finish_reason: ChatCompletion.Choice['finish_reason'] | null;
/**
* The index of the choice in the list of choices.
@@ -297,13 +352,46 @@ export namespace ChatCompletionSnapshot {
*/
function_call?: Message.FunctionCall;
+ tool_calls?: Array<Message.ToolCall>;
+
/**
* The role of the author of this message.
*/
- role?: 'system' | 'user' | 'assistant' | 'function';
+ role?: 'system' | 'user' | 'assistant' | 'function' | 'tool';
}
export namespace Message {
+ export interface ToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ id?: string;
+
+ function?: ToolCall.Function;
+
+ /**
+ * The type of the tool.
+ */
+ type?: 'function';
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name?: string;
+ }
+ }
+
/**
* The name and arguments of a function that should be called, as generated by the
* model.
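
A short sketch of how the accumulation logic above is typically consumed. The `chunk` event signature (raw chunk plus accumulated snapshot) and `finalChatCompletion()` come from this file; the model and prompt are placeholders.

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const stream = openai.beta.chat.completions.stream({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say this is a test' }],
  });

  // Each 'chunk' event also carries the accumulated ChatCompletionSnapshot built above
  // (content, function_call and tool_calls merged so far).
  stream.on('chunk', (chunk, snapshot) => {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  });

  // Once the stream ends, the snapshot is finalized into a plain ChatCompletion.
  const completion = await stream.finalChatCompletion();
  console.log('\n', completion.choices[0]?.message);
}

main();
```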
diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts
index 1e5e09de6..8ea911ea0 100644
--- a/src/lib/ChatCompletionStreamingRunner.ts
+++ b/src/lib/ChatCompletionStreamingRunner.ts
@@ -1,12 +1,11 @@
-import * as Core from 'openai/core';
import {
Completions,
type ChatCompletionChunk,
type ChatCompletionCreateParamsStreaming,
} from 'openai/resources/chat/completions';
-import { type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner';
+import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner';
import { type ReadableStream } from 'openai/_shims/index';
-import { type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction';
+import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction';
import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream';
export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
@@ -21,6 +20,13 @@ export type ChatCompletionStreamingFunctionRunnerParams;
};
+export type ChatCompletionStreamingToolRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<
+ ChatCompletionCreateParamsStreaming,
+ 'tools'
+> & {
+ tools: RunnableTools<FunctionsArgs>;
+};
+
export class ChatCompletionStreamingRunner
extends ChatCompletionStream
implements AsyncIterable
@@ -34,10 +40,20 @@ export class ChatCompletionStreamingRunner
static runFunctions(
completions: Completions,
params: ChatCompletionStreamingFunctionRunnerParams,
- options?: Core.RequestOptions & { maxChatCompletions?: number },
+ options?: RunnerOptions,
): ChatCompletionStreamingRunner {
const runner = new ChatCompletionStreamingRunner();
runner._run(() => runner._runFunctions(completions, params, options));
return runner;
}
+
+  static runTools<T extends (string | object)[]>(
+    completions: Completions,
+    params: ChatCompletionStreamingToolRunnerParams<T>,
+ options?: RunnerOptions,
+ ): ChatCompletionStreamingRunner {
+ const runner = new ChatCompletionStreamingRunner();
+ runner._run(() => runner._runTools(completions, params, options));
+ return runner;
+ }
}
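For orientation, a minimal sketch of driving the streaming runner through the new runTools entry point (illustrative only; getWeather and its JSON Schema are made up for the example, and strict typings may want an explicit RunnableToolFunction annotation):

  import OpenAI from 'openai';

  const openai = new OpenAI();

  // Illustrative tool implementation; a real one would call a weather service.
  async function getWeather(args: { location: string }) {
    return { location: args.location, temperature: '20C', conditions: 'sunny' };
  }

  async function main() {
    const runner = openai.beta.chat.completions.runTools({
      stream: true,
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'How is the weather in Toronto today?' }],
      tools: [
        {
          type: 'function',
          function: {
            function: getWeather,
            description: 'Get the current weather for a location.',
            parse: JSON.parse,
            parameters: {
              type: 'object',
              properties: { location: { type: 'string' } },
            },
          },
        },
      ],
    });

    runner.on('content', (delta) => process.stdout.write(delta));
    console.log('\n', await runner.finalContent());
  }

  main();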
diff --git a/src/lib/RunnableFunction.ts b/src/lib/RunnableFunction.ts
index 1de9f04ca..5c6845cab 100644
--- a/src/lib/RunnableFunction.ts
+++ b/src/lib/RunnableFunction.ts
@@ -61,6 +61,11 @@ export type RunnableFunction<Args extends object | string> =
  : Args extends object ? RunnableFunctionWithParse<Args>
  : never;

+export type RunnableToolFunction<Args extends object | string> = {
+  type: 'function';
+  function: RunnableFunction<Args>;
+};
+
export function isRunnableFunctionWithParse<Args extends object>(
  fn: any,
): fn is RunnableFunctionWithParse<Args> {
@@ -76,6 +81,13 @@ export type RunnableFunctions<FunctionsArgs extends BaseFunctionsArgs> =
    : FunctionsArgs[Index];
  };

+export type RunnableTools<FunctionsArgs extends BaseFunctionsArgs> =
+  [any[]] extends [FunctionsArgs] ? readonly RunnableToolFunction<any>[]
+  : {
+      [Index in keyof FunctionsArgs]: Index extends number ? RunnableToolFunction<FunctionsArgs[Index]>
+      : FunctionsArgs[Index];
+    };
+
/**
* This is helper class for passing a `function` and `parse` where the `function`
* argument type matches the `parse` return type.
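For reference, a sketch of a value that satisfies the new RunnableToolFunction shape (hypothetical `add` tool; assumes the lib subpath import resolves the same way it does for the other helpers). An array of such values is what RunnableTools, and hence the `tools` option of the runners, accepts:

  import { RunnableToolFunction } from 'openai/lib/RunnableFunction';

  const addTool: RunnableToolFunction<{ a: number; b: number }> = {
    type: 'function',
    function: {
      name: 'add',
      description: 'Add two numbers together.',
      parameters: {
        type: 'object',
        properties: { a: { type: 'number' }, b: { type: 'number' } },
      },
      // parse turns the model-generated JSON arguments into the typed shape above.
      parse: (input) => JSON.parse(input) as { a: number; b: number },
      function: ({ a, b }) => a + b,
    },
  };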
diff --git a/src/lib/chatCompletionUtils.ts b/src/lib/chatCompletionUtils.ts
new file mode 100644
index 000000000..a0d9099de
--- /dev/null
+++ b/src/lib/chatCompletionUtils.ts
@@ -0,0 +1,28 @@
+import {
+ type ChatCompletionAssistantMessageParam,
+ type ChatCompletionFunctionMessageParam,
+ type ChatCompletionMessageParam,
+ type ChatCompletionToolMessageParam,
+} from 'openai/resources';
+
+export const isAssistantMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionAssistantMessageParam => {
+ return message?.role === 'assistant';
+};
+
+export const isFunctionMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionFunctionMessageParam => {
+ return message?.role === 'function';
+};
+
+export const isToolMessage = (
+ message: ChatCompletionMessageParam | null | undefined,
+): message is ChatCompletionToolMessageParam => {
+ return message?.role === 'tool';
+};
+
+export function isPresent<T>(obj: T | null | undefined): obj is T {
+ return obj != null;
+}
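A small sketch of how these guards narrow message params (illustrative; assumes the module is reachable via the same deep-import path style used by the other lib helpers):

  import { isAssistantMessage, isToolMessage } from 'openai/lib/chatCompletionUtils';
  import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions';

  const history: ChatCompletionMessageParam[] = [
    { role: 'user', content: 'What is 2 + 2?' },
    { role: 'assistant', content: '4' },
  ];

  // filter() with a type guard narrows the element type for the caller.
  const assistantTurns = history.filter(isAssistantMessage);
  const toolTurns = history.filter(isToolMessage);
  console.log(assistantTurns.length, toolTurns.length);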
diff --git a/src/pagination.ts b/src/pagination.ts
index a52b55f5b..af6cd964e 100644
--- a/src/pagination.ts
+++ b/src/pagination.ts
@@ -5,7 +5,7 @@ import { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from
export interface PageResponse<Item> {
  data: Array<Item>;

-  object: string;
+  object: 'list';
}
/**
@@ -14,7 +14,7 @@ export interface PageResponse<Item> {
export class Page<Item> extends AbstractPage<Item> implements PageResponse<Item> {
  data: Array<Item>;

-  object: string;
+  object: 'list';

  constructor(client: APIClient, response: Response, body: PageResponse<Item>, options: FinalRequestOptions) {
    super(client, response, body, options);
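The narrowed `object: 'list'` is observable on any plain Page response; a quick illustrative sketch:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function main() {
    const page = await openai.models.list();
    console.log(page.object); // now typed as the literal 'list' instead of string
    for (const model of page.data) {
      console.log(model.id);
    }
  }

  main();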
diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts
index 74b1c841c..ee8c28c6b 100644
--- a/src/resources/audio/audio.ts
+++ b/src/resources/audio/audio.ts
@@ -1,12 +1,14 @@
// File generated from our OpenAPI spec by Stainless.
import { APIResource } from 'openai/resource';
+import * as SpeechAPI from 'openai/resources/audio/speech';
import * as TranscriptionsAPI from 'openai/resources/audio/transcriptions';
import * as TranslationsAPI from 'openai/resources/audio/translations';
export class Audio extends APIResource {
transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this.client);
translations: TranslationsAPI.Translations = new TranslationsAPI.Translations(this.client);
+ speech: SpeechAPI.Speech = new SpeechAPI.Speech(this.client);
}
export namespace Audio {
@@ -16,4 +18,6 @@ export namespace Audio {
export import Translations = TranslationsAPI.Translations;
export import Translation = TranslationsAPI.Translation;
export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams;
+ export import Speech = SpeechAPI.Speech;
+ export import SpeechCreateParams = SpeechAPI.SpeechCreateParams;
}
diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts
index e04c978aa..17c81d3bb 100644
--- a/src/resources/audio/index.ts
+++ b/src/resources/audio/index.ts
@@ -1,5 +1,6 @@
// File generated from our OpenAPI spec by Stainless.
export { Audio } from './audio';
+export { SpeechCreateParams, Speech } from './speech';
export { Transcription, TranscriptionCreateParams, Transcriptions } from './transcriptions';
export { Translation, TranslationCreateParams, Translations } from './translations';
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
new file mode 100644
index 000000000..c8199e746
--- /dev/null
+++ b/src/resources/audio/speech.ts
@@ -0,0 +1,49 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { type Response } from 'openai/_shims/index';
+import * as SpeechAPI from 'openai/resources/audio/speech';
+
+export class Speech extends APIResource {
+ /**
+ * Generates audio from the input text.
+ */
+  create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise<Response> {
+ return this.post('/audio/speech', { body, ...options, __binaryResponse: true });
+ }
+}
+
+export interface SpeechCreateParams {
+ /**
+ * The text to generate audio for. The maximum length is 4096 characters.
+ */
+ input: string;
+
+ /**
+ * One of the available [TTS models](https://platform.openai.com/docs/models/tts):
+ * `tts-1` or `tts-1-hd`
+ */
+ model: (string & {}) | 'tts-1' | 'tts-1-hd';
+
+ /**
+ * The voice to use when generating the audio. Supported voices are `alloy`,
+ * `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
+ */
+ voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
+
+ /**
+   * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`.
+ */
+ response_format?: 'mp3' | 'opus' | 'aac' | 'flac';
+
+ /**
+ * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
+ * the default.
+ */
+ speed?: number;
+}
+
+export namespace Speech {
+ export import SpeechCreateParams = SpeechAPI.SpeechCreateParams;
+}
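A minimal sketch of calling the new endpoint and saving the binary result (file name, model, and voice are illustrative; examples/audio.ts added by this patch covers the same ground):

  import fs from 'fs';
  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function main() {
    // The promise resolves to a fetch Response because of __binaryResponse: true.
    const response = await openai.audio.speech.create({
      model: 'tts-1',
      voice: 'alloy',
      input: 'Today is a wonderful day to build something people love!',
    });

    const buffer = Buffer.from(await response.arrayBuffer());
    await fs.promises.writeFile('./speech.mp3', buffer);
  }

  main();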
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index 253ae33d5..feb37df09 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -46,8 +46,8 @@ export interface TranscriptionCreateParams {
prompt?: string;
/**
- * The format of the transcript output, in one of these options: json, text, srt,
- * verbose_json, or vtt.
+ * The format of the transcript output, in one of these options: `json`, `text`,
+ * `srt`, `verbose_json`, or `vtt`.
*/
response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index 5ac8d7d88..58e3ab7e7 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -39,8 +39,8 @@ export interface TranslationCreateParams {
prompt?: string;
/**
- * The format of the transcript output, in one of these options: json, text, srt,
- * verbose_json, or vtt.
+ * The format of the transcript output, in one of these options: `json`, `text`,
+ * `srt`, `verbose_json`, or `vtt`.
*/
response_format?: string;
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts
new file mode 100644
index 000000000..a9cfea61a
--- /dev/null
+++ b/src/resources/beta/assistants/assistants.ts
@@ -0,0 +1,470 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as FilesAPI from 'openai/resources/beta/assistants/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Assistants extends APIResource {
+ files: FilesAPI.Files = new FilesAPI.Files(this.client);
+
+ /**
+ * Create an Assistant with a model and instructions.
+ */
+  create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise<Assistant> {
+ return this.post('/assistants', {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves an Assistant.
+ */
+  retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<Assistant> {
+ return this.get(`/assistants/${assistantId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Modifies an Assistant.
+ */
+ update(
+ assistantId: string,
+ body: AssistantUpdateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<Assistant> {
+ return this.post(`/assistants/${assistantId}`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Assistants.
+ */
+  list(
+    query?: AssistantListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<AssistantsPage, Assistant>;
+  list(options?: Core.RequestOptions): Core.PagePromise<AssistantsPage, Assistant>;
+  list(
+    query: AssistantListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<AssistantsPage, Assistant> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this.getAPIList('/assistants', AssistantsPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Delete an Assistant.
+ */
+  del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<AsssitantDeleted> {
+ return this.delete(`/assistants/${assistantId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class AssistantsPage extends CursorPage<Assistant> {}
+
+/**
+ * Represents an `Assistant` that can call the model and use tools.
+ */
+export interface Assistant {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Assistant was created.
+ */
+ created_at: number;
+
+ /**
+ * The description of the Assistant. The maximum length is 512 characters.
+ */
+ description: string | null;
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
+ * attached to this Assistant. There can be a maximum of 20 files attached to the
+ * Assistant. Files are ordered by their creation date in ascending order.
+ */
+  file_ids: Array<string>;
+
+ /**
+ * The system instructions that the Assistant uses. The maximum length is 32768
+ * characters.
+ */
+ instructions: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * ID of the model to use. You can use the
+ * [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ * see all of your available models, or see our
+ * [Model overview](https://platform.openai.com/docs/models/overview) for
+ * descriptions of them.
+ */
+ model: string;
+
+ /**
+ * The name of the Assistant. The maximum length is 256 characters.
+ */
+ name: string | null;
+
+ /**
+ * The object type, which is always `assistant`.
+ */
+ object: 'assistant';
+
+ /**
+   * A list of tools enabled on the Assistant. There can be a maximum of 128 tools per
+ * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ */
+  tools: Array<Assistant.CodeInterpreter | Assistant.Retreival | Assistant.Function>;
+}
+
+export namespace Assistant {
+ export interface CodeInterpreter {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface Retreival {
+ /**
+ * The type of tool being defined: `retreival`
+ */
+ type: 'retreival';
+ }
+
+ export interface Function {
+ /**
+ * The function definition.
+ */
+ function: Function.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace Function {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+       * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+      parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export interface AsssitantDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'assistant.deleted';
+}
+
+export interface AssistantCreateParams {
+ /**
+ * ID of the model to use. You can use the
+ * [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ * see all of your available models, or see our
+ * [Model overview](https://platform.openai.com/docs/models/overview) for
+ * descriptions of them.
+ */
+ model: string;
+
+ /**
+ * The description of the Assistant. The maximum length is 512 characters.
+ */
+ description?: string | null;
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
+ * attached to this Assistant. There can be a maximum of 20 files attached to the
+ * Assistant. Files are ordered by their creation date in ascending order.
+ */
+  file_ids?: Array<string>;
+
+ /**
+ * The system instructions that the Assistant uses. The maximum length is 32768
+ * characters.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The name of the Assistant. The maximum length is 256 characters.
+ */
+ name?: string | null;
+
+ /**
+   * A list of tools enabled on the Assistant. There can be a maximum of 128 tools per
+ * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ */
+ tools?: Array<
+ | AssistantCreateParams.AssistantToolsCode
+ | AssistantCreateParams.AssistantToolsRetrieval
+ | AssistantCreateParams.AssistantToolsFunction
+ >;
+}
+
+export namespace AssistantCreateParams {
+ export interface AssistantToolsCode {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface AssistantToolsRetrieval {
+ /**
+ * The type of tool being defined: `retreival`
+ */
+ type: 'retreival';
+ }
+
+ export interface AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ function: AssistantToolsFunction.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+       * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+      parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export interface AssistantUpdateParams {
+ /**
+ * The description of the Assistant. The maximum length is 512 characters.
+ */
+ description?: string | null;
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
+ * attached to this Assistant. There can be a maximum of 20 files attached to the
+ * Assistant. Files are ordered by their creation date in ascending order. If a
+   * file was previously attached to the list but does not show up in the list, it
+ * will be deleted from the assistant.
+ */
+  file_ids?: Array<string>;
+
+ /**
+ * The system instructions that the Assistant uses. The maximum length is 32768
+ * characters.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * ID of the model to use. You can use the
+ * [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ * see all of your available models, or see our
+ * [Model overview](https://platform.openai.com/docs/models/overview) for
+ * descriptions of them.
+ */
+ model?: string;
+
+ /**
+ * The name of the Assistant. The maximum length is 256 characters.
+ */
+ name?: string | null;
+
+ /**
+   * A list of tools enabled on the Assistant. There can be a maximum of 128 tools per
+ * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ */
+ tools?: Array<
+ | AssistantUpdateParams.AssistantToolsCode
+ | AssistantUpdateParams.AssistantToolsRetrieval
+ | AssistantUpdateParams.AssistantToolsFunction
+ >;
+}
+
+export namespace AssistantUpdateParams {
+ export interface AssistantToolsCode {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface AssistantToolsRetrieval {
+ /**
+ * The type of tool being defined: `retreival`
+ */
+ type: 'retreival';
+ }
+
+ export interface AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ function: AssistantToolsFunction.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+       * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+      parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export interface AssistantListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Assistants {
+ export import Assistant = AssistantsAPI.Assistant;
+ export import AsssitantDeleted = AssistantsAPI.AsssitantDeleted;
+ export import AssistantsPage = AssistantsAPI.AssistantsPage;
+ export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
+ export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
+ export import AssistantListParams = AssistantsAPI.AssistantListParams;
+ export import Files = FilesAPI.Files;
+ export import AssistantFile = FilesAPI.AssistantFile;
+ export import FileDeleteResponse = FilesAPI.FileDeleteResponse;
+ export import AssistantFilesPage = FilesAPI.AssistantFilesPage;
+ export import FileCreateParams = FilesAPI.FileCreateParams;
+ export import FileListParams = FilesAPI.FileListParams;
+}
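A short, illustrative sketch of the new Assistants surface (model name and instructions are placeholders):

  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function main() {
    const assistant = await openai.beta.assistants.create({
      model: 'gpt-4-1106-preview',
      name: 'Math Tutor',
      instructions: 'You are a personal math tutor. Keep answers short.',
      tools: [{ type: 'code_interpreter' }],
    });
    console.log(assistant.id);

    // CursorPage supports auto-pagination, so this walks every page of results.
    for await (const a of openai.beta.assistants.list({ limit: 20 })) {
      console.log(a.id, a.name);
    }
  }

  main();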
diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts
new file mode 100644
index 000000000..d913146ea
--- /dev/null
+++ b/src/resources/beta/assistants/files.ts
@@ -0,0 +1,154 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as FilesAPI from 'openai/resources/beta/assistants/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Files extends APIResource {
+ /**
+ * Create an Assistant File by attaching a
+ * [File](https://platform.openai.com/docs/api-reference/files) to an
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants).
+ */
+ create(
+ assistantId: string,
+ body: FileCreateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<AssistantFile> {
+ return this.post(`/assistants/${assistantId}/files`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves an AssistantFile.
+ */
+ retrieve(
+ assistantId: string,
+ fileId: string,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<AssistantFile> {
+ return this.get(`/assistants/${assistantId}/files/${fileId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Assistant Files.
+ */
+  list(
+    assistantId: string,
+    query?: FileListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<AssistantFilesPage, AssistantFile>;
+  list(
+    assistantId: string,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<AssistantFilesPage, AssistantFile>;
+  list(
+    assistantId: string,
+    query: FileListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<AssistantFilesPage, AssistantFile> {
+ if (isRequestOptions(query)) {
+ return this.list(assistantId, {}, query);
+ }
+ return this.getAPIList(`/assistants/${assistantId}/files`, AssistantFilesPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Delete an Assistant File.
+ */
+ del(
+ assistantId: string,
+ fileId: string,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<FileDeleteResponse> {
+ return this.delete(`/assistants/${assistantId}/files/${fileId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class AssistantFilesPage extends CursorPage<AssistantFile> {}
+
+/**
+ * A list of [Files](https://platform.openai.com/docs/api-reference/files) attached
+ * to an `Assistant`.
+ */
+export interface AssistantFile {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Assistant ID that the File is attached to.
+ */
+ assistant_id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Assistant File was created.
+ */
+ created_at: number;
+
+ /**
+ * The object type, which is always `assistant.file`.
+ */
+ object: 'assistant.file';
+}
+
+/**
+ * Deletes the association between the Assistant and the File, but does not delete
+ * the [File](https://platform.openai.com/docs/api-reference/files) object itself.
+ */
+export interface FileDeleteResponse {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'assistant.file.deleted';
+}
+
+export interface FileCreateParams {
+ /**
+ * A [File](https://platform.openai.com/docs/api-reference/files) ID (with
+ * `purpose="assistants"`) that the Assistant should use. Useful for tools like
+ * `retrieval` and `code_interpreter` that can access files.
+ */
+ file_id: string;
+}
+
+export interface FileListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Files {
+ export import AssistantFile = FilesAPI.AssistantFile;
+ export import FileDeleteResponse = FilesAPI.FileDeleteResponse;
+ export import AssistantFilesPage = FilesAPI.AssistantFilesPage;
+ export import FileCreateParams = FilesAPI.FileCreateParams;
+ export import FileListParams = FilesAPI.FileListParams;
+}
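An illustrative sketch of attaching an uploaded File to an Assistant (the local file name is a placeholder; `purpose: 'assistants'` relies on the Files API change elsewhere in this patch):

  import fs from 'fs';
  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function attachNotes(assistantId: string) {
    const file = await openai.files.create({
      file: fs.createReadStream('notes.pdf'),
      purpose: 'assistants',
    });

    const assistantFile = await openai.beta.assistants.files.create(assistantId, {
      file_id: file.id,
    });
    console.log(assistantFile.id, assistantFile.assistant_id);
  }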
diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts
new file mode 100644
index 000000000..7455e48e6
--- /dev/null
+++ b/src/resources/beta/assistants/index.ts
@@ -0,0 +1,19 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export {
+ Assistant,
+ AsssitantDeleted,
+ AssistantCreateParams,
+ AssistantUpdateParams,
+ AssistantListParams,
+ AssistantsPage,
+ Assistants,
+} from './assistants';
+export {
+ AssistantFile,
+ FileDeleteResponse,
+ FileCreateParams,
+ FileListParams,
+ AssistantFilesPage,
+ Files,
+} from './files';
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index e76f34c83..a9505a17d 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -1,12 +1,29 @@
// File generated from our OpenAPI spec by Stainless.
import { APIResource } from 'openai/resource';
+import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
import * as ChatAPI from 'openai/resources/beta/chat/chat';
+import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
export class Beta extends APIResource {
chat: ChatAPI.Chat = new ChatAPI.Chat(this.client);
+ assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this.client);
+ threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this.client);
}
export namespace Beta {
export import Chat = ChatAPI.Chat;
+ export import Assistants = AssistantsAPI.Assistants;
+ export import Assistant = AssistantsAPI.Assistant;
+ export import AsssitantDeleted = AssistantsAPI.AsssitantDeleted;
+ export import AssistantsPage = AssistantsAPI.AssistantsPage;
+ export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
+ export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
+ export import AssistantListParams = AssistantsAPI.AssistantListParams;
+ export import Threads = ThreadsAPI.Threads;
+ export import Thread = ThreadsAPI.Thread;
+ export import ThreadDeleted = ThreadsAPI.ThreadDeleted;
+ export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
+ export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams;
+ export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
}
diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts
index 24fe90a0a..1164b787f 100644
--- a/src/resources/beta/chat/completions.ts
+++ b/src/resources/beta/chat/completions.ts
@@ -20,6 +20,10 @@ export {
RunnableFunctionWithoutParse,
ParsingFunction,
} from 'openai/lib/RunnableFunction';
+import { ChatCompletionToolRunnerParams } from 'openai/lib/ChatCompletionRunner';
+export { ChatCompletionToolRunnerParams } from 'openai/lib/ChatCompletionRunner';
+import { ChatCompletionStreamingToolRunnerParams } from 'openai/lib/ChatCompletionStreamingRunner';
+export { ChatCompletionStreamingToolRunnerParams } from 'openai/lib/ChatCompletionStreamingRunner';
import { ChatCompletionStream, type ChatCompletionStreamParams } from 'openai/lib/ChatCompletionStream';
export { ChatCompletionStream, type ChatCompletionStreamParams } from 'openai/lib/ChatCompletionStream';
@@ -31,7 +35,7 @@ export class Completions extends APIResource {
* the model requests function calls.
*
* For more details and examples, see
- * [the docs](https://github.com/openai/openai-node#runFunctions)
+ * [the docs](https://github.com/openai/openai-node#automated-function-calls)
*/
  runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
    body: ChatCompletionFunctionRunnerParams<FunctionsArgs>,
@@ -61,6 +65,43 @@ export class Completions extends APIResource {
);
}
+ /**
+ * A convenience helper for using tool calls with the /chat/completions endpoint
+ * which automatically calls the JavaScript functions you provide and sends their
+ * results back to the /chat/completions endpoint, looping as long as the model
+ * requests function calls.
+ *
+ * For more details and examples, see
+ * [the docs](https://github.com/openai/openai-node#automated-function-calls)
+ */
+  runTools<FunctionsArgs extends BaseFunctionsArgs>(
+    body: ChatCompletionToolRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionRunner;
+  runTools<FunctionsArgs extends BaseFunctionsArgs>(
+    body: ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionStreamingRunner;
+  runTools<FunctionsArgs extends BaseFunctionsArgs>(
+    body:
+      | ChatCompletionToolRunnerParams<FunctionsArgs>
+      | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionRunner | ChatCompletionStreamingRunner {
+ if (body.stream) {
+ return ChatCompletionStreamingRunner.runTools(
+ this.client.chat.completions,
+        body as ChatCompletionStreamingToolRunnerParams<FunctionsArgs>,
+ options,
+ );
+ }
+ return ChatCompletionRunner.runTools(
+ this.client.chat.completions,
+      body as ChatCompletionToolRunnerParams<FunctionsArgs>,
+ options,
+ );
+ }
+
/**
* Creates a chat completion stream
*/
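A blocking (non-streaming) sketch of the same helper; examples/tool-call-helpers.ts added in this patch shows a fuller version. The `sum` tool and its schema are illustrative:

  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function main() {
    const runner = openai.beta.chat.completions.runTools({
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Add up 3, 5 and 7 for me.' }],
      tools: [
        {
          type: 'function',
          function: {
            name: 'sum',
            description: 'Add a list of numbers together.',
            parameters: {
              type: 'object',
              properties: { values: { type: 'array', items: { type: 'number' } } },
            },
            parse: (input: string) => JSON.parse(input) as { values: number[] },
            function: ({ values }: { values: number[] }) => values.reduce((a, b) => a + b, 0),
          },
        },
      ],
    });

    console.log(await runner.finalContent());
  }

  main();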
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 9d8daa323..1383aa2b8 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -1,4 +1,21 @@
// File generated from our OpenAPI spec by Stainless.
+export {
+ Assistant,
+ AsssitantDeleted,
+ AssistantCreateParams,
+ AssistantUpdateParams,
+ AssistantListParams,
+ AssistantsPage,
+ Assistants,
+} from './assistants/index';
export { Beta } from './beta';
export { Chat } from './chat/index';
+export {
+ Thread,
+ ThreadDeleted,
+ ThreadCreateParams,
+ ThreadUpdateParams,
+ ThreadCreateAndRunParams,
+ Threads,
+} from './threads/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
new file mode 100644
index 000000000..53e26a5c6
--- /dev/null
+++ b/src/resources/beta/threads/index.ts
@@ -0,0 +1,31 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export {
+ MessageContentImageFile,
+ MessageContentText,
+ ThreadMessage,
+ ThreadMessageDeleted,
+ MessageCreateParams,
+ MessageUpdateParams,
+ MessageListParams,
+ ThreadMessagesPage,
+ Messages,
+} from './messages/index';
+export {
+ RequiredActionFunctionToolCall,
+ Run,
+ RunCreateParams,
+ RunUpdateParams,
+ RunListParams,
+ RunSubmitToolOutputsParams,
+ RunsPage,
+ Runs,
+} from './runs/index';
+export {
+ Thread,
+ ThreadDeleted,
+ ThreadCreateParams,
+ ThreadUpdateParams,
+ ThreadCreateAndRunParams,
+ Threads,
+} from './threads';
diff --git a/src/resources/beta/threads/messages/files.ts b/src/resources/beta/threads/messages/files.ts
new file mode 100644
index 000000000..f55cd780c
--- /dev/null
+++ b/src/resources/beta/threads/messages/files.ts
@@ -0,0 +1,105 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as FilesAPI from 'openai/resources/beta/threads/messages/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Files extends APIResource {
+ /**
+ * Retrieves a Message File.
+ */
+ retrieve(
+ threadId: string,
+ messageId: string,
+ fileId: string,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<MessageFile> {
+ return this.get(`/threads/${threadId}/messages/${messageId}/files/${fileId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Message Files.
+ */
+  list(
+    threadId: string,
+    messageId: string,
+    query?: FileListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<MessageFilesPage, MessageFile>;
+  list(
+    threadId: string,
+    messageId: string,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<MessageFilesPage, MessageFile>;
+  list(
+    threadId: string,
+    messageId: string,
+    query: FileListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<MessageFilesPage, MessageFile> {
+ if (isRequestOptions(query)) {
+ return this.list(threadId, messageId, {}, query);
+ }
+ return this.getAPIList(`/threads/${threadId}/messages/${messageId}/files`, MessageFilesPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class MessageFilesPage extends CursorPage<MessageFile> {}
+
+/**
+ * A list of Files attached to a `Message`.
+ */
+export interface MessageFile {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Message File was created.
+ */
+ created_at: number;
+
+ /**
+ * The ID of the [Message](https://platform.openai.com/docs/api-reference/messages)
+ * that the [File](https://platform.openai.com/docs/api-reference/files) is
+ * attached to.
+ */
+ message_id: string;
+
+ /**
+ * The object type, which is always `thread.message.file`.
+ */
+ object: 'thread.message.file';
+}
+
+export interface FileListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Files {
+ export import MessageFile = FilesAPI.MessageFile;
+ export import MessageFilesPage = FilesAPI.MessageFilesPage;
+ export import FileListParams = FilesAPI.FileListParams;
+}
diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts
new file mode 100644
index 000000000..cde22c2a9
--- /dev/null
+++ b/src/resources/beta/threads/messages/index.ts
@@ -0,0 +1,14 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export {
+ MessageContentImageFile,
+ MessageContentText,
+ ThreadMessage,
+ ThreadMessageDeleted,
+ MessageCreateParams,
+ MessageUpdateParams,
+ MessageListParams,
+ ThreadMessagesPage,
+ Messages,
+} from './messages';
+export { MessageFile, FileListParams, MessageFilesPage, Files } from './files';
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts
new file mode 100644
index 000000000..ec99a10f9
--- /dev/null
+++ b/src/resources/beta/threads/messages/messages.ts
@@ -0,0 +1,343 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
+import * as FilesAPI from 'openai/resources/beta/threads/messages/files';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Messages extends APIResource {
+ files: FilesAPI.Files = new FilesAPI.Files(this.client);
+
+ /**
+ * Create a Message.
+ */
+ create(
+ threadId: string,
+ body: MessageCreateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<ThreadMessage> {
+ return this.post(`/threads/${threadId}/messages`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieve a Message.
+ */
+ retrieve(
+ threadId: string,
+ messageId: string,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<ThreadMessage> {
+ return this.get(`/threads/${threadId}/messages/${messageId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Modifies a Message.
+ */
+ update(
+ threadId: string,
+ messageId: string,
+ body: MessageUpdateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<ThreadMessage> {
+ return this.post(`/threads/${threadId}/messages/${messageId}`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Messages for a given Thread.
+ */
+  list(
+    threadId: string,
+    query?: MessageListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<ThreadMessagesPage, ThreadMessage>;
+  list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<ThreadMessagesPage, ThreadMessage>;
+  list(
+    threadId: string,
+    query: MessageListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<ThreadMessagesPage, ThreadMessage> {
+ if (isRequestOptions(query)) {
+ return this.list(threadId, {}, query);
+ }
+ return this.getAPIList(`/threads/${threadId}/messages`, ThreadMessagesPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class ThreadMessagesPage extends CursorPage<ThreadMessage> {}
+
+/**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files)
+ * in the content of a Message.
+ */
+export interface MessageContentImageFile {
+ image_file: MessageContentImageFile.ImageFile;
+
+ /**
+ * Will always be `image_file`.
+ */
+ type: 'image_file';
+}
+
+export namespace MessageContentImageFile {
+ export interface ImageFile {
+ /**
+ * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ * in the Message content.
+ */
+ file_id: string;
+ }
+}
+
+/**
+ * The text content that is part of a Message.
+ */
+export interface MessageContentText {
+ text: MessageContentText.Text;
+
+ /**
+ * Will always be `text`.
+ */
+ type: 'text';
+}
+
+export namespace MessageContentText {
+ export interface Text {
+    annotations: Array<Text.FileCitation | Text.FilePath>;
+
+ /**
+ * The data that makes up the text.
+ */
+ value: string;
+ }
+
+ export namespace Text {
+ /**
+ * A citation within the Message that points to a specific quote from a specific
+ * File associated with the Assistant or the Message. Generated when the Assistant
+ * uses the "retrieval" tool to search files.
+ */
+ export interface FileCitation {
+ end_index: number;
+
+ file_citation: FileCitation.FileCitation;
+
+ start_index: number;
+
+ /**
+ * The text in the Message content that needs to be replaced.
+ */
+ text: string;
+
+ /**
+ * Will always be `file_citation`.
+ */
+ type: 'file_citation';
+ }
+
+ export namespace FileCitation {
+ export interface FileCitation {
+ /**
+ * The ID of the specific File the citation is from.
+ */
+ file_id: string;
+
+ /**
+ * The specific quote in the File.
+ */
+ quote: string;
+ }
+ }
+
+ /**
+ * A URL for the File that's generated when the Assistant used the
+ * `code_interpreter` tool to generate a File.
+ */
+ export interface FilePath {
+ end_index: number;
+
+ file_path: FilePath.FilePath;
+
+ start_index: number;
+
+ /**
+ * The text in the Message content that needs to be replaced.
+ */
+ text: string;
+
+ /**
+ * Will always be `file_path`.
+ */
+ type: 'file_path';
+ }
+
+ export namespace FilePath {
+ export interface FilePath {
+ /**
+ * The ID of the File that was generated.
+ */
+ file_id: string;
+ }
+ }
+ }
+}
+
+/**
+ * Represents a Message within a
+ * [Thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+export interface ThreadMessage {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * If applicable, the ID of the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) that
+ * authored this Message.
+ */
+ assistant_id: string | null;
+
+ /**
+   * The content of the Message as an array of text and/or images.
+   */
+  content: Array<MessageContentImageFile | MessageContentText>;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Message was created.
+ */
+ created_at: number;
+
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the Assistant should use. Useful for tools like retrieval and code_interpreter
+ * that can access files. A maximum of 10 files can be attached to a Message.
+ */
+  file_ids: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * The object type, which is always `thread.message`.
+ */
+ object: 'thread.message';
+
+ /**
+ * The entity that produced the Message. One of `user` or `assistant`.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * If applicable, the ID of the
+ * [Run](https://platform.openai.com/docs/api-reference/runs) associated with the
+ * authoring of this Message.
+ */
+ run_id: string | null;
+
+ /**
+ * The [Thread](https://platform.openai.com/docs/api-reference/threads) ID that
+ * this Message belongs to.
+ */
+ thread_id: string;
+}
+
+export interface ThreadMessageDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'thread.message.deleted';
+}
+
+export interface MessageCreateParams {
+ /**
+ * The content of the Message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the Message. Currently only `user` is
+ * supported.
+ */
+ role: 'user';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the Message should use. There can be a maximum of 10 files attached to a
+ * Message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+  file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+}
+
+export interface MessageUpdateParams {
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+}
+
+export interface MessageListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Messages {
+ export import MessageContentImageFile = MessagesAPI.MessageContentImageFile;
+ export import MessageContentText = MessagesAPI.MessageContentText;
+ export import ThreadMessage = MessagesAPI.ThreadMessage;
+ export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted;
+ export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage;
+ export import MessageCreateParams = MessagesAPI.MessageCreateParams;
+ export import MessageUpdateParams = MessagesAPI.MessageUpdateParams;
+ export import MessageListParams = MessagesAPI.MessageListParams;
+ export import Files = FilesAPI.Files;
+ export import MessageFile = FilesAPI.MessageFile;
+ export import MessageFilesPage = FilesAPI.MessageFilesPage;
+ export import FileListParams = FilesAPI.FileListParams;
+}
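An illustrative sketch of creating a Thread, adding a Message, and listing it back (the content string is a placeholder):

  import OpenAI from 'openai';

  const openai = new OpenAI();

  async function main() {
    const thread = await openai.beta.threads.create();

    await openai.beta.threads.messages.create(thread.id, {
      role: 'user',
      content: 'I need to solve the equation 3x + 11 = 14. Can you help me?',
    });

    const messages = await openai.beta.threads.messages.list(thread.id, { order: 'asc' });
    for (const message of messages.data) {
      console.log(message.role, JSON.stringify(message.content));
    }
  }

  main();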
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
new file mode 100644
index 000000000..a2261f961
--- /dev/null
+++ b/src/resources/beta/threads/runs/index.ts
@@ -0,0 +1,23 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export {
+ CodeToolCall,
+ FunctionToolCall,
+ MessageCreationStepDetails,
+ RetrievalToolCall,
+ RunStep,
+ ToolCallsStepDetails,
+ StepListParams,
+ RunStepsPage,
+ Steps,
+} from './steps';
+export {
+ RequiredActionFunctionToolCall,
+ Run,
+ RunCreateParams,
+ RunUpdateParams,
+ RunListParams,
+ RunSubmitToolOutputsParams,
+ RunsPage,
+ Runs,
+} from './runs';
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
new file mode 100644
index 000000000..e81b09a90
--- /dev/null
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -0,0 +1,535 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
+import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Runs extends APIResource {
+ steps: StepsAPI.Steps = new StepsAPI.Steps(this.client);
+
+ /**
+ * Create a Run.
+ */
+  create(threadId: string, body: RunCreateParams, options?: Core.RequestOptions): Core.APIPromise<Run> {
+ return this.post(`/threads/${threadId}/runs`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves a Run.
+ */
+  retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
+ return this.get(`/threads/${threadId}/runs/${runId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Modifies a Run.
+ */
+ update(
+ threadId: string,
+ runId: string,
+ body: RunUpdateParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<Run> {
+ return this.post(`/threads/${threadId}/runs/${runId}`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Runs belonging to a Thread.
+ */
+  list(
+    threadId: string,
+    query?: RunListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<RunsPage, Run>;
+  list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<RunsPage, Run>;
+  list(
+    threadId: string,
+    query: RunListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<RunsPage, Run> {
+ if (isRequestOptions(query)) {
+ return this.list(threadId, {}, query);
+ }
+ return this.getAPIList(`/threads/${threadId}/runs`, RunsPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Cancels a Run that is `in_progress`.
+ */
+  cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
+ return this.post(`/threads/${threadId}/runs/${runId}/cancel`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * When a Run has the `status: "requires_action"` and `required_action.type` is
+ * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
+ * tool calls once they're all completed. All outputs must be submitted in a single
+ * request.
+ */
+ submitToolOutputs(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParams,
+ options?: Core.RequestOptions,
+  ): Core.APIPromise<Run> {
+ return this.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class RunsPage extends CursorPage<Run> {}
+
+/**
+ * Tool call objects
+ */
+export interface RequiredActionFunctionToolCall {
+ /**
+ * The ID of the tool call. This ID must be referenced when you submit the tool
+   * outputs using the
+ * [Submit tool outputs to Run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ * endpoint.
+ */
+ id: string;
+
+ /**
+ * The function definition.
+ */
+ function: RequiredActionFunctionToolCall.Function;
+
+ /**
+ * The type of tool call the output is required for. For now, this is always
+ * `function`.
+ */
+ type: 'function';
+}
+
+export namespace RequiredActionFunctionToolCall {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * The arguments that the model expects you to pass to the function.
+ */
+ arguments: string;
+
+ /**
+ * The name of the function.
+ */
+ name: string;
+ }
+}
+
+/**
+ * Represents an execution Run on a
+ * [Thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+export interface Run {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The ID of the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ * execution of this Run.
+ */
+ assistant_id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run was cancelled.
+ */
+ cancelled_at: number | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run was completed.
+ */
+ completed_at: number | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run was created.
+ */
+ created_at: number;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run will expire.
+ */
+ expires_at: number;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run failed.
+ */
+ failed_at: number | null;
+
+ /**
+ * The list of [File](https://platform.openai.com/docs/api-reference/files) IDs the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ * this Run.
+ */
+  file_ids: Array<string>;
+
+ /**
+ * The instructions that the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ * this run.
+ */
+ instructions: string;
+
+ /**
+ * The last error associated with this Run. Will be `null` if there are no errors.
+ */
+ last_error: Run.LastError | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * The model that the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ * this Run.
+ */
+ model: string;
+
+ /**
+ * The object type, which is always `assistant.run`.
+ */
+ object: 'assistant.run';
+
+ /**
+ * Details on the action required to continue the Run. Will be `null` if no action
+ * is required.
+ */
+ required_action: Run.RequiredAction | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run was started.
+ */
+ started_at: number | null;
+
+ /**
+ * The status of the run, which can be either `queued`, `in_progress`,
+ * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
+ * `expired`.
+ */
+ status:
+ | 'queued'
+ | 'in_progress'
+ | 'requires_action'
+ | 'cancelling'
+ | 'cancelled'
+ | 'failed'
+ | 'completed'
+ | 'expired';
+
+ /**
+ * The ID of the [Thread](https://platform.openai.com/docs/api-reference/threads)
+ * that was executed on as a part of this Run.
+ */
+ thread_id: string;
+
+ /**
+ * The list of tools that the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ * this Run.
+ */
+  tools: Array<Run.AssistantToolsCode | Run.AssistantToolsRetrieval | Run.AssistantToolsFunction>;
+}
+
+export namespace Run {
+ /**
+ * The last error associated with this Run. Will be `null` if there are no errors.
+ */
+ export interface LastError {
+ /**
+ * One of `server_error` or `rate_limit_exceeded`.
+ */
+ code: 'server_error' | 'rate_limit_exceeded';
+
+ /**
+ * A human-readable description of the error.
+ */
+ message: string;
+ }
+
+ /**
+ * Details on the action required to continue the Run. Will be `null` if no action
+ * is required.
+ */
+ export interface RequiredAction {
+ /**
+ * Details on the tool outputs needed for this Run to continue.
+ */
+ submit_tool_outputs: RequiredAction.SubmitToolOutputs;
+
+ /**
+ * For now, this is always `submit_tool_outputs`.
+ */
+ type: 'submit_tool_outputs';
+ }
+
+ export namespace RequiredAction {
+ /**
+ * Details on the tool outputs needed for this Run to continue.
+ */
+ export interface SubmitToolOutputs {
+ /**
+ * A list of the relevant tool calls.
+ */
+      tool_calls: Array<RunsAPI.RequiredActionFunctionToolCall>;
+ }
+ }
+
+ export interface AssistantToolsCode {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface AssistantToolsRetrieval {
+ /**
+ * The type of tool being defined: `retreival`
+ */
+ type: 'retreival';
+ }
+
+ export interface AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ function: AssistantToolsFunction.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+       * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+      parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export interface RunCreateParams {
+ /**
+ * The ID of the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this Run.
+ */
+ assistant_id: string;
+
+ /**
+ * Override the default system message of the Assistant. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+   * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this Run. If a value is provided here, it will override the
+ * model associated with the Assistant. If not, the model associated with the
+ * Assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * Override the tools the Assistant can use for this Run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<
+ | RunCreateParams.AssistantToolsCode
+ | RunCreateParams.AssistantToolsRetrieval
+ | RunCreateParams.AssistantToolsFunction
+ > | null;
+}
+
+export namespace RunCreateParams {
+ export interface AssistantToolsCode {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface AssistantToolsRetrieval {
+ /**
+ * The type of tool being defined: `retreival`
+ */
+ type: 'retreival';
+ }
+
+ export interface AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ function: AssistantToolsFunction.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+       * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+ parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export interface RunUpdateParams {
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+}
+
+export interface RunListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export interface RunSubmitToolOutputsParams {
+ /**
+ * A list of tools for which the outputs are being submitted.
+ */
+ tool_outputs: Array<RunSubmitToolOutputsParams.ToolOutput>;
+}
+
+export namespace RunSubmitToolOutputsParams {
+ export interface ToolOutput {
+ /**
+ * The output of the tool call to be submitted to continue the Run.
+ */
+ output?: string;
+
+ /**
+ * The ID of the tool call in the `required_action` object within the Run object
+ * the output is being submitted for.
+ */
+ tool_call_id?: string;
+ }
+}
+
+export namespace Runs {
+ export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
+ export import Run = RunsAPI.Run;
+ export import RunsPage = RunsAPI.RunsPage;
+ export import RunCreateParams = RunsAPI.RunCreateParams;
+ export import RunUpdateParams = RunsAPI.RunUpdateParams;
+ export import RunListParams = RunsAPI.RunListParams;
+ export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
+ export import Steps = StepsAPI.Steps;
+ export import CodeToolCall = StepsAPI.CodeToolCall;
+ export import FunctionToolCall = StepsAPI.FunctionToolCall;
+ export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
+ export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
+ export import RunStep = StepsAPI.RunStep;
+ export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
+ export import RunStepsPage = StepsAPI.RunStepsPage;
+ export import StepListParams = StepsAPI.StepListParams;
+}
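
For context, a minimal sketch of how these Run parameters fit together, assuming a configured `openai` client; the thread, assistant, and tool-call IDs are placeholders, the status values and simple polling loop are assumptions, and `retrieve`/`submitToolOutputs` are inferred from the parameter types above rather than shown in this hunk.

import OpenAI from 'openai';

const openai = new OpenAI();

async function runWithTools(threadId: string, assistantId: string) {
  // Create a Run on an existing Thread, overriding the tools for this Run only.
  let run = await openai.beta.threads.runs.create(threadId, {
    assistant_id: assistantId,
    tools: [{ type: 'code_interpreter' }],
  });

  // Simplified polling until the Run settles or asks for tool outputs.
  while (run.status === 'queued' || run.status === 'in_progress') {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    run = await openai.beta.threads.runs.retrieve(threadId, run.id);
  }

  // Answer a required tool call (the ID and output below are placeholders).
  if (run.status === 'requires_action') {
    run = await openai.beta.threads.runs.submitToolOutputs(threadId, run.id, {
      tool_outputs: [{ tool_call_id: 'call_abc123', output: '{"answer": 42}' }],
    });
  }
  return run;
}
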
diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts
new file mode 100644
index 000000000..94745f875
--- /dev/null
+++ b/src/resources/beta/threads/runs/steps.ts
@@ -0,0 +1,365 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Steps extends APIResource {
+ /**
+ * Retrieves a Run Step.
+ */
+ retrieve(
+ threadId: string,
+ runId: string,
+ stepId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<RunStep> {
+ return this.get(`/threads/${threadId}/runs/${runId}/steps/${stepId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Returns a list of Run Steps belonging to a Run.
+ */
+ list(
+ threadId: string,
+ runId: string,
+ query?: StepListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<RunStepsPage, RunStep>;
+ list(
+ threadId: string,
+ runId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<RunStepsPage, RunStep>;
+ list(
+ threadId: string,
+ runId: string,
+ query: StepListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<RunStepsPage, RunStep> {
+ if (isRequestOptions(query)) {
+ return this.list(threadId, runId, {}, query);
+ }
+ return this.getAPIList(`/threads/${threadId}/runs/${runId}/steps`, RunStepsPage, {
+ query,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+export class RunStepsPage extends CursorPage<RunStep> {}
+
+/**
+ * Details of the Code Interpreter tool call the Run Step was involved in.
+ */
+export interface CodeToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ id: string;
+
+ /**
+ * The code interpreter tool call definition.
+ */
+ code_interpreter: CodeToolCall.CodeInterpreter;
+
+ /**
+ * The type of tool call. This is always going to be `code_interpreter` for this
+ * type of tool call.
+ */
+ type: 'code_interpreter';
+}
+
+export namespace CodeToolCall {
+ /**
+ * The code interpreter tool call definition.
+ */
+ export interface CodeInterpreter {
+ /**
+ * The input to the Code Interpreter tool call.
+ */
+ input: string;
+
+ /**
+ * The outputs from the Code Interpreter tool call. Code Interpreter can output one
+ * or more items, including text (`logs`) or images (`image`). Each of these are
+ * represented by a different object type.
+ */
+ outputs: Array<CodeInterpreter.Logs | CodeInterpreter.Image>;
+ }
+
+ export namespace CodeInterpreter {
+ /**
+ * Text output from the Code Interpreter tool call as part of a Run Step.
+ */
+ export interface Logs {
+ /**
+ * The text output from the Code Interpreter tool call.
+ */
+ logs: string;
+
+ /**
+ * Will always be `logs`.
+ */
+ type: 'logs';
+ }
+
+ export interface Image {
+ image: Image.Image;
+
+ /**
+ * Will always be `image`.
+ */
+ type: 'image';
+ }
+
+ export namespace Image {
+ export interface Image {
+ /**
+ * The [File](https://platform.openai.com/docs/api-reference/files) ID of the
+ * image.
+ */
+ file_id: string;
+ }
+ }
+ }
+}
+
+export interface FunctionToolCall {
+ /**
+ * The ID of the tool call object.
+ */
+ id: string;
+
+ /**
+ * The definition of the function that was called.
+ */
+ function: FunctionToolCall.Function;
+
+ /**
+ * The type of tool call. This is always going to be `function` for this type of
+ * tool call.
+ */
+ type: 'function';
+}
+
+export namespace FunctionToolCall {
+ /**
+ * The definition of the function that was called.
+ */
+ export interface Function {
+ /**
+ * The arguments passed to the function.
+ */
+ arguments: string;
+
+ /**
+ * The name of the function.
+ */
+ name: string;
+
+ /**
+ * The output of the function. This will be `null` if the outputs have not been
+ * [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ * yet.
+ */
+ output: string | null;
+ }
+}
+
+/**
+ * Details of the Message creation activity that the Run Step was involved in.
+ */
+export interface MessageCreationStepDetails {
+ message_creation: MessageCreationStepDetails.MessageCreation;
+
+ /**
+ * Will always be `message_creation`.
+ */
+ type: 'message_creation';
+}
+
+export namespace MessageCreationStepDetails {
+ export interface MessageCreation {
+ /**
+ * The ID of the Message that was created by this Run Step.
+ */
+ message_id: string;
+ }
+}
+
+export interface RetrievalToolCall {
+ /**
+ * The ID of the tool call object.
+ */
+ id: string;
+
+ /**
+ * For now, this is always going to be an empty object.
+ */
+ retrieval: unknown;
+
+ /**
+ * The type of tool call. This is always going to be `retrieval` for this type of
+ * tool call.
+ */
+ type: 'retrieval';
+}
+
+/**
+ * Represents a Step in execution of a Run.
+ */
+export interface RunStep {
+ /**
+ * The identifier of the run step, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The ID of the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants)
+ * associated with the Run Step.
+ */
+ assistant_id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run Step was cancelled.
+ */
+ cancelled_at: number | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run Step was completed.
+ */
+ completed_at: number | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run Step was created.
+ */
+ created_at: number;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run Step expired. A step is
+ * considered expired if the parent Run is expired.
+ */
+ expired_at: number | null;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Run Step failed.
+ */
+ failed_at: number | null;
+
+ /**
+ * The last error associated with this Run Step. Will be `null` if there are no
+ * errors.
+ */
+ last_error: RunStep.LastError | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * The object type, which is always `assistant.run.step`.
+ */
+ object: 'assistant.run.step';
+
+ /**
+ * The ID of the [Run](https://platform.openai.com/docs/api-reference/runs) that
+ * this Run Step is a part of.
+ */
+ run_id: string;
+
+ /**
+ * The status of the Run Step, which can be either `in_progress`, `cancelled`, `failed`,
+ * `completed`, or `expired`.
+ */
+ status: 'in_progress' | 'cancelled' | 'failed' | 'completed' | 'expired';
+
+ /**
+ * The details of the activity the Run Step was involved in.
+ */
+ step_details: MessageCreationStepDetails | ToolCallsStepDetails;
+
+ /**
+ * The ID of the [Thread](https://platform.openai.com/docs/api-reference/threads)
+ * that was Run.
+ */
+ thread_id: string;
+
+ /**
+ * The type of Run Step, which can be either `message_creation` or `tool_calls`.
+ */
+ type: 'message_creation' | 'tool_calls';
+}
+
+export namespace RunStep {
+ /**
+ * The last error associated with this Run Step. Will be `null` if there are no
+ * errors.
+ */
+ export interface LastError {
+ /**
+ * One of `server_error` or `rate_limit_exceeded`.
+ */
+ code: 'server_error' | 'rate_limit_exceeded';
+
+ /**
+ * A human-readable description of the error.
+ */
+ message: string;
+ }
+}
+
+/**
+ * Details of the Tool Call activity that the Run Step was involved in.
+ */
+export interface ToolCallsStepDetails {
+ /**
+ * An array of tool calls the Run Step was involved in. These can be associated
+ * with one of three types of tools: `code_interpreter`, `retrieval`, or
+ * `function`.
+ */
+ tool_calls: Array<CodeToolCall | RetrievalToolCall | FunctionToolCall>;
+
+ /**
+ * Will always be `tool_calls`.
+ */
+ type: 'tool_calls';
+}
+
+export interface StepListParams extends CursorPageParams {
+ /**
+ * A cursor for use in pagination. `before` is an object ID that defines your place
+ * in the list. For instance, if you make a list request and receive 100 objects,
+ * ending with obj_foo, your subsequent call can include before=obj_foo in order to
+ * fetch the previous page of the list.
+ */
+ before?: string;
+
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export namespace Steps {
+ export import CodeToolCall = StepsAPI.CodeToolCall;
+ export import FunctionToolCall = StepsAPI.FunctionToolCall;
+ export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
+ export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
+ export import RunStep = StepsAPI.RunStep;
+ export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
+ export import RunStepsPage = StepsAPI.RunStepsPage;
+ export import StepListParams = StepsAPI.StepListParams;
+}
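
For context, a minimal sketch of paging through a Run's Steps with the cursor parameters above, assuming a configured `openai` client, that the returned page exposes a `data` array, and that `limit` comes from `CursorPageParams`; the IDs are placeholders.

import OpenAI from 'openai';

const openai = new OpenAI();

async function printRunSteps(threadId: string, runId: string) {
  // List the Steps for a Run, oldest first.
  const page = await openai.beta.threads.runs.steps.list(threadId, runId, {
    order: 'asc',
    limit: 20,
  });

  for (const step of page.data) {
    // `step_details` is a discriminated union on `type`.
    if (step.step_details.type === 'tool_calls') {
      console.log(step.id, `${step.step_details.tool_calls.length} tool call(s)`);
    } else {
      console.log(step.id, 'created message', step.step_details.message_creation.message_id);
    }
  }
}
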
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
new file mode 100644
index 000000000..cb49cd230
--- /dev/null
+++ b/src/resources/beta/threads/threads.ts
@@ -0,0 +1,339 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
+import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
+
+export class Threads extends APIResource {
+ runs: RunsAPI.Runs = new RunsAPI.Runs(this.client);
+ messages: MessagesAPI.Messages = new MessagesAPI.Messages(this.client);
+
+ /**
+ * Create a Thread.
+ */
+ create(body: ThreadCreateParams, options?: Core.RequestOptions): Core.APIPromise<Thread> {
+ return this.post('/threads', {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Retrieves a Thread.
+ */
+ retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise<Thread> {
+ return this.get(`/threads/${threadId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Modifies a Thread.
+ */
+ update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Thread> {
+ return this.post(`/threads/${threadId}`, {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Delete a Thread.
+ */
+ del(threadId: string, options?: Core.RequestOptions): Core.APIPromise<ThreadDeleted> {
+ return this.delete(`/threads/${threadId}`, {
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+
+ /**
+ * Create a Thread and Run it in one request.
+ */
+ createAndRun(body: ThreadCreateAndRunParams, options?: Core.RequestOptions): Core.APIPromise<RunsAPI.Run> {
+ return this.post('/threads/runs', {
+ body,
+ ...options,
+ headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ });
+ }
+}
+
+/**
+ * Represents a Thread that contains
+ * [Messages](https://platform.openai.com/docs/api-reference/messages).
+ */
+export interface Thread {
+ /**
+ * The identifier, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the Thread was created.
+ */
+ created_at: number;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata: unknown | null;
+
+ /**
+ * The object type, which is always `thread`.
+ */
+ object: 'thread';
+}
+
+export interface ThreadDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'thread.deleted';
+}
+
+export interface ThreadCreateParams {
+ /**
+ * A list of [Messages](https://platform.openai.com/docs/api-reference/messages) to
+ * start the Thread with.
+ */
+ messages?: Array<ThreadCreateParams.Message>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+}
+
+export namespace ThreadCreateParams {
+ export interface Message {
+ /**
+ * The content of the Message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the Message. Currently only `user` is
+ * supported.
+ */
+ role: 'user';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the Message should use. There can be a maximum of 10 files attached to a
+ * Message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+}
+
+export interface ThreadUpdateParams {
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+}
+
+export interface ThreadCreateAndRunParams {
+ /**
+ * The ID of the
+ * [Assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this Run.
+ */
+ assistant_id: string;
+
+ /**
+ * Override the default system message of the Assistant. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this Run. If a value is provided here, it will override the
+ * model associated with the Assistant. If not, the model associated with the
+ * Assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * If no Thread is provided, an empty Thread will be created.
+ */
+ thread?: ThreadCreateAndRunParams.Thread;
+
+ /**
+ * Override the tools the Assistant can use for this Run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<
+ | ThreadCreateAndRunParams.AssistantToolsCode
+ | ThreadCreateAndRunParams.AssistantToolsRetrieval
+ | ThreadCreateAndRunParams.AssistantToolsFunction
+ > | null;
+}
+
+export namespace ThreadCreateAndRunParams {
+ /**
+ * If no Thread is provided, an empty Thread will be created.
+ */
+ export interface Thread {
+ /**
+ * A list of [Messages](https://platform.openai.com/docs/api-reference/messages) to
+ * start the Thread with.
+ */
+ messages?: Array<Thread.Message>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+
+ export namespace Thread {
+ export interface Message {
+ /**
+ * The content of the Message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the Message. Currently only `user` is
+ * supported.
+ */
+ role: 'user';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the Message should use. There can be a maximum of 10 files attached to a
+ * Message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+ }
+
+ export interface AssistantToolsCode {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+ }
+
+ export interface AssistantToolsRetrieval {
+ /**
+ * The type of tool being defined: `retrieval`
+ */
+ type: 'retrieval';
+ }
+
+ export interface AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ function: AssistantToolsFunction.Function;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+ }
+
+ export namespace AssistantToolsFunction {
+ /**
+ * The function definition.
+ */
+ export interface Function {
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description: string;
+
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+ * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+ parameters: Record<string, unknown>;
+ }
+ }
+}
+
+export namespace Threads {
+ export import Thread = ThreadsAPI.Thread;
+ export import ThreadDeleted = ThreadsAPI.ThreadDeleted;
+ export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
+ export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams;
+ export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
+ export import Runs = RunsAPI.Runs;
+ export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
+ export import Run = RunsAPI.Run;
+ export import RunsPage = RunsAPI.RunsPage;
+ export import RunCreateParams = RunsAPI.RunCreateParams;
+ export import RunUpdateParams = RunsAPI.RunUpdateParams;
+ export import RunListParams = RunsAPI.RunListParams;
+ export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
+ export import Messages = MessagesAPI.Messages;
+ export import MessageContentImageFile = MessagesAPI.MessageContentImageFile;
+ export import MessageContentText = MessagesAPI.MessageContentText;
+ export import ThreadMessage = MessagesAPI.ThreadMessage;
+ export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted;
+ export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage;
+ export import MessageCreateParams = MessagesAPI.MessageCreateParams;
+ export import MessageUpdateParams = MessagesAPI.MessageUpdateParams;
+ export import MessageListParams = MessagesAPI.MessageListParams;
+}
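
For context, a minimal sketch of the two Thread creation paths defined above (`create` and `createAndRun`), assuming a configured `openai` client; the assistant ID, message text, and metadata are placeholders.

import OpenAI from 'openai';

const openai = new OpenAI();

async function startConversation(assistantId: string) {
  // Two-step path: create the Thread first, then attach Messages and Runs to it.
  const thread = await openai.beta.threads.create({
    messages: [{ role: 'user', content: 'Summarize the attached report.' }],
    metadata: { source: 'example' },
  });

  // One-step path: seed a new Thread and start a Run in a single request.
  const run = await openai.beta.threads.createAndRun({
    assistant_id: assistantId,
    thread: {
      messages: [{ role: 'user', content: 'Summarize the attached report.' }],
    },
  });

  return { thread, run };
}
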
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 35011c7a5..007c9271a 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -10,10 +10,23 @@ export class Chat extends APIResource {
export namespace Chat {
export import Completions = CompletionsAPI.Completions;
export import ChatCompletion = CompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart;
+ export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage;
+ export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText;
+ export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption;
+ export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam;
export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage;
export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall;
+ export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice;
export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole;
+ export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption;
+ export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam;
+ export import ChatCompletionUserMessageParam = CompletionsAPI.ChatCompletionUserMessageParam;
/**
* @deprecated ChatCompletionMessageParam should be used instead
*/
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index a5be20771..b81da9fa3 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -62,7 +62,15 @@ export interface ChatCompletion {
/**
* The object type, which is always `chat.completion`.
*/
- object: string;
+ object: 'chat.completion';
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ *
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
/**
* Usage statistics for the completion request.
@@ -76,10 +84,11 @@ export namespace ChatCompletion {
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, `content_filter` if
- * content was omitted due to a flag from our content filters, or `function_call`
- * if the model called a function.
+ * content was omitted due to a flag from our content filters, `tool_calls` if the
+ * model called a tool, or `function_call` (deprecated) if the model called a
+ * function.
*/
- finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter';
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call';
/**
* The index of the choice in the list of choices.
@@ -93,6 +102,50 @@ export namespace ChatCompletion {
}
}
+export interface ChatCompletionAssistantMessageParam {
+ /**
+ * The contents of the assistant message.
+ */
+ content: string | null;
+
+ /**
+ * The role of the messages author, in this case `assistant`.
+ */
+ role: 'assistant';
+
+ /**
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
+ */
+ function_call?: ChatCompletionAssistantMessageParam.FunctionCall;
+
+ /**
+ * The tool calls generated by the model, such as function calls.
+ */
+ tool_calls?: Array<ChatCompletionMessageToolCall>;
+}
+
+export namespace ChatCompletionAssistantMessageParam {
+ /**
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
+ */
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
+
/**
* Represents a streamed chunk of a chat completion response returned by model,
* based on the provided input.
@@ -123,7 +176,7 @@ export interface ChatCompletionChunk {
/**
* The object type, which is always `chat.completion.chunk`.
*/
- object: string;
+ object: 'chat.completion.chunk';
}
export namespace ChatCompletionChunk {
@@ -137,10 +190,11 @@ export namespace ChatCompletionChunk {
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, `content_filter` if
- * content was omitted due to a flag from our content filters, or `function_call`
- * if the model called a function.
+ * content was omitted due to a flag from our content filters, `tool_calls` if the
+ * model called a tool, or `function_call` (deprecated) if the model called a
+ * function.
*/
- finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter' | null;
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
/**
* The index of the choice in the list of choices.
@@ -159,21 +213,23 @@ export namespace ChatCompletionChunk {
content?: string | null;
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
*/
function_call?: Delta.FunctionCall;
/**
* The role of the author of this message.
*/
- role?: ChatCompletionsAPI.ChatCompletionRole;
+ role?: 'system' | 'user' | 'assistant' | 'tool';
+
+ tool_calls?: Array<Delta.ToolCall>;
}
export namespace Delta {
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
*/
export interface FunctionCall {
/**
@@ -189,10 +245,108 @@ export namespace ChatCompletionChunk {
*/
name?: string;
}
+
+ export interface ToolCall {
+ index: number;
+
+ /**
+ * The ID of the tool call.
+ */
+ id?: string;
+
+ function?: ToolCall.Function;
+
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type?: 'function';
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name?: string;
+ }
+ }
}
}
}
+export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage;
+
+export interface ChatCompletionContentPartImage {
+ image_url: ChatCompletionContentPartImage.ImageURL;
+
+ /**
+ * The type of the content part.
+ */
+ type: 'image_url';
+}
+
+export namespace ChatCompletionContentPartImage {
+ export interface ImageURL {
+ /**
+ * Specifies the detail level of the image.
+ */
+ detail?: 'auto' | 'low' | 'high';
+
+ /**
+ * Either a URL of the image or the base64 encoded image data.
+ */
+ url?: string;
+ }
+}
+
+export interface ChatCompletionContentPartText {
+ /**
+ * The text content.
+ */
+ text: string;
+
+ /**
+ * The type of the content part.
+ */
+ type: 'text';
+}
+
+/**
+ * Specifying a particular function via `{"name": "my_function"}` forces the model
+ * to call that function.
+ */
+export interface ChatCompletionFunctionCallOption {
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+}
+
+export interface ChatCompletionFunctionMessageParam {
+ /**
+ * The return value from the function call, to return to the model.
+ */
+ content: string | null;
+
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+
+ /**
+ * The role of the messages author, in this case `function`.
+ */
+ role: 'function';
+}
+
/**
* A chat completion message generated by the model.
*/
@@ -205,19 +359,24 @@ export interface ChatCompletionMessage {
/**
* The role of the author of this message.
*/
- role: ChatCompletionRole;
+ role: 'assistant';
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
*/
function_call?: ChatCompletionMessage.FunctionCall;
+
+ /**
+ * The tool calls generated by the model, such as function calls.
+ */
+ tool_calls?: Array<ChatCompletionMessageToolCall>;
}
export namespace ChatCompletionMessage {
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * Deprecated and replaced by `tool_calls`. The name and arguments of a function
+ * that should be called, as generated by the model.
*/
export interface FunctionCall {
/**
@@ -235,40 +394,35 @@ export namespace ChatCompletionMessage {
}
}
-export interface ChatCompletionMessageParam {
- /**
- * The contents of the message. `content` is required for all messages, and may be
- * null for assistant messages with function calls.
- */
- content: string | null;
+export type ChatCompletionMessageParam =
+ | ChatCompletionSystemMessageParam
+ | ChatCompletionUserMessageParam
+ | ChatCompletionAssistantMessageParam
+ | ChatCompletionToolMessageParam
+ | ChatCompletionFunctionMessageParam;
+export interface ChatCompletionMessageToolCall {
/**
- * The role of the messages author. One of `system`, `user`, `assistant`, or
- * `function`.
+ * The ID of the tool call.
*/
- role: 'system' | 'user' | 'assistant' | 'function';
+ id: string;
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * The function that the model called.
*/
- function_call?: ChatCompletionMessageParam.FunctionCall;
+ function: ChatCompletionMessageToolCall.Function;
/**
- * The name of the author of this message. `name` is required if role is
- * `function`, and it should be the name of the function whose response is in the
- * `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of
- * 64 characters.
+ * The type of the tool. Currently, only `function` is supported.
*/
- name?: string;
+ type: 'function';
}
-export namespace ChatCompletionMessageParam {
+export namespace ChatCompletionMessageToolCall {
/**
- * The name and arguments of a function that should be called, as generated by the
- * model.
+ * The function that the model called.
*/
- export interface FunctionCall {
+ export interface Function {
/**
* The arguments to call the function with, as generated by the model in JSON
* format. Note that the model does not always generate valid JSON, and may
@@ -285,9 +439,122 @@ export namespace ChatCompletionMessageParam {
}
/**
- * The role of the author of this message.
+ * Specifies a tool the model should use. Use to force the model to call a specific
+ * function.
+ */
+export interface ChatCompletionNamedToolChoice {
+ function?: ChatCompletionNamedToolChoice.Function;
+
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type?: 'function';
+}
+
+export namespace ChatCompletionNamedToolChoice {
+ export interface Function {
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
+
+/**
+ * The role of the author of a message
*/
-export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'function';
+export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function';
+
+export interface ChatCompletionSystemMessageParam {
+ /**
+ * The contents of the system message.
+ */
+ content: string | null;
+
+ /**
+ * The role of the messages author, in this case `system`.
+ */
+ role: 'system';
+}
+
+export interface ChatCompletionTool {
+ function: ChatCompletionTool.Function;
+
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type: 'function';
+}
+
+export namespace ChatCompletionTool {
+ export interface Function {
+ /**
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+ * underscores and dashes, with a maximum length of 64.
+ */
+ name: string;
+
+ /**
+ * The parameters the function accepts, described as a JSON Schema object. See the
+ * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
+ * examples, and the
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+ * documentation about the format.
+ *
+ * To describe a function that accepts no parameters, provide the value
+ * `{"type": "object", "properties": {}}`.
+ */
+ parameters: Record<string, unknown>;
+
+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;
+ }
+}
+
+/**
+ * Controls which (if any) function is called by the model. `none` means the model
+ * will not call a function and instead generates a message. `auto` means the model
+ * can pick between generating a message or calling a function. Specifying a
+ * particular function via
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that function.
+ *
+ * `none` is the default when no functions are present. `auto` is the default if
+ * functions are present.
+ */
+export type ChatCompletionToolChoiceOption = 'none' | 'auto' | ChatCompletionNamedToolChoice;
+
+export interface ChatCompletionToolMessageParam {
+ /**
+ * The contents of the tool message.
+ */
+ content: string | null;
+
+ /**
+ * The role of the messages author, in this case `tool`.
+ */
+ role: 'tool';
+
+ /**
+ * Tool call that this message is responding to.
+ */
+ tool_call_id: string;
+}
+
+export interface ChatCompletionUserMessageParam {
+ /**
+ * The contents of the user message.
+ */
+ content: string | Array<ChatCompletionContentPart> | null;
+
+ /**
+ * The role of the messages author, in this case `user`.
+ */
+ role: 'user';
+}
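
For context, a minimal sketch of a multimodal user message built from the content-part types above, assuming a configured `openai` client; the model name, prompt, and image URL are illustrative only.

import OpenAI from 'openai';

const openai = new OpenAI();

async function describeImage(imageUrl: string) {
  // `content` may be a plain string or an array of text and image parts.
  const completion = await openai.chat.completions.create({
    model: 'gpt-4-vision-preview',
    max_tokens: 300,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'What is shown in this image?' },
          { type: 'image_url', image_url: { url: imageUrl, detail: 'low' } },
        ],
      },
    ],
  });
  return completion.choices[0].message.content;
}
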
/**
* @deprecated ChatCompletionMessageParam should be used instead
@@ -334,16 +601,22 @@ export interface ChatCompletionCreateParamsBase {
frequency_penalty?: number | null;
/**
- * Controls how the model calls functions. "none" means the model will not call a
- * function and instead generates a message. "auto" means the model can pick
- * between generating a message or calling a function. Specifying a particular
- * function via `{"name": "my_function"}` forces the model to call that function.
- * "none" is the default when no functions are present. "auto" is the default if
+ * Deprecated in favor of `tool_choice`.
+ *
+ * Controls which (if any) function is called by the model. `none` means the model
+ * will not call a function and instead generates a message. `auto` means the model
+ * can pick between generating a message or calling a function. Specifying a
+ * particular function via `{"name": "my_function"}` forces the model to call that
+ * function.
+ *
+ * `none` is the default when no functions are present. `auto` is the default if
* functions are present.
*/
- function_call?: 'none' | 'auto' | ChatCompletionCreateParams.FunctionCallOption;
+ function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption;
/**
+ * Deprecated in favor of `tools`.
+ *
* A list of functions the model may generate JSON inputs for.
*/
   functions?: Array<ChatCompletionCreateParams.Function>;
@@ -351,7 +624,7 @@ export interface ChatCompletionCreateParamsBase {
/**
* Modify the likelihood of specified tokens appearing in the completion.
*
- * Accepts a json object that maps tokens (specified by their token ID in the
+ * Accepts a JSON object that maps tokens (specified by their token ID in the
* tokenizer) to an associated bias value from -100 to 100. Mathematically, the
* bias is added to the logits generated by the model prior to sampling. The exact
* effect will vary per model, but values between -1 and 1 should decrease or
@@ -384,6 +657,21 @@ export interface ChatCompletionCreateParamsBase {
*/
presence_penalty?: number | null;
+ /**
+ * An object specifying the format that the model must output. Used to enable JSON
+ * mode.
+ */
+ response_format?: ChatCompletionCreateParams.ResponseFormat;
+
+ /**
+ * This feature is in Beta. If specified, our system will make a best effort to
+ * sample deterministically, such that repeated requests with the same `seed` and
+ * parameters should return the same result. Determinism is not guaranteed, and you
+ * should refer to the `system_fingerprint` response parameter to monitor changes
+ * in the backend.
+ */
+ seed?: number | null;
+
/**
* Up to 4 sequences where the API will stop generating further tokens.
*/
@@ -408,6 +696,26 @@ export interface ChatCompletionCreateParamsBase {
*/
temperature?: number | null;
+ /**
+ * Controls which (if any) function is called by the model. `none` means the model
+ * will not call a function and instead generates a message. `auto` means the model
+ * can pick between generating a message or calling a function. Specifying a
+ * particular function via
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that function.
+ *
+ * `none` is the default when no functions are present. `auto` is the default if
+ * functions are present.
+ */
+ tool_choice?: ChatCompletionToolChoiceOption;
+
+ /**
+ * A list of tools the model may call. Currently, only functions are supported as a
+ * tool. Use this to provide a list of functions the model may generate JSON inputs
+ * for.
+ */
+ tools?: Array<ChatCompletionTool>;
+
/**
* An alternative to sampling with temperature, called nucleus sampling, where the
* model considers the results of the tokens with top_p probability mass. So 0.1
@@ -426,13 +734,6 @@ export interface ChatCompletionCreateParamsBase {
}
export namespace ChatCompletionCreateParams {
- export interface FunctionCallOption {
- /**
- * The name of the function to call.
- */
- name: string;
- }
-
export interface Function {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
@@ -459,6 +760,27 @@ export namespace ChatCompletionCreateParams {
description?: string;
}
+ /**
+ * An object specifying the format that the model must output. Used to enable JSON
+ * mode.
+ */
+ export interface ResponseFormat {
+ /**
+ * Setting to `json_object` enables JSON mode. This guarantees that the message the
+ * model generates is valid JSON.
+ *
+ * Note that your system prompt must still instruct the model to produce JSON, and
+ * to help ensure you don't forget, the API will throw an error if the string
+ * `JSON` does not appear in your system message. Also note that the message
+ * content may be partial (i.e. cut off) if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ *
+ * Must be one of `text` or `json_object`.
+ */
+ type?: 'text' | 'json_object';
+ }
+
export type ChatCompletionCreateParamsNonStreaming =
ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming;
export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming;
@@ -505,10 +827,23 @@ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreamin
export namespace Completions {
export import ChatCompletion = ChatCompletionsAPI.ChatCompletion;
+ export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk;
+ export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart;
+ export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage;
+ export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText;
+ export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption;
+ export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam;
export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage;
export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam;
+ export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall;
+ export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice;
export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole;
+ export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam;
+ export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool;
+ export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption;
+ export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam;
+ export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam;
/**
* @deprecated ChatCompletionMessageParam should be used instead
*/
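
For context, a minimal sketch that exercises the new request parameters above (`tools`, `tool_choice`, `response_format`, `seed`) and reads `tool_calls` and `system_fingerprint` from the response, assuming a configured `openai` client; the model name and function schema are illustrative only.

import OpenAI from 'openai';

const openai = new OpenAI();

async function weatherAsJson() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4-1106-preview',
    seed: 42,
    // JSON mode: the system message must still mention JSON, per the docs above.
    response_format: { type: 'json_object' },
    messages: [
      { role: 'system', content: 'You are a helpful assistant. Always reply in JSON.' },
      { role: 'user', content: 'What is the weather in Paris?' },
    ],
    tools: [
      {
        type: 'function',
        function: {
          name: 'get_weather',
          description: 'Look up the current weather for a city.',
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
      },
    ],
    tool_choice: 'auto',
  });

  const choice = completion.choices[0];
  if (choice.finish_reason === 'tool_calls') {
    for (const call of choice.message.tool_calls ?? []) {
      console.log(call.id, call.function.name, call.function.arguments);
    }
  }
  console.log('system_fingerprint:', completion.system_fingerprint);
  return choice.message;
}
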
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index 32dea91fe..ea9fe29bd 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -3,10 +3,23 @@
export { Chat } from './chat';
export {
ChatCompletion,
+ ChatCompletionAssistantMessageParam,
ChatCompletionChunk,
+ ChatCompletionContentPart,
+ ChatCompletionContentPartImage,
+ ChatCompletionContentPartText,
+ ChatCompletionFunctionCallOption,
+ ChatCompletionFunctionMessageParam,
ChatCompletionMessage,
ChatCompletionMessageParam,
+ ChatCompletionMessageToolCall,
+ ChatCompletionNamedToolChoice,
ChatCompletionRole,
+ ChatCompletionSystemMessageParam,
+ ChatCompletionTool,
+ ChatCompletionToolChoiceOption,
+ ChatCompletionToolMessageParam,
+ ChatCompletionUserMessageParam,
CreateChatCompletionRequestMessage,
ChatCompletionCreateParams,
CompletionCreateParams,
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 9ec1469dd..c314e7cf8 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -57,7 +57,15 @@ export interface Completion {
/**
* The object type, which is always "text_completion"
*/
- object: string;
+ object: 'text_completion';
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ *
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
/**
* Usage statistics for the completion request.
@@ -176,7 +184,7 @@ export interface CompletionCreateParamsBase {
/**
* Modify the likelihood of specified tokens appearing in the completion.
*
- * Accepts a json object that maps tokens (specified by their token ID in the GPT
+ * Accepts a JSON object that maps tokens (specified by their token ID in the GPT
* tokenizer) to an associated bias value from -100 to 100. You can use this
* [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
* convert text to token IDs. Mathematically, the bias is added to the logits
@@ -228,6 +236,16 @@ export interface CompletionCreateParamsBase {
*/
presence_penalty?: number | null;
+ /**
+ * If specified, our system will make a best effort to sample deterministically,
+ * such that repeated requests with the same `seed` and parameters should return
+ * the same result.
+ *
+ * Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+ * response parameter to monitor changes in the backend.
+ */
+ seed?: number | null;
+
/**
* Up to 4 sequences where the API will stop generating further tokens. The
* returned text will not contain the stop sequence.
diff --git a/src/resources/edits.ts b/src/resources/edits.ts
index 300f7a7aa..a6512a1e9 100644
--- a/src/resources/edits.ts
+++ b/src/resources/edits.ts
@@ -32,7 +32,7 @@ export interface Edit {
/**
* The object type, which is always `edit`.
*/
- object: string;
+ object: 'edit';
/**
* Usage statistics for the completion request.
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index 2c3302c99..219a05b1f 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -30,7 +30,7 @@ export interface CreateEmbeddingResponse {
/**
* The object type, which is always "embedding".
*/
- object: string;
+ object: 'embedding';
/**
* The usage information for the request.
@@ -74,7 +74,7 @@ export interface Embedding {
/**
* The object type, which is always "embedding".
*/
- object: string;
+ object: 'embedding';
}
export interface EmbeddingCreateParams {
diff --git a/src/resources/files.ts b/src/resources/files.ts
index bd684179a..52eb39b7c 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -2,6 +2,7 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
import { sleep } from 'openai/core';
import { APIConnectionTimeoutError } from 'openai/error';
import * as FilesAPI from 'openai/resources/files';
@@ -10,10 +11,16 @@ import { Page } from 'openai/pagination';
export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints/features. Currently, the
- * size of all the files uploaded by one organization can be up to 1 GB. Please
- * [contact us](https://help.openai.com/) if you need to increase the storage
- * limit.
+ * Upload a file that can be used across various endpoints/features. The size of
+ * all the files uploaded by one organization can be up to 100 GB.
+ *
+ * The size of individual files can be a maximum of 512 MB. See the
+ * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
+ * learn more about the types of files supported. The Fine-tuning API only supports
+ * `.jsonl` files.
+ *
+ * Please [contact us](https://help.openai.com/) if you need to increase these
+ * storage limits.
*/
   create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<FileObject> {
return this.post('/files', multipartFormRequestOptions({ body, ...options }));
@@ -29,8 +36,16 @@ export class Files extends APIResource {
/**
* Returns a list of files that belong to the user's organization.
*/
- list(options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject> {
- return this.getAPIList('/files', FileObjectsPage, options);
+ list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;
+ list(options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;
+ list(
+ query: FileListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<FileObjectsPage, FileObject> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this.getAPIList('/files', FileObjectsPage, { query, ...options });
}
/**
@@ -89,7 +104,7 @@ export interface FileDeleted {
deleted: boolean;
- object: string;
+ object: 'file';
}
/**
@@ -97,53 +112,52 @@ export interface FileDeleted {
*/
export interface FileObject {
/**
- * The file identifier, which can be referenced in the API endpoints.
+ * The File identifier, which can be referenced in the API endpoints.
*/
id: string;
/**
- * The size of the file in bytes.
+ * The size of the file, in bytes.
*/
bytes: number;
/**
- * The Unix timestamp (in seconds) for when the file was created.
+ * The Unix timestamp (in seconds) for when the File was created.
*/
created_at: number;
/**
- * The name of the file.
+ * The name of the File.
*/
filename: string;
/**
- * The object type, which is always "file".
+ * The object type, which is always `file`.
*/
- object: string;
+ object: 'file';
/**
- * The intended purpose of the file. Currently, only "fine-tune" is supported.
+ * The intended purpose of the File. Supported values are `fine-tune`,
+ * `fine-tune-results`, `assistants`, and `assistants_output`.
*/
- purpose: string;
+ purpose: 'fine-tune' | 'fine-tune-results' | 'assistants' | 'assistants_output';
/**
- * The current status of the file, which can be either `uploaded`, `processed`,
- * `pending`, `error`, `deleting` or `deleted`.
+ * Deprecated. The current status of the File, which can be either `uploaded`,
+ * `processed`, or `error`.
*/
- status?: string;
+ status: 'uploaded' | 'processed' | 'error';
/**
- * Additional details about the status of the file. If the file is in the `error`
- * state, this will include a message describing the error.
+ * Deprecated. For details on why a fine-tuning training file failed validation,
+ * see the `error` field on `fine_tuning.job`.
*/
- status_details?: string | null;
+ status_details?: string;
}
export interface FileCreateParams {
/**
- * The file object (not file name) to be uploaded.
- *
- * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
+ * The File object (not file name) to be uploaded.
*/
file: Uploadable;
@@ -151,11 +165,20 @@ export interface FileCreateParams {
* The intended purpose of the uploaded file.
*
* Use "fine-tune" for
- * [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). This
- * allows us to validate the format of the uploaded file is correct for
- * fine-tuning.
+ * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and
+ * "assistants" for
+ * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and
+ * [Messages](https://platform.openai.com/docs/api-reference/messages). This allows
+ * us to validate that the format of the uploaded file is correct for fine-tuning.
+ */
+ purpose: 'fine-tune' | 'assistants';
+}
+
+export interface FileListParams {
+ /**
+ * Only return files with the given purpose.
*/
- purpose: string;
+ purpose?: string;
}
export namespace Files {
@@ -164,4 +187,5 @@ export namespace Files {
export import FileObject = FilesAPI.FileObject;
export import FileObjectsPage = FilesAPI.FileObjectsPage;
export import FileCreateParams = FilesAPI.FileCreateParams;
+ export import FileListParams = FilesAPI.FileListParams;
}
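
For context, a minimal sketch of uploading a File with the new `assistants` purpose and filtering the list by purpose, assuming a configured `openai` client and that a Node `fs.ReadStream` is accepted as `Uploadable`; the file path is a placeholder.

import fs from 'fs';
import OpenAI from 'openai';

const openai = new OpenAI();

async function uploadForAssistants() {
  // Upload a file for use with the Assistants API.
  const file = await openai.files.create({
    file: fs.createReadStream('knowledge.pdf'),
    purpose: 'assistants',
  });

  // List only the files that were uploaded with that purpose.
  const assistantFiles = await openai.files.list({ purpose: 'assistants' });
  console.log(file.id, assistantFiles.data.length);
}
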
diff --git a/src/resources/fine-tunes.ts b/src/resources/fine-tunes.ts
index 99f90d17f..59551452f 100644
--- a/src/resources/fine-tunes.ts
+++ b/src/resources/fine-tunes.ts
@@ -116,7 +116,7 @@ export interface FineTune {
/**
* The object type, which is always "fine-tune".
*/
- object: string;
+ object: 'fine-tune';
/**
* The organization that owns the fine-tuning job.
@@ -212,13 +212,13 @@ export interface FineTuneEvent {
message: string;
- object: string;
+ object: 'fine-tune-event';
}
export interface FineTuneEventsListResponse {
   data: Array<FineTuneEvent>;
- object: string;
+ object: 'list';
}
export interface FineTuneCreateParams {
diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs.ts
index d616ce452..ad5ef3e0b 100644
--- a/src/resources/fine-tuning/jobs.ts
+++ b/src/resources/fine-tuning/jobs.ts
@@ -132,7 +132,7 @@ export interface FineTuningJob {
/**
* The object type, which is always "fine_tuning.job".
*/
- object: string;
+ object: 'fine_tuning.job';
/**
* The organization that owns the fine-tuning job.
@@ -150,7 +150,7 @@ export interface FineTuningJob {
* The current status of the fine-tuning job, which can be either
* `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
*/
- status: string;
+ status: 'validating_files' | 'queued' | 'running' | 'succeeded' | 'failed' | 'cancelled';
/**
* The total number of billable tokens processed by this fine-tuning job. The value
@@ -223,7 +223,7 @@ export interface FineTuningJobEvent {
message: string;
- object: string;
+ object: 'fine_tuning.job.event';
}
export interface JobCreateParams {
@@ -283,6 +283,18 @@ export namespace JobCreateParams {
* The hyperparameters used for the fine-tuning job.
*/
export interface Hyperparameters {
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
+ batch_size?: 'auto' | number;
+
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful to
+ * avoid overfitting.
+ */
+ learning_rate_multiplier?: 'auto' | number;
+
/**
* The number of epochs to train the model for. An epoch refers to one full cycle
* through the training dataset.
diff --git a/src/resources/images.ts b/src/resources/images.ts
index 019371b98..36744cb38 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -41,6 +41,12 @@ export interface Image {
*/
b64_json?: string;
+ /**
+ * The prompt that was used to generate the image, if there was any revision to the
+ * prompt.
+ */
+ revised_prompt?: string;
+
/**
* The URL of the generated image, if `response_format` is `url` (default).
*/
@@ -61,7 +67,14 @@ export interface ImageCreateVariationParams {
image: Uploadable;
/**
- * The number of images to generate. Must be between 1 and 10.
+ * The model to use for image generation. Only `dall-e-2` is supported at this
+ * time.
+ */
+ model?: (string & {}) | 'dall-e-2' | null;
+
+ /**
+ * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ * `n=1` is supported.
*/
n?: number | null;
@@ -105,6 +118,12 @@ export interface ImageEditParams {
*/
mask?: Uploadable;
+ /**
+ * The model to use for image generation. Only `dall-e-2` is supported at this
+ * time.
+ */
+ model?: (string & {}) | 'dall-e-2' | null;
+
/**
* The number of images to generate. Must be between 1 and 10.
*/
@@ -133,15 +152,28 @@ export interface ImageEditParams {
export interface ImageGenerateParams {
/**
* A text description of the desired image(s). The maximum length is 1000
- * characters.
+ * characters for `dall-e-2` and 4000 characters for `dall-e-3`.
*/
prompt: string;
/**
- * The number of images to generate. Must be between 1 and 10.
+ * The model to use for image generation.
+ */
+ model?: (string & {}) | 'dall-e-2' | 'dall-e-3' | null;
+
+ /**
+ * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ * `n=1` is supported.
*/
n?: number | null;
+ /**
+ * The quality of the image that will be generated. `hd` creates images with finer
+ * details and greater consistency across the image. This param is only supported
+ * for `dall-e-3`.
+ */
+ quality?: 'standard' | 'hd';
+
/**
* The format in which the generated images are returned. Must be one of `url` or
* `b64_json`.
@@ -150,9 +182,18 @@ export interface ImageGenerateParams {
/**
* The size of the generated images. Must be one of `256x256`, `512x512`, or
- * `1024x1024`.
+ * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
+ * `1024x1792` for `dall-e-3` models.
*/
- size?: '256x256' | '512x512' | '1024x1024' | null;
+ size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null;
+
+ /**
+ * The style of the generated images. Must be one of `vivid` or `natural`. Vivid
+ * causes the model to lean towards generating hyper-real and dramatic images.
+ * Natural causes the model to produce more natural, less hyper-real looking
+ * images. This param is only supported for `dall-e-3`.
+ */
+ style?: 'vivid' | 'natural' | null;
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
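Taken together, the `images.ts` changes add per-request model selection, the `dall-e-3`-only `quality` and `style` parameters, the larger `dall-e-3` sizes, and the `revised_prompt` field on results. A hedged sketch of a `dall-e-3` generation call; the parameter values are illustrative:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const result = await openai.images.generate({
    model: 'dall-e-3', // omitting `model` falls back to dall-e-2
    prompt: 'A cute baby sea otter wearing a beret',
    n: 1, // dall-e-3 only supports n=1
    quality: 'hd', // dall-e-3 only
    size: '1792x1024', // dall-e-3 sizes: 1024x1024, 1792x1024, 1024x1792
    style: 'natural', // dall-e-3 only
  });

  console.log(result.data[0].url);
  console.log(result.data[0].revised_prompt); // populated if the prompt was rewritten
}

main();
```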
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 3f2d78020..67a4ed227 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -14,7 +14,15 @@ export {
} from './completions';
export { CreateEmbeddingResponse, Embedding, EmbeddingCreateParams, Embeddings } from './embeddings';
export { Edit, EditCreateParams, Edits } from './edits';
-export { FileContent, FileDeleted, FileObject, FileCreateParams, FileObjectsPage, Files } from './files';
+export {
+ FileContent,
+ FileDeleted,
+ FileObject,
+ FileCreateParams,
+ FileListParams,
+ FileObjectsPage,
+ Files,
+} from './files';
export {
FineTune,
FineTuneEvent,
diff --git a/src/resources/models.ts b/src/resources/models.ts
index e1906db5d..4954ab4dd 100644
--- a/src/resources/models.ts
+++ b/src/resources/models.ts
@@ -53,7 +53,7 @@ export interface Model {
/**
* The object type, which is always "model".
*/
- object: string;
+ object: 'model';
/**
* The organization that owns the model.
diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts
new file mode 100644
index 000000000..0d7ecd887
--- /dev/null
+++ b/tests/api-resources/audio/speech.test.ts
@@ -0,0 +1,20 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource speech', () => {
+ test('create: required and optional params', async () => {
+ const response = await openai.audio.speech.create({
+ input: 'string',
+ model: 'string',
+ voice: 'alloy',
+ response_format: 'mp3',
+ speed: 0.25,
+ });
+ });
+});
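The speech test only exercises the request shape. A short sketch of consuming the binary result, assuming the endpoint resolves to a fetch-style `Response` carrying raw audio bytes; the model name and output path are illustrative:

```ts
import fs from 'fs';
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  // The speech endpoint returns audio bytes rather than JSON.
  const response = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'Hello from the text-to-speech endpoint.',
    response_format: 'mp3',
  });

  const buffer = Buffer.from(await response.arrayBuffer());
  await fs.promises.writeFile('./speech.mp3', buffer);
}

main();
```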
diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants/assistants.test.ts
new file mode 100644
index 000000000..60ca0a6e2
--- /dev/null
+++ b/tests/api-resources/beta/assistants/assistants.test.ts
@@ -0,0 +1,109 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource assistants', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.beta.assistants.create({ model: 'string' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.beta.assistants.create({
+ model: 'string',
+ description: 'string',
+ file_ids: ['string', 'string', 'string'],
+ instructions: 'string',
+ metadata: {},
+ name: 'string',
+ tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.assistants.retrieve('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.retrieve('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('update', async () => {
+ const responsePromise = openai.beta.assistants.update('string', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.assistants.list();
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.list(
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('del', async () => {
+ const responsePromise = openai.beta.assistants.del('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('del: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.beta.assistants.del('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+});
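The generated assistants tests cover the CRUD surface in isolation. A hedged end-to-end sketch of the same calls; the model name and metadata values are illustrative, not taken from this patch:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const assistant = await openai.beta.assistants.create({
    model: 'gpt-4-1106-preview', // illustrative model name
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. Answer questions briefly.',
    tools: [{ type: 'code_interpreter' }],
  });

  const updated = await openai.beta.assistants.update(assistant.id, {
    metadata: { team: 'examples' }, // illustrative metadata
  });
  console.log(updated.id, updated.name);

  const page = await openai.beta.assistants.list({ limit: 10, order: 'desc' });
  console.log(page.data.length);

  await openai.beta.assistants.del(assistant.id);
}

main();
```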
diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/assistants/files.test.ts
new file mode 100644
index 000000000..b06cac855
--- /dev/null
+++ b/tests/api-resources/beta/assistants/files.test.ts
@@ -0,0 +1,95 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource files', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
+ file_id: 'string',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
+ file_id: 'string',
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.assistants.files.retrieve('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.files.retrieve('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.assistants.files.list('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.files.list('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.files.list(
+ 'string',
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('del', async () => {
+ const responsePromise = openai.beta.assistants.files.del('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('del: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.assistants.files.del('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/chat/completions.test.ts b/tests/api-resources/beta/chat/completions.test.ts
new file mode 100644
index 000000000..a8d0b400e
--- /dev/null
+++ b/tests/api-resources/beta/chat/completions.test.ts
@@ -0,0 +1,10 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource completions', () => {});
diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts
new file mode 100644
index 000000000..501ed8311
--- /dev/null
+++ b/tests/api-resources/beta/threads/messages/files.test.ts
@@ -0,0 +1,68 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource files', () => {
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.threads.messages.files.retrieve(
+ 'thread_AF1WoRqd3aJAHsqc9NY7iL8F',
+ 'msg_AF1WoRqd3aJAHsqc9NY7iL8F',
+ 'file-AF1WoRqd3aJAHsqc9NY7iL8F',
+ );
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.files.retrieve(
+ 'thread_AF1WoRqd3aJAHsqc9NY7iL8F',
+ 'msg_AF1WoRqd3aJAHsqc9NY7iL8F',
+ 'file-AF1WoRqd3aJAHsqc9NY7iL8F',
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.threads.messages.files.list('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.files.list('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.files.list(
+ 'string',
+ 'string',
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages/messages.test.ts
new file mode 100644
index 000000000..35538efb9
--- /dev/null
+++ b/tests/api-resources/beta/threads/messages/messages.test.ts
@@ -0,0 +1,89 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource messages', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.beta.threads.messages.create('string', { content: 'x', role: 'user' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.beta.threads.messages.create('string', {
+ content: 'x',
+ role: 'user',
+ file_ids: ['string'],
+ metadata: {},
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.threads.messages.retrieve('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.retrieve('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('update', async () => {
+ const responsePromise = openai.beta.threads.messages.update('string', 'string', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.threads.messages.list('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.list('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.messages.list(
+ 'string',
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
new file mode 100644
index 000000000..0705ac528
--- /dev/null
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -0,0 +1,131 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource runs', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.beta.threads.runs.create('string', { assistant_id: 'string' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.beta.threads.runs.create('string', {
+ assistant_id: 'string',
+ instructions: 'string',
+ metadata: {},
+ model: 'string',
+ tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.threads.runs.retrieve('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.retrieve('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('update', async () => {
+ const responsePromise = openai.beta.threads.runs.update('string', 'string', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.threads.runs.list('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.list('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.list(
+ 'string',
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('cancel', async () => {
+ const responsePromise = openai.beta.threads.runs.cancel('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('cancel: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.cancel('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('submitToolOutputs: only required params', async () => {
+ const responsePromise = openai.beta.threads.runs.submitToolOutputs('string', 'string', {
+ tool_outputs: [{}, {}, {}],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('submitToolOutputs: required and optional params', async () => {
+ const response = await openai.beta.threads.runs.submitToolOutputs('string', 'string', {
+ tool_outputs: [
+ { tool_call_id: 'string', output: 'string' },
+ { tool_call_id: 'string', output: 'string' },
+ { tool_call_id: 'string', output: 'string' },
+ ],
+ });
+ });
+});
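Beyond the raw calls above, the typical run lifecycle is to poll until a terminal status and to answer `requires_action` with `submitToolOutputs`. A hedged sketch, assuming the `Run.status` and `required_action` shapes added elsewhere in this patch; the polling interval and canned output are placeholders:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

// Poll a run, answering any requested function tool calls with a canned output.
async function waitForRun(threadId: string, runId: string) {
  while (true) {
    const run = await openai.beta.threads.runs.retrieve(threadId, runId);

    if (run.status === 'requires_action' && run.required_action?.type === 'submit_tool_outputs') {
      const toolCalls = run.required_action.submit_tool_outputs.tool_calls;
      await openai.beta.threads.runs.submitToolOutputs(threadId, runId, {
        tool_outputs: toolCalls.map((call) => ({
          tool_call_id: call.id,
          output: JSON.stringify({ ok: true }), // replace with the real function result
        })),
      });
    } else if (run.status !== 'queued' && run.status !== 'in_progress' && run.status !== 'cancelling') {
      return run; // completed, failed, cancelled, or expired
    }

    await new Promise((resolve) => setTimeout(resolve, 1000)); // placeholder poll interval
  }
}
```

A caller would start the run with `openai.beta.threads.runs.create(threadId, { assistant_id })` and then `await waitForRun(threadId, run.id)`.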
diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts
new file mode 100644
index 000000000..76eec269a
--- /dev/null
+++ b/tests/api-resources/beta/threads/runs/steps.test.ts
@@ -0,0 +1,61 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource steps', () => {
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.threads.runs.steps.retrieve('string', 'string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.steps.retrieve('string', 'string', 'string', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list', async () => {
+ const responsePromise = openai.beta.threads.runs.steps.list('string', 'string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.steps.list('string', 'string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.runs.steps.list(
+ 'string',
+ 'string',
+ { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
new file mode 100644
index 000000000..4ca2247dd
--- /dev/null
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -0,0 +1,98 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource threads', () => {
+ test('create', async () => {
+ const responsePromise = openai.beta.threads.create({});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.beta.threads.retrieve('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.beta.threads.retrieve('string', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('update', async () => {
+ const responsePromise = openai.beta.threads.update('string', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('del', async () => {
+ const responsePromise = openai.beta.threads.del('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('del: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.beta.threads.del('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+
+ test('createAndRun: only required params', async () => {
+ const responsePromise = openai.beta.threads.createAndRun({ assistant_id: 'string' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('createAndRun: required and optional params', async () => {
+ const response = await openai.beta.threads.createAndRun({
+ assistant_id: 'string',
+ instructions: 'string',
+ metadata: {},
+ model: 'string',
+ thread: {
+ messages: [
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ ],
+ metadata: {},
+ },
+ tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ });
+ });
+});
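`createAndRun` collapses thread creation and run creation into a single request; the returned run carries the ID of the new thread. A hedged sketch, with a placeholder assistant ID:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const run = await openai.beta.threads.createAndRun({
    assistant_id: 'asst_abc123', // placeholder assistant ID
    thread: {
      messages: [{ role: 'user', content: 'What is 12 * 7?' }],
    },
  });

  console.log(run.id, run.thread_id, run.status);

  // After the run reaches a terminal status, messages can be read back from the thread.
  const messages = await openai.beta.threads.messages.list(run.thread_id);
  for (const message of messages.data) {
    console.log(message.role, message.content);
  }
}

main();
```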
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index 4a5616cae..b4eb00dfd 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -25,14 +25,7 @@ describe('resource completions', () => {
test('create: required and optional params', async () => {
const response = await openai.chat.completions.create({
- messages: [
- {
- content: 'string',
- function_call: { arguments: 'string', name: 'string' },
- name: 'string',
- role: 'system',
- },
- ],
+ messages: [{ content: 'string', role: 'system' }],
model: 'gpt-3.5-turbo',
frequency_penalty: -2,
function_call: 'none',
@@ -41,9 +34,17 @@ describe('resource completions', () => {
max_tokens: 0,
n: 1,
presence_penalty: -2,
+ response_format: { type: 'json_object' },
+ seed: -9223372036854776000,
stop: 'string',
stream: false,
temperature: 1,
+ tool_choice: 'none',
+ tools: [
+ { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } },
+ { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } },
+ { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } },
+ ],
top_p: 1,
user: 'user-1234',
});
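The chat completions test now exercises `tools`, `tool_choice`, `response_format`, and `seed` in place of the older single `function_call` shape. A hedged sketch of a non-streaming call that reads parallel tool calls off the response; the model and function names are illustrative:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo-1106', // illustrative model name
    messages: [{ role: 'user', content: 'What is the weather in Toronto?' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'get_weather', // illustrative function
          description: 'Look up the current weather for a city',
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
      },
    ],
    tool_choice: 'auto',
  });

  // With the new shape, calls arrive on message.tool_calls rather than message.function_call.
  for (const call of completion.choices[0].message.tool_calls ?? []) {
    console.log(call.id, call.function.name, call.function.arguments);
  }
}

main();
```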
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index b3f083727..85fc68498 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -32,6 +32,7 @@ describe('resource completions', () => {
max_tokens: 16,
n: 1,
presence_penalty: -2,
+ seed: -9223372036854776000,
stop: '\n',
stream: false,
suffix: 'test.',
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
index dc8a6da00..a84a2aba7 100644
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -12,7 +12,7 @@ describe('resource files', () => {
test('create: only required params', async () => {
const responsePromise = openai.files.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- purpose: 'string',
+ purpose: 'fine-tune',
});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
@@ -26,7 +26,7 @@ describe('resource files', () => {
test('create: required and optional params', async () => {
const response = await openai.files.create({
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- purpose: 'string',
+ purpose: 'fine-tune',
});
});
@@ -66,6 +66,13 @@ describe('resource files', () => {
);
});
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.files.list({ purpose: 'string' }, { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
test('del', async () => {
const responsePromise = openai.files.del('string');
const rawResponse = await responsePromise.asResponse();
diff --git a/tests/api-resources/fine-tuning/jobs.test.ts b/tests/api-resources/fine-tuning/jobs.test.ts
index 9bcb4b085..22f457303 100644
--- a/tests/api-resources/fine-tuning/jobs.test.ts
+++ b/tests/api-resources/fine-tuning/jobs.test.ts
@@ -27,7 +27,7 @@ describe('resource jobs', () => {
const response = await openai.fineTuning.jobs.create({
model: 'gpt-3.5-turbo',
training_file: 'file-abc123',
- hyperparameters: { n_epochs: 'auto' },
+ hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' },
suffix: 'x',
validation_file: 'file-abc123',
});
diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts
index c9291b258..418a55eb0 100644
--- a/tests/api-resources/images.test.ts
+++ b/tests/api-resources/images.test.ts
@@ -25,6 +25,7 @@ describe('resource images', () => {
test('createVariation: required and optional params', async () => {
const response = await openai.images.createVariation({
image: await toFile(Buffer.from('# my file contents'), 'README.md'),
+ model: 'dall-e-2',
n: 1,
response_format: 'url',
size: '1024x1024',
@@ -51,6 +52,7 @@ describe('resource images', () => {
image: await toFile(Buffer.from('# my file contents'), 'README.md'),
prompt: 'A cute baby sea otter wearing a beret',
mask: await toFile(Buffer.from('# my file contents'), 'README.md'),
+ model: 'dall-e-2',
n: 1,
response_format: 'url',
size: '1024x1024',
@@ -72,9 +74,12 @@ describe('resource images', () => {
test('generate: required and optional params', async () => {
const response = await openai.images.generate({
prompt: 'A cute baby sea otter',
+ model: 'dall-e-3',
n: 1,
+ quality: 'standard',
response_format: 'url',
size: '1024x1024',
+ style: 'vivid',
user: 'user-1234',
});
});