Support Vertex Llama Hosted API Endpoint #484

Merged: 5 commits, Aug 22, 2024

Changes from 1 commit
20 changes: 11 additions & 9 deletions src/handlers/streamHandler.ts
@@ -6,7 +6,9 @@ import {
GOOGLE,
REQUEST_TIMEOUT_STATUS_CODE,
PRECONDITION_CHECK_FAILED_STATUS_CODE,
GOOGLE_VERTEX_AI,
} from '../globals';
import { VertexLlamaChatCompleteStreamChunkTransform } from '../providers/google-vertex-ai/chatComplete';
import { OpenAIChatCompleteResponse } from '../providers/openai/chatComplete';
import { OpenAICompleteResponse } from '../providers/openai/complete';
import { getStreamModeSplitPattern, type SplitPatternType } from '../utils';
@@ -292,15 +294,15 @@ export async function handleStreamingMode(
}

// Convert GEMINI/COHERE json stream to text/event-stream for non-proxy calls
-  if (
-    [
-      //
-      GOOGLE,
-      COHERE,
-      BEDROCK,
-    ].includes(proxyProvider) &&
-    responseTransformer
-  ) {
+  const isGoogleCohereOrBedrock = [GOOGLE, COHERE, BEDROCK].includes(
+    proxyProvider
+  );
+  const isVertexLlama =
+    GOOGLE_VERTEX_AI.includes(proxyProvider) &&
+    responseTransformer?.name ===
+      VertexLlamaChatCompleteStreamChunkTransform.name;
+  const isJsonStream = isGoogleCohereOrBedrock || isVertexLlama;
+  if (isJsonStream && responseTransformer) {
return new Response(readable, {
...response,
headers: new Headers({
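Note on the gate above: GOOGLE_VERTEX_AI is a string constant, so GOOGLE_VERTEX_AI.includes(proxyProvider) is a substring check rather than an equality check (this is the line flagged in review). A minimal, self-contained sketch of the new predicate, with stand-in constant values assumed for illustration:

```typescript
// Stand-in constants; the real values live in src/globals.ts and are assumed here.
const GOOGLE = 'google';
const COHERE = 'cohere';
const BEDROCK = 'bedrock';
const GOOGLE_VERTEX_AI = 'vertex-ai';

// Stand-in transform; only the function's `name` matters to the gate.
const VertexLlamaChatCompleteStreamChunkTransform = (chunk: string) => chunk;

function isJsonStream(
  proxyProvider: string,
  responseTransformer?: { name: string }
): boolean {
  const isGoogleCohereOrBedrock = [GOOGLE, COHERE, BEDROCK].includes(proxyProvider);
  // String.prototype.includes, i.e. a substring match, not an equality test.
  const isVertexLlama =
    GOOGLE_VERTEX_AI.includes(proxyProvider) &&
    responseTransformer?.name === VertexLlamaChatCompleteStreamChunkTransform.name;
  return isGoogleCohereOrBedrock || isVertexLlama;
}

// Because of the substring semantics, a partial provider string also passes:
console.log(isJsonStream('vertex', VertexLlamaChatCompleteStreamChunkTransform)); // true
console.log(isJsonStream('vertex-ai', VertexLlamaChatCompleteStreamChunkTransform)); // true
```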
49 changes: 33 additions & 16 deletions src/providers/google-vertex-ai/api.ts
@@ -1,22 +1,34 @@
import { Options } from '../../types/requestBody';
import { ProviderAPIConfig } from '../types';
import { getModelAndProvider } from './utils';
import { getAccessToken } from './utils';

const getProjectRoute = (
providerOptions: Options,
inputModel: string
): string => {
const {
vertexProjectId: inputProjectId,
vertexRegion,
vertexServiceAccountJson,
} = providerOptions;
let projectId = inputProjectId;
if (vertexServiceAccountJson) {
projectId = vertexServiceAccountJson.project_id;
}

const { provider } = getModelAndProvider(inputModel as string);
const routeVersion = provider === 'meta' ? 'v1beta1' : 'v1';
return `/${routeVersion}/projects/${projectId}/locations/${vertexRegion}`;
};

// Good reference for using REST: https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstarts/quickstart-multimodal#gemini-beginner-samples-drest
// Difference versus Studio AI: https://cloud.google.com/vertex-ai/docs/start/ai-platform-users
export const GoogleApiConfig: ProviderAPIConfig = {
getBaseURL: ({ providerOptions }) => {
-    const {
-      vertexProjectId: inputProjectId,
-      vertexRegion,
-      vertexServiceAccountJson,
-    } = providerOptions;
-    let projectId = inputProjectId;
-    if (vertexServiceAccountJson) {
-      projectId = vertexServiceAccountJson.project_id;
-    }
+    const { vertexRegion } = providerOptions;

-    return `https://${vertexRegion}-aiplatform.googleapis.com/v1/projects/${projectId}/locations/${vertexRegion}`;
+    return `https://${vertexRegion}-aiplatform.googleapis.com`;
},
headers: async ({ providerOptions }) => {
const { apiKey, vertexServiceAccountJson } = providerOptions;
@@ -30,36 +42,41 @@ export const GoogleApiConfig: ProviderAPIConfig = {
Authorization: `Bearer ${authToken}`,
};
},
-  getEndpoint: ({ fn, gatewayRequestBody }) => {
+  getEndpoint: ({ fn, gatewayRequestBody, providerOptions }) => {
let mappedFn = fn;
const { model: inputModel, stream } = gatewayRequestBody;
if (stream) {
mappedFn = `stream-${fn}`;
}

const { provider, model } = getModelAndProvider(inputModel as string);
const projectRoute = getProjectRoute(providerOptions, inputModel as string);

switch (provider) {
case 'google': {
if (mappedFn === 'chatComplete') {
-          return `/publishers/${provider}/models/${model}:generateContent`;
+          return `${projectRoute}/publishers/${provider}/models/${model}:generateContent`;
} else if (mappedFn === 'stream-chatComplete') {
-          return `/publishers/${provider}/models/${model}:streamGenerateContent?alt=sse`;
+          return `${projectRoute}/publishers/${provider}/models/${model}:streamGenerateContent?alt=sse`;
}
}

case 'anthropic': {
if (mappedFn === 'chatComplete') {
-          return `/publishers/${provider}/models/${model}:rawPredict`;
+          return `${projectRoute}/publishers/${provider}/models/${model}:rawPredict`;
} else if (mappedFn === 'stream-chatComplete') {
-          return `/publishers/${provider}/models/${model}:streamRawPredict`;
+          return `${projectRoute}/publishers/${provider}/models/${model}:streamRawPredict`;
}
}

case 'meta': {
return `${projectRoute}/endpoints/openapi/chat/completions`;
}

// Embed API is not yet implemented in the gateway
// This may be as easy as copy-paste from Google provider, but needs to be tested
default:
-        return '';
+        return `${projectRoute}`;
}
},
};
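For reviewers, here is roughly how the split between getBaseURL and the new getProjectRoute composes for a Llama request. This is a sketch with made-up project and region values:

```typescript
// Made-up provider options for illustration.
const vertexProjectId = 'my-gcp-project';
const vertexRegion = 'us-central1';

// getBaseURL now returns only the host:
const base = `https://${vertexRegion}-aiplatform.googleapis.com`;

// getProjectRoute picks v1beta1 for meta models and v1 for everything else:
const projectRoute = `/v1beta1/projects/${vertexProjectId}/locations/${vertexRegion}`;

// getEndpoint for provider 'meta' appends the OpenAI-compatible MaaS route:
console.log(`${base}${projectRoute}/endpoints/openapi/chat/completions`);
// → https://us-central1-aiplatform.googleapis.com/v1beta1/projects/my-gcp-project/locations/us-central1/endpoints/openapi/chat/completions
```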
89 changes: 89 additions & 0 deletions src/providers/google-vertex-ai/chatComplete.ts
@@ -34,6 +34,8 @@ import { transformGenerationConfig } from './transformGenerationConfig';
import type {
GoogleErrorResponse,
GoogleGenerateContentResponse,
VertexLlamaChatCompleteStreamChunk,
VertexLLamaChatCompleteResponse,
} from './types';

export const VertexGoogleChatCompleteConfig: ProviderConfig = {
@@ -640,6 +642,47 @@ export const GoogleChatCompleteResponseTransform: (
return generateInvalidProviderResponseError(response, GOOGLE_VERTEX_AI);
};

export const VertexLlamaChatCompleteConfig: ProviderConfig = {
model: {
param: 'model',
required: true,
default: 'meta/llama3-405b-instruct-maas',
},
messages: {
param: 'messages',
required: true,
default: [],
},
max_tokens: {
param: 'max_tokens',
default: 512,
min: 1,
max: 2048,
},
temperature: {
param: 'temperature',
default: 0.5,
min: 0,
max: 1,
},
top_p: {
param: 'top_p',
default: 0.9,
min: 0,
max: 1,
},
top_k: {
param: 'top_k',
default: 0,
min: 0,
max: 2048,
},
stream: {
param: 'stream',
default: false,
},
};
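For context, a gateway request that this config would translate might look like the following. The payload is hypothetical; the bounds in the comments come from the min/max values declared above:

```typescript
// Hypothetical OpenAI-style request body; each field maps 1:1 onto the
// Vertex Llama MaaS parameter of the same name per VertexLlamaChatCompleteConfig.
const gatewayRequestBody = {
  model: 'meta/llama3-405b-instruct-maas', // config default when omitted
  messages: [{ role: 'user', content: 'Say hello.' }],
  max_tokens: 256, // declared bounds: min 1, max 2048, default 512
  temperature: 0.7, // declared bounds: min 0, max 1, default 0.5
  top_p: 0.9, // declared bounds: min 0, max 1, default 0.9
  stream: true, // default false
};
```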

export const GoogleChatCompleteStreamChunkTransform: (
response: string,
fallbackId: string
@@ -922,3 +965,49 @@ export const VertexAnthropicChatCompleteStreamChunkTransform: (
})}` + '\n\n'
);
};

export const VertexLlamaChatCompleteResponseTransform: (
response: VertexLLamaChatCompleteResponse | GoogleErrorResponse,
responseStatus: number
) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => {
if (
responseStatus !== 200 &&
Array.isArray(response) &&
response.length > 0 &&
'error' in response[0]
) {
const { error } = response[0];

return generateErrorResponse(
{
message: error.message,
type: error.status,
param: null,
code: String(error.code),
},
GOOGLE_VERTEX_AI
);
}
if ('choices' in response) {
return {
id: crypto.randomUUID(),
created: Math.floor(Date.now() / 1000),
...response,
};
}
return generateInvalidProviderResponseError(response, GOOGLE_VERTEX_AI);
};
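Two made-up payloads illustrate the branches above: Vertex reports errors as a one-element array, while a success response simply lacks id and created.

```typescript
// Error path: responseStatus !== 200 and the body is an array with an `error` key.
const errorResponse = [
  { error: { code: 404, message: 'Publisher model not found', status: 'NOT_FOUND' } },
];
// → generateErrorResponse({ message, type: 'NOT_FOUND', param: null, code: '404' }, ...)

// Success path: 'choices' is present, so the transform fills in id and created.
const okResponse = {
  choices: [
    { index: 0, message: { role: 'assistant', content: 'Hello!' }, finish_reason: 'stop' },
  ],
  model: 'meta/llama3-405b-instruct-maas',
  object: 'chat.completion',
  usage: { prompt_tokens: 5, completion_tokens: 3, total_tokens: 8 },
};
// → { id: crypto.randomUUID(), created: <unix seconds>, ...okResponse }
```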

export const VertexLlamaChatCompleteStreamChunkTransform: (
response: string,
fallbackId: string
) => string = (responseChunk, fallbackId) => {
let chunk = responseChunk.trim();
chunk = chunk.replace(/^data: /, '');
chunk = chunk.trim();
const parsedChunk: VertexLlamaChatCompleteStreamChunk = JSON.parse(chunk);
parsedChunk.id = fallbackId;
parsedChunk.created = Math.floor(Date.now() / 1000);
return `data: ${JSON.stringify(parsedChunk)}` + '\n\n';
};
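To show what the chunk transform does end to end, here is a sketch with a made-up SSE line; the chunk shape follows VertexLlamaChatCompleteStreamChunk in types.ts:

```typescript
// Made-up incoming SSE line from the Vertex Llama MaaS endpoint.
const responseChunk =
  'data: {"choices":[{"delta":{"content":"Hello","role":"assistant"},"index":0}],' +
  '"model":"meta/llama3-405b-instruct-maas","object":"chat.completion.chunk"}';
const fallbackId = 'portkey-fallback-id'; // hypothetical id supplied by the handler

// Same steps as the transform above: strip the `data: ` prefix, parse,
// then pin id/created so every chunk in the stream is consistent.
const parsedChunk = JSON.parse(responseChunk.trim().replace(/^data: /, '').trim());
parsedChunk.id = fallbackId;
parsedChunk.created = Math.floor(Date.now() / 1000);
console.log(`data: ${JSON.stringify(parsedChunk)}\n\n`);
```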
12 changes: 12 additions & 0 deletions src/providers/google-vertex-ai/index.ts
@@ -7,6 +7,9 @@ import {
VertexAnthropicChatCompleteResponseTransform,
VertexAnthropicChatCompleteStreamChunkTransform,
VertexGoogleChatCompleteConfig,
VertexLlamaChatCompleteConfig,
VertexLlamaChatCompleteResponseTransform,
VertexLlamaChatCompleteStreamChunkTransform,
} from './chatComplete';
import { getModelAndProvider } from './utils';

@@ -36,6 +39,15 @@ const VertexConfig: ProviderConfigs = {
chatComplete: VertexAnthropicChatCompleteResponseTransform,
},
};
case 'meta':
return {
chatComplete: VertexLlamaChatCompleteConfig,
api: GoogleApiConfig,
responseTransforms: {
chatComplete: VertexLlamaChatCompleteResponseTransform,
'stream-chatComplete': VertexLlamaChatCompleteStreamChunkTransform,
},
};
}
},
};
25 changes: 25 additions & 0 deletions src/providers/google-vertex-ai/types.ts
@@ -1,3 +1,5 @@
import { ChatCompletionResponse } from '../types';

export interface GoogleErrorResponse {
error: {
code: number;
@@ -42,3 +44,26 @@ export interface GoogleGenerateContentResponse {
totalTokenCount: number;
};
}

export interface VertexLLamaChatCompleteResponse
extends Omit<ChatCompletionResponse, 'id' | 'created'> {}

export interface VertexLlamaChatCompleteStreamChunk {
choices: {
delta: {
content: string;
role: string;
};
finish_reason?: string;
index: 0;
}[];
model: string;
object: string;
usage?: {
completion_tokens: number;
prompt_tokens: number;
total_tokens: number;
};
id?: string;
created?: number;
}
2 changes: 2 additions & 0 deletions src/providers/google-vertex-ai/utils.ts
@@ -129,6 +129,8 @@ export const getModelAndProvider = (modelString: string) => {
) {
provider = modelStringParts[0];
model = modelStringParts.slice(1).join('.');
} else if (modelString.includes('llama')) {
provider = 'meta';
}

return { provider, model };
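A quick note on scope: the new branch only overrides provider, so the model value is whatever the earlier parsing produced. A hedged sanity check, under that assumption:

```typescript
import { getModelAndProvider } from './utils';

// Assumed behavior, inferred from the branch above: any model string containing
// 'llama' that the earlier prefix parsing did not claim is routed to 'meta'.
const { provider } = getModelAndProvider('meta/llama3-405b-instruct-maas');
console.log(provider); // → 'meta'
```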
2 changes: 1 addition & 1 deletion src/utils.ts
@@ -30,7 +30,7 @@ export const getStreamModeSplitPattern = (
// Anthropic vertex has \n\n as the pattern
if (
proxyProvider === GOOGLE_VERTEX_AI &&
-    requestURL.indexOf('/publishers/anthropic') === -1
+    requestURL.includes('/publishers/google')
) {
splitPattern = '\r\n\r\n';
}
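Net effect of this last change, sketched as a standalone predicate. The provider slug and the \n\n default are assumptions, per the existing comment above:

```typescript
const GOOGLE_VERTEX_AI = 'vertex-ai'; // slug assumed; real value lives in src/globals.ts

// Assumed default: non-Google Vertex streams (Anthropic, and now Llama) split on \n\n.
function getVertexSplitPattern(proxyProvider: string, requestURL: string): string {
  let splitPattern = '\n\n';
  if (
    proxyProvider === GOOGLE_VERTEX_AI &&
    requestURL.includes('/publishers/google')
  ) {
    splitPattern = '\r\n\r\n';
  }
  return splitPattern;
}

// Gemini route keeps \r\n\r\n; the new Llama MaaS route falls through to \n\n.
getVertexSplitPattern('vertex-ai', '/v1/projects/p/locations/r/publishers/google/models/gemini-pro:streamGenerateContent?alt=sse'); // '\r\n\r\n'
getVertexSplitPattern('vertex-ai', '/v1beta1/projects/p/locations/r/endpoints/openapi/chat/completions'); // '\n\n'
```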