Merge pull request #617 from shivam-pareek/fix/max-completion-tokens
fix: added max_completion_tokens for chat complete
VisargD authored Sep 27, 2024
2 parents f3ed2b1 + 3b1c139 commit a65600e
Showing 30 changed files with 178 additions and 0 deletions.
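
For context: OpenAI's chat completions API introduced max_completion_tokens as the successor to the deprecated max_tokens, so OpenAI-compatible clients may send either spelling. Each provider's chat-complete ProviderConfig below therefore gains a max_completion_tokens entry mirroring the existing max_tokens entry, mapping onto the provider's native limit parameter (maxTokens, max_tokens, max_gen_len, maxOutputTokens, or a generationConfig transform). The sketch below is a minimal illustration of how such an entry is applied; it is not the repository's actual transform code, and applyConfig is a hypothetical helper.

```ts
// Minimal sketch (an assumption, not this repository's actual code) of how a
// ProviderConfig entry like the ones added in this diff is applied.
type Params = Record<string, any>;

interface ParamConfig {
  param: string;                        // provider-native parameter name
  default?: any;
  min?: number;
  max?: number;
  required?: boolean;
  transform?: (params: Params) => any;  // e.g. builds a generationConfig object
}

type ProviderConfig = Record<string, ParamConfig>;

// Hypothetical helper: map an OpenAI-style body onto a provider body.
// Defaults, required checks, and min/max clamping are omitted for brevity.
function applyConfig(config: ProviderConfig, incoming: Params): Params {
  const out: Params = {};
  for (const [key, cfg] of Object.entries(config)) {
    if (!(key in incoming)) continue;
    out[cfg.param] = cfg.transform ? cfg.transform(incoming) : incoming[key];
  }
  return out;
}

// With the entries added in this PR, both spellings reach the same
// provider-native parameter:
const ai21Like: ProviderConfig = {
  max_tokens: { param: 'maxTokens', default: 16 },
  max_completion_tokens: { param: 'maxTokens', default: 16 },
};
console.log(applyConfig(ai21Like, { max_completion_tokens: 256 }));
// -> { maxTokens: 256 }
```
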
4 changes: 4 additions & 0 deletions src/providers/ai21/chatComplete.ts
@@ -49,6 +49,10 @@ export const AI21ChatCompleteConfig: ProviderConfig = {
param: 'maxTokens',
default: 16,
},
max_completion_tokens: {
param: 'maxTokens',
default: 16,
},
minTokens: {
param: 'minTokens',
default: 0,

4 changes: 4 additions & 0 deletions src/providers/anthropic/chatComplete.ts
@@ -260,6 +260,10 @@ export const AnthropicChatCompleteConfig: ProviderConfig = {
param: 'max_tokens',
required: true,
},
max_completion_tokens: {
param: 'max_tokens',
required: true,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/anyscale/chatComplete.ts
@@ -32,6 +32,11 @@ export const AnyscaleChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/azure-ai-inference/chatComplete.ts
@@ -20,6 +20,11 @@ export const AzureAIInferenceChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/azure-openai/chatComplete.ts
@@ -27,6 +27,11 @@ export const AzureOpenAIChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_completion_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

31 changes: 31 additions & 0 deletions src/providers/bedrock/chatComplete.ts
@@ -255,6 +255,10 @@ export const BedrockAnthropicChatCompleteConfig: ProviderConfig = {
param: 'max_tokens',
required: true,
},
max_completion_tokens: {
param: 'max_tokens',
required: true,
},
temperature: {
param: 'temperature',
default: 1,
@@ -318,6 +322,11 @@ export const BedrockCohereChatCompleteConfig: ProviderConfig = {
default: 20,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: 20,
min: 1,
},
temperature: {
param: 'temperature',
default: 0.75,
@@ -394,6 +403,12 @@ export const BedrockLLamaChatCompleteConfig: ProviderConfig = {
min: 1,
max: 2048,
},
max_completion_tokens: {
param: 'max_gen_len',
default: 512,
min: 1,
max: 2048,
},
temperature: {
param: 'temperature',
default: 0.5,
@@ -437,6 +452,11 @@ export const BedrockMistralChatCompleteConfig: ProviderConfig = {
default: 20,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: 20,
min: 1,
},
temperature: {
param: 'temperature',
default: 0.75,
@@ -470,6 +490,9 @@ const transformTitanGenerationConfig = (params: Params) => {
if (params['max_tokens']) {
generationConfig['maxTokenCount'] = params['max_tokens'];
}
if (params['max_completion_tokens']) {
generationConfig['maxTokenCount'] = params['max_completion_tokens'];
}
if (params['stop']) {
generationConfig['stopSequences'] = params['stop'];
}
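
(Note: both branches above write the same maxTokenCount key, so if a request carries both max_tokens and max_completion_tokens, the later max_completion_tokens check wins. The same pattern appears in the Vertex and Google transformGenerationConfig changes further down.)
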
@@ -508,6 +531,10 @@ export const BedrockTitanChatompleteConfig: ProviderConfig = {
param: 'textGenerationConfig',
transform: (params: Params) => transformTitanGenerationConfig(params),
},
max_completion_tokens: {
param: 'textGenerationConfig',
transform: (params: Params) => transformTitanGenerationConfig(params),
},
top_p: {
param: 'textGenerationConfig',
transform: (params: Params) => transformTitanGenerationConfig(params),
@@ -542,6 +569,10 @@ export const BedrockAI21ChatCompleteConfig: ProviderConfig = {
param: 'maxTokens',
default: 200,
},
max_completion_tokens: {
param: 'maxTokens',
default: 200,
},
temperature: {
param: 'temperature',
default: 0.7,

5 changes: 5 additions & 0 deletions src/providers/cohere/chatComplete.ts
@@ -72,6 +72,11 @@ export const CohereChatCompleteConfig: ProviderConfig = {
default: 20,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: 20,
min: 1,
},
temperature: {
param: 'temperature',
default: 0.75,

5 changes: 5 additions & 0 deletions src/providers/deepbricks/chatComplete.ts
@@ -29,6 +29,11 @@ export const DeepbricksChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/deepinfra/chatComplete.ts
@@ -34,6 +34,11 @@ export const DeepInfraChatCompleteConfig: ProviderConfig = {
default: 100,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 1,
},
n: {
param: 'n',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/deepseek/chatComplete.ts
@@ -25,6 +25,11 @@ export const DeepSeekChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/fireworks-ai/chatComplete.ts
@@ -27,6 +27,11 @@ export const FireworksAIChatCompleteConfig: ProviderConfig = {
default: 200,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: 200,
min: 1,
},
prompt_truncate_len: {
param: 'prompt_truncate_len',
default: 1500,

14 changes: 14 additions & 0 deletions src/providers/google-vertex-ai/chatComplete.ts
@@ -213,6 +213,10 @@ export const VertexGoogleChatCompleteConfig: ProviderConfig = {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),
},
max_completion_tokens: {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),
},
stop: {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),
@@ -509,6 +513,10 @@ export const VertexAnthropicChatCompleteConfig: ProviderConfig = {
param: 'max_tokens',
required: true,
},
max_completion_tokens: {
param: 'max_tokens',
required: true,
},
temperature: {
param: 'temperature',
default: 1,
@@ -663,6 +671,12 @@ export const VertexLlamaChatCompleteConfig: ProviderConfig = {
min: 1,
max: 2048,
},
max_completion_tokens: {
param: 'max_tokens',
default: 512,
min: 1,
max: 2048,
},
temperature: {
param: 'temperature',
default: 0.5,

3 changes: 3 additions & 0 deletions src/providers/google-vertex-ai/transformGenerationConfig.ts
@@ -17,6 +17,9 @@ export function transformGenerationConfig(params: Params) {
if (params['max_tokens']) {
generationConfig['maxOutputTokens'] = params['max_tokens'];
}
if (params['max_completion_tokens']) {
generationConfig['maxOutputTokens'] = params['max_completion_tokens'];
}
if (params['stop']) {
generationConfig['stopSequences'] = params['stop'];
}

7 changes: 7 additions & 0 deletions src/providers/google/chatComplete.ts
@@ -32,6 +32,9 @@ const transformGenerationConfig = (params: Params) => {
if (params['max_tokens']) {
generationConfig['maxOutputTokens'] = params['max_tokens'];
}
if (params['max_completion_tokens']) {
generationConfig['maxOutputTokens'] = params['max_completion_tokens'];
}
if (params['stop']) {
generationConfig['stopSequences'] = params['stop'];
}
@@ -287,6 +290,10 @@ export const GoogleChatCompleteConfig: ProviderConfig = {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),
},
max_completion_tokens: {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),
},
stop: {
param: 'generationConfig',
transform: (params: Params) => transformGenerationConfig(params),

5 changes: 5 additions & 0 deletions src/providers/groq/chatComplete.ts
@@ -24,6 +24,11 @@ export const GroqChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/huggingface/chatComplete.ts
@@ -30,6 +30,11 @@ export const HuggingfaceChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/lingyi/chatComplete.ts
@@ -24,6 +24,11 @@ export const LingyiChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/mistral-ai/chatComplete.ts
@@ -36,6 +36,11 @@ export const MistralAIChatCompleteConfig: ProviderConfig = {
default: null,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
default: null,
min: 1,
},
stream: {
param: 'stream',
default: false,

5 changes: 5 additions & 0 deletions src/providers/moonshot/chatComplete.ts
@@ -25,6 +25,11 @@ export const MoonshotChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

6 changes: 6 additions & 0 deletions src/providers/novita-ai/chatComplete.ts
@@ -28,6 +28,12 @@ export const NovitaAIChatCompleteConfig: ProviderConfig = {
default: 128,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
required: true,
default: 128,
min: 1,
},
stop: {
param: 'stop',
},

5 changes: 5 additions & 0 deletions src/providers/ollama/chatComplete.ts
@@ -56,6 +56,11 @@ export const OllamaChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
};

export interface OllamaChatCompleteResponse

5 changes: 5 additions & 0 deletions src/providers/openrouter/chatComplete.ts
@@ -24,6 +24,11 @@ export const OpenrouterChatCompleteConfig: ProviderConfig = {
default: 100,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
default: 100,
min: 0,
},
temperature: {
param: 'temperature',
default: 1,

5 changes: 5 additions & 0 deletions src/providers/palm/chatComplete.ts
@@ -66,6 +66,11 @@ export const PalmChatCompleteConfig: ProviderConfig = {
default: 100,
min: 1,
},
max_completion_tokens: {
param: 'maxOutputTokens',
default: 100,
min: 1,
},
stop: {
param: 'stopSequences',
},

5 changes: 5 additions & 0 deletions src/providers/perplexity-ai/chatComplete.ts
@@ -27,6 +27,11 @@ export const PerplexityAIChatCompleteConfig: ProviderConfig = {
required: true,
min: 1,
},
max_completion_tokens: {
param: 'max_tokens',
required: true,
min: 1,
},
temperature: {
param: 'temperature',
min: 0,

6 changes: 6 additions & 0 deletions src/providers/predibase/chatComplete.ts
@@ -35,6 +35,12 @@ export const PredibaseChatCompleteConfig: ProviderConfig = {
default: 4096,
min: 0,
},
max_completion_tokens: {
param: 'max_tokens',
required: false,
default: 4096,
min: 0,
},
temperature: {
param: 'temperature',
required: false,

[5 more changed files not shown]
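
A quick way to exercise the change end to end (a hypothetical smoke test: the gateway URL, port, and model are placeholders, and any provider-routing or auth headers the gateway needs are omitted):

```ts
// Hypothetical smoke test: send max_completion_tokens through an
// OpenAI-compatible chat completions endpoint. URL and model are placeholders.
const res = await fetch('http://localhost:8787/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Say hello in five words.' }],
    max_completion_tokens: 32, // previously the gateway only mapped max_tokens
  }),
});
console.log(await res.json());
```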
