✨ Feature(custom): add groq support
ISSUES CLOSED: #64
Kuingsmile committed Jul 16, 2024
1 parent 638ced4 commit 23e16d8
Showing 14 changed files with 387 additions and 204 deletions.
9 changes: 5 additions & 4 deletions package.json
@@ -19,10 +19,11 @@
     "axios": "^1.6.2",
     "core-js": "^3.37.1",
     "dexie": "^3.2.4",
-    "element-plus": "2.7.5",
+    "element-plus": "2.7.7",
+    "groq-sdk": "^0.5.0",
     "openai": "^4.10.0",
     "unfetch": "^5.0.0",
-    "vue": "^3.4.29",
+    "vue": "^3.4.31",
     "vue-class-component": "^8.0.0-rc.1",
     "vue-i18n": "^9.13.1",
     "vue-router": "^4.4.0"
@@ -43,10 +44,10 @@
     "dpdm": "^3.14.0",
     "eslint": "^8.57.0",
     "eslint-plugin-vue": "^9.26.0",
-    "prettier": "^3.3.2",
     "stylus": "^0.61.0",
     "stylus-loader": "^7.1.3",
-    "typescript": "^5.2.2",
+    "prettier": "^3.3.2",
+    "typescript": "^5.2.2"
   },
   "commitlint": {
     "extends": [
48 changes: 48 additions & 0 deletions src/api/groq.ts
@@ -0,0 +1,48 @@
+import Groq from 'groq-sdk'
+import { Ref } from 'vue'
+
+interface ChatCompletionStreamOptions {
+  groqAPIKey: string
+  groqModel: string
+  messages: any[]
+  result: Ref<string>
+  historyDialog: Ref<any[]>
+  errorIssue: Ref<boolean>
+  loading: Ref<boolean>
+  maxTokens?: number
+  temperature?: number
+}
+
+async function createChatCompletionStream(
+  options: ChatCompletionStreamOptions
+): Promise<void> {
+  try {
+    const groq = new Groq({
+      apiKey: options.groqAPIKey,
+      dangerouslyAllowBrowser: true
+    })
+    const requestConfig = {
+      model: options.groqModel,
+      messages: options.messages,
+      temperature: options.temperature ?? 0.5,
+      max_tokens: options.maxTokens ?? 1024
+    }
+
+    const response = await groq.chat.completions.create(requestConfig)
+    options.result.value =
+      response.choices[0].message?.content?.replace(/\\n/g, '\n') ?? ''
+    options.historyDialog.value.push({
+      role: 'assistant',
+      content: options.result.value
+    })
+  } catch (error) {
+    options.result.value = String(error)
+    options.errorIssue.value = true
+    console.error(error)
+  }
+  options.loading.value = false
+}
+
+export default {
+  createChatCompletionStream
+}
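
For orientation, a minimal sketch of how the Vue side can drive this new module from an async handler. The ref wiring follows the ChatCompletionStreamOptions interface above; the API key and model id are placeholders, not values from this commit:

  import { ref } from 'vue'
  import groq from './groq'

  const result = ref('')
  const historyDialog = ref<any[]>([])
  const errorIssue = ref(false)
  const loading = ref(true)

  await groq.createChatCompletionStream({
    groqAPIKey: 'gsk_...', // placeholder: a real Groq API key
    groqModel: 'llama3-8b-8192', // illustrative model id
    messages: [{ role: 'user', content: 'Hello' }],
    result,
    historyDialog,
    errorIssue,
    loading
  })
  // On success result.value holds the reply and historyDialog gains an
  // assistant turn; on error errorIssue becomes true and result.value
  // carries the error text. loading is reset to false either way.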
2 changes: 2 additions & 0 deletions src/api/index.ts
@@ -3,13 +3,15 @@ import azure from './azure'
 import palm from './palm'
 import common from './common'
 import gemini from './gemini'
+import groq from './groq'
 import ollama from './ollama'
 
 export default {
   official,
   azure,
   palm,
   gemini,
+  groq,
   common,
   ollama
 }
62 changes: 33 additions & 29 deletions src/api/official.ts
@@ -11,48 +11,52 @@ function setConfig(apiKey: string, basePath?: string): ClientOptions {
   return config
 }
 
-async function createChatCompletionStream(
-  config: ClientOptions,
-  messages: any[],
-  result: Ref<string>,
-  historyDialog: Ref<any[]>,
-  errorIssue: Ref<boolean>,
-  loading: Ref<boolean>,
-  maxTokens?: number,
-  temperature?: number,
+interface ChatCompletionStreamOptions {
+  config: ClientOptions
+  messages: any[]
+  result: Ref<string>
+  historyDialog: Ref<any[]>
+  errorIssue: Ref<boolean>
+  loading: Ref<boolean>
+  maxTokens?: number
+  temperature?: number
   model?: string
+}
+
+async function createChatCompletionStream(
+  options: ChatCompletionStreamOptions
 ): Promise<void> {
-  const openai = new OpenAI(config)
-  if (Object.keys(availableModels).includes(model ?? '')) {
-    model = availableModels[model ?? '']
-  }
-  const requestConfig = {
-    model: model ?? 'gpt-3.5-turbo',
-    messages,
-    temperature: temperature ?? 0.7,
-    max_tokens: maxTokens ?? 800
-  }
-  let response
   try {
-    response = await openai.chat.completions.create(requestConfig)
-    result.value =
+    const openai = new OpenAI(options.config)
+    if (Object.keys(availableModels).includes(options.model ?? '')) {
+      options.model = availableModels[options.model ?? '']
+    }
+    const requestConfig = {
+      model: options.model ?? 'gpt-3.5-turbo',
+      messages: options.messages,
+      temperature: options.temperature ?? 0.7,
+      max_tokens: options.maxTokens ?? 800
+    }
+
+    const response = await openai.chat.completions.create(requestConfig)
+    options.result.value =
       response.choices[0].message?.content?.replace(/\\n/g, '\n') ?? ''
-    historyDialog.value.push({
+    options.historyDialog.value.push({
       role: 'assistant',
-      content: result.value
+      content: options.result.value
     })
   } catch (error) {
     if (error instanceof OpenAI.APIError) {
-      result.value = error.message
-      errorIssue.value = true
+      options.result.value = error.message
+      options.errorIssue.value = true
       console.error(error.message)
     } else {
-      result.value = String(error)
-      errorIssue.value = true
+      options.result.value = String(error)
+      options.errorIssue.value = true
      console.error(error)
     }
   }
-  loading.value = false
+  options.loading.value = false
 }
 
 export default {
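
The substantive change here is the calling convention: nine order-sensitive positional parameters become one typed options object, so every field is named at the call site and the optional ones (maxTokens, temperature, model) can simply be omitted. A sketch of the migration, with the caller's local variable names assumed for illustration:

  // before: positional, easy to misorder
  // await official.createChatCompletionStream(
  //   config, messages, result, historyDialog,
  //   errorIssue, loading, maxTokens, temperature, model
  // )

  // after: one ChatCompletionStreamOptions object
  await official.createChatCompletionStream({
    config,
    messages,
    result,
    historyDialog,
    errorIssue,
    loading,
    model: 'gpt-4' // optional; the function falls back to 'gpt-3.5-turbo'
  })

The ollama and palm modules below get the same treatment, so every provider is now invoked the same way.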
67 changes: 36 additions & 31 deletions src/api/ollama.ts
@@ -1,48 +1,53 @@
 import { Ref } from 'vue'
 import axios from 'axios'
 
-async function createChatCompletionStream(
-  ollamaEndpoint: string,
-  ollamaModel: string,
-  messages: any[],
-  result: Ref<string>,
-  historyDialog: Ref<any[]>,
-  errorIssue: Ref<boolean>,
-  loading: Ref<boolean>,
+interface ChatCompletionStreamOptions {
+  ollamaEndpoint: string
+  ollamaModel: string
+  messages: any[]
+  result: Ref<string>
+  historyDialog: Ref<any[]>
+  errorIssue: Ref<boolean>
+  loading: Ref<boolean>
   temperature?: number
+}
+
+async function createChatCompletionStream(
+  options: ChatCompletionStreamOptions
 ): Promise<void> {
-  const formatedEndpoint = ollamaEndpoint.replace(/\/$/, '')
-  const url = `${formatedEndpoint}/api/chat`
-  const headers = {
-    'Content-Type': 'application/json'
-  }
-  const body = {
-    model: ollamaModel,
-    options: {
-      temperature
-    },
-    stream: false,
-    messages
-  }
-  let response
   try {
-    response = await axios.post(url, body, {
-      headers
-    })
+    const formatedEndpoint = options.ollamaEndpoint.replace(/\/$/, '')
+    const response = await axios.post(
+      `${formatedEndpoint}/api/chat`,
+      {
+        model: options.ollamaModel,
+        options: {
+          temperature: options.temperature
+        },
+        stream: false,
+        messages: options.messages
+      },
+      {
+        headers: {
+          'Content-Type': 'application/json'
+        }
+      }
+    )
     if (response.status !== 200) {
       throw new Error(`Status code: ${response.status}`)
     }
-    result.value = response.data?.message?.content?.replace(/\\n/g, '\n') ?? ''
-    historyDialog.value.push({
+    options.result.value =
+      response.data?.message?.content?.replace(/\\n/g, '\n') ?? ''
+    options.historyDialog.value.push({
       role: 'assistant',
-      content: result.value
+      content: options.result.value
     })
   } catch (error) {
     console.error(error)
-    result.value = String(error)
-    errorIssue.value = true
+    options.result.value = String(error)
+    options.errorIssue.value = true
   }
-  loading.value = false
+  options.loading.value = false
 }
 
 export default {
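
With the URL, body, and headers now inlined into a single axios.post, the request shape is easiest to read from a call sketch. The endpoint below is Ollama's conventional local default and the model id is illustrative; the refs are assumed to exist as in the groq example above:

  await ollama.createChatCompletionStream({
    ollamaEndpoint: 'http://localhost:11434/', // trailing slash is stripped
    ollamaModel: 'llama3', // illustrative model name
    messages: [{ role: 'user', content: 'Hello' }],
    result,
    historyDialog,
    errorIssue,
    loading,
    temperature: 0.7
  })
  // POSTs { model, options: { temperature }, stream: false, messages }
  // to http://localhost:11434/api/chat and stores the reply in result.value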
73 changes: 38 additions & 35 deletions src/api/palm.ts
@@ -1,50 +1,53 @@
 import { Ref } from 'vue'
 import axios from 'axios'
 
-async function createChatCompletionStream(
-  palmAPIKey: string,
-  palmAPIEndpoint: string,
-  palmModel: string,
-  prompt: string,
-  result: Ref<string>,
-  errorIssue: Ref<boolean>,
-  loading: Ref<boolean>,
-  maxTokens?: number,
+interface ChatCompletionStreamOptions {
+  palmAPIKey: string
+  palmAPIEndpoint: string
+  palmModel: string
+  prompt: string
+  result: Ref<string>
+  errorIssue: Ref<boolean>
+  loading: Ref<boolean>
+  maxTokens?: number
   temperature?: number
+}
+
+async function createChatCompletionStream(
+  options: ChatCompletionStreamOptions
 ): Promise<void> {
-  const formatedEndpoint = palmAPIEndpoint
-    .replace(/^https?:\/\//, '')
-    .replace(/\/$/, '')
-  const url = `https://${formatedEndpoint}/models/${palmModel}:generateText`
-  const headers = {
-    'Content-Type': 'application/json'
-  }
-  const query = {
-    key: palmAPIKey
-  }
-  const body = {
-    prompt: {
-      text: prompt
-    },
-    temperature,
-    maxOutputTokens: maxTokens
-  }
-  let response
   try {
-    response = await axios.post(url, body, {
-      headers,
-      params: query
-    })
+    const formatedEndpoint = options.palmAPIEndpoint
+      .replace(/^https?:\/\//, '')
+      .replace(/\/$/, '')
+    const response = await axios.post(
+      `https://${formatedEndpoint}/models/${options.palmModel}:generateText`,
+      {
+        prompt: {
+          text: options.prompt
+        },
+        temperature: options.temperature,
+        maxOutputTokens: options.maxTokens
+      },
+      {
+        headers: {
+          'Content-Type': 'application/json'
+        },
+        params: {
+          key: options.palmAPIKey
+        }
+      }
+    )
     if (response.status !== 200) {
       throw new Error(`Status code: ${response.status}`)
     }
-    result.value = response.data?.candidates[0]?.output || ''
+    options.result.value = response.data?.candidates[0]?.output || ''
   } catch (error) {
     console.error(error)
-    result.value = String(error)
-    errorIssue.value = true
+    options.result.value = String(error)
+    options.errorIssue.value = true
   }
-  loading.value = false
+  options.loading.value = false
 }
 
 export default {
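
palm.ts gets the same options-object refactor; the notable difference from the other providers is that the API key travels as a key= query parameter rather than a header. A sketch with typical PaLM-era values, all illustrative and not taken from this diff:

  await palm.createChatCompletionStream({
    palmAPIKey: 'AIza...', // placeholder key, sent as the key= query parameter
    palmAPIEndpoint: 'generativelanguage.googleapis.com/v1beta3', // assumed endpoint
    palmModel: 'text-bison-001', // assumed model id
    prompt: 'Rewrite this sentence in a formal tone.',
    result,
    errorIssue,
    loading,
    maxTokens: 800,
    temperature: 0.7
  })
  // resolves to POST https://generativelanguage.googleapis.com/v1beta3/
  //   models/text-bison-001:generateText?key=AIza...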
16 changes: 16 additions & 0 deletions src/i18n/index.ts
@@ -50,6 +50,14 @@ const messages = {
     ollamaModelSelectLabel: 'Model',
     ollamaModelSelectPlaceholder: 'Please select model',
     ollamaTemperatureLabel: 'Temperature',
+    groqAPIKeyLabel: 'API Key',
+    groqAPIKeyPlaceholder: 'API key for groq',
+    groqTemperatureLabel: 'Temperature',
+    groqMaxTokensLabel: 'Max tokens',
+    groqModelSelectLabel: 'Model',
+    groqModelSelectPlaceholder: 'Please select model',
+    groqCustomModelLabel: 'Custom Model',
+    groqCustomModelPlaceholder: 'Please input model',
     translate: 'Trans',
     summary: 'Sum',
     polish: 'Polish',
@@ -144,6 +152,14 @@ const messages = {
     ollamaModelSelectLabel: '选择模型',
     ollamaModelSelectPlaceholder: '请选择模型',
     ollamaTemperatureLabel: 'temperature',
+    groqAPIKeyLabel: 'API 密钥',
+    groqAPIKeyPlaceholder: 'API 密钥',
+    groqTemperatureLabel: 'temperature',
+    groqMaxTokensLabel: 'max tokens',
+    groqModelSelectLabel: '选择模型',
+    groqModelSelectPlaceholder: '请选择模型',
+    groqCustomModelLabel: '自定义模型',
+    groqCustomModelPlaceholder: '请输入模型',
     translate: '翻译',
     summary: '摘要',
     polish: '润色',
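
The new keys follow the existing ollama* naming convention in both locales. One common way to consume them in a component, via vue-i18n's composition API (assumed usage, not part of this diff):

  import { useI18n } from 'vue-i18n'

  const { t } = useI18n()
  t('groqAPIKeyLabel') // 'API Key' in the English locale, 'API 密钥' in Chinese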
(The remaining 7 changed files are not shown.)
