Skip to content

Commit

Permalink
improve chatglm support (#696, #464)
Browse files Browse the repository at this point in the history
  • Loading branch information
josStorer committed Aug 4, 2024
1 parent 12ef5e8 commit 9c371f7
Show file tree
Hide file tree
Showing 5 changed files with 18 additions and 116 deletions.
8 changes: 0 additions & 8 deletions build.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -250,14 +250,6 @@ async function runWebpack(isWithoutKatex, isWithoutTiktoken, minimal, callback)
search: 'await generateAnswersWithChatGLMApi',
replace: '//',
},
{
search: 'chatglmTurbo',
replace: '//',
},
{
search: "'chatglmTurbo",
replace: '//',
},
],
},
}
Expand Down
7 changes: 5 additions & 2 deletions src/config/index.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ export const claudeApiModelKeys = [
'claude3SonnetApi',
'claude3OpusApi',
]
export const chatglmApiModelKeys = ['chatglmTurbo']
export const chatglmApiModelKeys = ['chatglmTurbo', 'chatglm4', 'chatglmEmohaa', 'chatglmCharGLM3']
export const githubThirdPartyApiModelKeys = ['waylaidwandererApi']
export const poeWebModelKeys = [
'poeAiWebSage', //poe.com/Assistant
Expand Down Expand Up @@ -145,7 +145,10 @@ export const Models = {

bardWebFree: { value: '', desc: 'Gemini (Web)' },

chatglmTurbo: { value: 'chatglm_turbo', desc: 'ChatGLM (ChatGLM-Turbo)' },
chatglmTurbo: { value: 'GLM-4-Air', desc: 'ChatGLM (GLM-4-Air)' },
chatglm4: { value: 'GLM-4-0520', desc: 'ChatGLM (GLM-4-0520)' },
chatglmEmohaa: { value: 'Emohaa', desc: 'ChatGLM (Emohaa)' },
chatglmCharGLM3: { value: 'CharGLM-3', desc: 'ChatGLM (CharGLM-3)' },

chatgptFree35Mobile: { value: 'text-davinci-002-render-sha-mobile', desc: 'ChatGPT (Mobile)' },
chatgptPlus4Mobile: { value: 'gpt-4-mobile', desc: 'ChatGPT (Mobile, GPT-4)' },
Expand Down
113 changes: 10 additions & 103 deletions src/services/apis/chatglm-api.mjs
Original file line number Diff line number Diff line change
@@ -1,39 +1,6 @@
import { Models, getUserConfig } from '../../config/index.mjs'
import { pushRecord, setAbortController } from './shared.mjs'
import { isEmpty } from 'lodash-es'
import { getUserConfig } from '../../config/index.mjs'
import { getToken } from '../../utils/jwt-token-generator.mjs'
import { createParser } from '../../utils/eventsource-parser.mjs'

/**
 * Minimal SSE client: fetches `resource`, streams the response body through
 * an eventsource parser, and forwards lifecycle callbacks to the caller.
 *
 * @param {string} resource - URL to fetch
 * @param {Object} options - fetch options plus the callbacks below
 * @param {(event: Object) => void} options.onMessage - called for each parsed SSE event
 * @param {(chunk: string) => Promise<void>} options.onStart - called once, with the first decoded chunk
 * @param {() => Promise<void>} options.onEnd - called after the stream is fully consumed
 * @param {(err: Error|Response) => Promise<void>} options.onError - called on network failure or non-2xx response
 */
async function fetchSSE(resource, options) {
  const { onMessage, onStart, onEnd, onError, ...fetchOptions } = options

  // Network-level failure: report it and bail out without reading a body.
  let response
  try {
    response = await fetch(resource, fetchOptions)
  } catch (err) {
    await onError(err)
    return
  }
  // HTTP-level failure: hand the raw Response to the caller to inspect.
  if (!response.ok) {
    await onError(response)
    return
  }

  // Only fully parsed SSE events are surfaced; parser bookkeeping events are dropped.
  const parser = createParser((event) => {
    if (event.type === 'event') onMessage(event)
  })

  const reader = response.body.getReader()
  let firstChunkSeen = false
  for (;;) {
    const { done, value } = await reader.read()
    if (done) break
    if (!firstChunkSeen) {
      // onStart gets a decoded preview of the very first chunk only.
      firstChunkSeen = true
      await onStart(new TextDecoder().decode(value))
    }
    // NOTE(review): the raw Uint8Array is fed to the parser, matching the
    // original behavior — presumably this parser build accepts bytes; confirm
    // against utils/eventsource-parser.mjs.
    parser.feed(value)
  }
  await onEnd()
}
import { generateAnswersWithChatgptApiCompat } from './openai-api.mjs'

/**
* @param {Runtime.Port} port
Expand All @@ -42,74 +9,14 @@ async function fetchSSE(resource, options) {
* @param {string} modelName
*/
/**
 * Generates answers via ZhipuAI's ChatGLM API.
 *
 * The v4 endpoint is OpenAI-compatible, so this delegates the whole
 * request/stream lifecycle to generateAnswersWithChatgptApiCompat instead of
 * hand-rolling an SSE client against the old v3 per-model endpoint.
 *
 * @param {Runtime.Port} port - extension message port for streaming answers back
 * @param {string} question - the user's question
 * @param {Session} session - conversation session (history records etc.)
 * @param {string} modelName - key into Models (e.g. 'chatglmTurbo')
 */
export async function generateAnswersWithChatGLMApi(port, question, session, modelName) {
  // OpenAI-compatible base; the compat helper appends /chat/completions.
  const baseUrl = 'https://open.bigmodel.cn/api/paas/v4'
  const config = await getUserConfig()
  return generateAnswersWithChatgptApiCompat(
    baseUrl,
    port,
    question,
    session,
    // The ChatGLM API authenticates with a short-lived JWT derived from the key.
    getToken(config.chatglmApiKey),
    modelName,
  )
}
2 changes: 1 addition & 1 deletion src/services/apis/moonshot-api.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,6 @@ export async function generateAnswersWithMoonshotCompletionApi(
apiKey,
modelName,
) {
const baseUrl = 'https://api.moonshot.cn'
const baseUrl = 'https://api.moonshot.cn/v1'
return generateAnswersWithChatgptApiCompat(baseUrl, port, question, session, apiKey, modelName)
}
4 changes: 2 additions & 2 deletions src/services/apis/openai-api.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ export async function generateAnswersWithGptCompletionApi(
export async function generateAnswersWithChatgptApi(port, question, session, apiKey, modelName) {
const config = await getUserConfig()
return generateAnswersWithChatgptApiCompat(
config.customOpenAiApiUrl,
config.customOpenAiApiUrl + '/v1',
port,
question,
session,
Expand Down Expand Up @@ -144,7 +144,7 @@ export async function generateAnswersWithChatgptApiCompat(
console.debug('conversation history', { content: session.conversationRecords })
port.postMessage({ answer: null, done: true, session: session })
}
await fetchSSE(`${baseUrl}/v1/chat/completions`, {
await fetchSSE(`${baseUrl}/chat/completions`, {
method: 'POST',
signal: controller.signal,
headers: {
Expand Down

0 comments on commit 9c371f7

Please sign in to comment.