diff --git a/README.md b/README.md
index 2534f3c99d0..3973c84bfde 100644
--- a/README.md
+++ b/README.md
@@ -159,7 +159,7 @@ Your openai api key.
### `CODE` (optional)
-Access passsword, separated by comma.
+Access passwords, separated by commas.
### `BASE_URL` (optional)
@@ -185,18 +185,25 @@ If you do not want users to input their own API key, set this value to 1.
If you do not want users to use GPT-4, set this value to 1.
-### `HIDE_BALANCE_QUERY` (optional)
+### `ENABLE_BALANCE_QUERY` (optional)
> Default: Empty
-If you do not want users to query balance, set this value to 1.
+If you want users to query their balance, set this value to 1; otherwise, leave it empty.
-### MODEL_LIST (optional)
-If you want to reduce the number of options in the model list, you can set it to a custom list, such as "gpt3.5, gpt4".
-This is particularly useful when deploying ChatGPT on Azure.
+### `DISABLE_FAST_LINK` (optional)
> Default: Empty
+If you want to disable parsing settings from the URL, set this value to 1.
+
+### `CUSTOM_MODELS` (optional)
+
+> Default: Empty
+> Example: `+llama,+claude-2,-gpt-3.5-turbo` means adding `llama` and `claude-2` to the model list, and removing `gpt-3.5-turbo` from it.
+
+To control the model list, use `+` to add a custom model and `-` to hide a model, separated by commas.
+
## Requirements
NodeJS >= 18, Docker >= 20
@@ -263,6 +270,10 @@ If your proxy needs password, use:
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```
+## Synchronizing Chat Records (UpStash)
+
+| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md) |
+
## Documentation
> Please go to the [docs](./docs) directory for more documentation instructions.
@@ -315,6 +326,7 @@ If you want to add a new translation, read this [document](./docs/translation.md
[@AnsonHyq](https://github.com/AnsonHyq)
[@synwith](https://github.com/synwith)
[@piksonGit](https://github.com/piksonGit)
+[@ouyangzhiping](https://github.com/ouyangzhiping)
### Contributor
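A minimal sketch of how the `CUSTOM_MODELS` example above is expected to resolve, mirroring the parsing rules introduced in `app/utils/model.ts` later in this diff (the `defaults` array is illustrative, not the real `DEFAULT_MODELS`):

```ts
// Illustrative defaults; the real DEFAULT_MODELS list is longer.
const defaults = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
];

// Build a name -> availability table, then apply the custom-model entries:
// "+" adds or enables a model, "-" hides one.
const table: Record<string, boolean> = {};
defaults.forEach((m) => (table[m.name] = m.available));

for (const entry of "+llama,+claude-2,-gpt-3.5-turbo".split(",")) {
  if (entry.startsWith("+")) table[entry.slice(1)] = true;
  else if (entry.startsWith("-")) table[entry.slice(1)] = false;
  else if (entry) table[entry] = true;
}

console.log(table);
// { "gpt-3.5-turbo": false, "gpt-4": true, llama: true, "claude-2": true }
```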
diff --git a/README_CN.md b/README_CN.md
index 8e7b939a7dd..9e4d1b64bf4 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -106,6 +106,12 @@ OpenAI API proxy URL. If you have manually configured an OpenAI API proxy, fill it in here.
If you do not want so many options to appear in the model list, you can set this to a custom list, e.g.: gpt3.5,gpt4
This is very useful when using a ChatGPT deployment on Azure.
+### `CUSTOM_MODELS` (optional)
+
+> Example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` means adding `qwen-7b-chat` and `glm-6b` to the model list, and removing `gpt-3.5-turbo` from it.
+
+To control the model list, use `+` to add a model and `-` to hide a model, separated by commas.
+
## Development
Click the button below to start development:
diff --git a/app/api/common.ts b/app/api/common.ts
index 0af7761d88c..a1decd42f5b 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,10 +1,9 @@
import { NextRequest, NextResponse } from "next/server";
+import { getServerSideConfig } from "../config/server";
+import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { collectModelTable } from "../utils/model";
-export const OPENAI_URL = "api.openai.com";
-const DEFAULT_PROTOCOL = "https";
-const PROTOCOL = process.env.PROTOCOL || DEFAULT_PROTOCOL;
-const BASE_URL = process.env.BASE_URL || OPENAI_URL;
-const DISABLE_GPT4 = !!process.env.DISABLE_GPT4;
+const serverConfig = getServerSideConfig();
export async function requestOpenai(req: NextRequest) {
const controller = new AbortController();
@@ -14,10 +13,10 @@ export async function requestOpenai(req: NextRequest) {
"",
);
- let baseUrl = BASE_URL;
+ let baseUrl = serverConfig.baseUrl ?? OPENAI_BASE_URL;
if (!baseUrl.startsWith("http")) {
- baseUrl = `${PROTOCOL}://${baseUrl}`;
+ baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
@@ -26,10 +25,7 @@ export async function requestOpenai(req: NextRequest) {
console.log("[Proxy] ", openaiPath);
console.log("[Base Url]", baseUrl);
-
- if (process.env.OPENAI_ORG_ID) {
- console.log("[Org ID]", process.env.OPENAI_ORG_ID);
- }
+ console.log("[Org ID]", serverConfig.openaiOrgId);
const timeoutId = setTimeout(
() => {
@@ -58,18 +54,23 @@ export async function requestOpenai(req: NextRequest) {
};
// #1815 try to refuse gpt4 request
- if (DISABLE_GPT4 && req.body) {
+ if (serverConfig.customModels && req.body) {
try {
+ const modelTable = collectModelTable(
+ DEFAULT_MODELS,
+ serverConfig.customModels,
+ );
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
- const jsonBody = JSON.parse(clonedBody);
+ const jsonBody = JSON.parse(clonedBody) as { model?: string };
- if ((jsonBody?.model ?? "").includes("gpt-4")) {
+    // only refuse when the model is explicitly disabled; an unknown model is undefined, not false
+ if (modelTable[jsonBody?.model ?? ""] === false) {
return NextResponse.json(
{
error: true,
- message: "you are not allowed to use gpt-4 model",
+ message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
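With this change, the hard-coded gpt-4 guard becomes a generic per-model gate. A hedged sketch of the refusal logic in isolation (the table below stands in for `collectModelTable(DEFAULT_MODELS, serverConfig.customModels)` with a hypothetical `customModels = "-gpt-4"`):

```ts
// Hypothetical table produced by collectModelTable for customModels = "-gpt-4".
const modelTable: Record<string, boolean> = {
  "gpt-3.5-turbo": true,
  "gpt-4": false,
};

function shouldRefuse(body: { model?: string }): boolean {
  // Only an explicit `false` blocks the request; a model missing from the
  // table is `undefined` and is forwarded to the upstream API untouched.
  return modelTable[body?.model ?? ""] === false;
}

console.log(shouldRefuse({ model: "gpt-4" })); // true  -> 403 JSON response
console.log(shouldRefuse({ model: "llama" })); // false -> request forwarded
```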
diff --git a/app/api/config/route.ts b/app/api/config/route.ts
index 16ce429e4f6..db84fba175a 100644
--- a/app/api/config/route.ts
+++ b/app/api/config/route.ts
@@ -11,7 +11,8 @@ const DANGER_CONFIG = {
hideUserApiKey: serverConfig.hideUserApiKey,
disableGPT4: serverConfig.disableGPT4,
hideBalanceQuery: serverConfig.hideBalanceQuery,
- enableVercelWebAnalytics: serverConfig.isVercelWebAnalytics,
+ disableFastLink: serverConfig.disableFastLink,
+ customModels: serverConfig.customModels,
};
declare global {
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index d5fbdbfbb83..3e5a2c33685 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -92,6 +92,7 @@ import { ChatCommandPrefix, useChatCommand, useCommand } from "../command";
import { prettyObject } from "../utils/format";
import { ExportMessageModal } from "./exporter";
import { getClientConfig } from "../config/client";
+import { useAllModels } from "../utils/hooks";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />,
@@ -434,14 +435,9 @@ export function ChatActions(props: {
// switch model
const currentModel = chatStore.currentSession().mask.modelConfig.model;
- const models = useMemo(
- () =>
- config
- .allModels()
- .filter((m) => m.available)
- .map((m) => m.name),
- [config],
- );
+ const models = useAllModels()
+ .filter((m) => m.available)
+ .map((m) => m.name);
const [showModelSelector, setShowModelSelector] = useState(false);
return (
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index cedee3c4e86..6e4c9bcb17b 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -1,56 +1,15 @@
-import {
- ModalConfigValidator,
- ModelConfig,
- useAccessStore,
- useAppConfig,
-} from "../store";
+import { ModalConfigValidator, ModelConfig } from "../store";
import Locale from "../locales";
import { InputRange } from "./input-range";
import { ListItem, Select } from "./ui-lib";
-import { getHeaders } from "@/app/client/api";
-import { useEffect, useState } from "react";
+import { useAllModels } from "../utils/hooks";
-interface ModelItem {
- name: string;
- available: boolean;
-}
-interface ModelConfigResponse {
- model_list: ModelItem[];
-}
-async function loadModelList(): Promise<ModelItem[]> {
- return new Promise((resolve, reject) => {
- fetch("/api/model-config", {
- method: "get",
- body: null,
- headers: {
- ...getHeaders(),
- },
- })
- .then((res) => res.json())
- .then((res: ModelConfigResponse) => {
- console.log("fetched config", res);
- if (res.model_list && res.model_list.length > 0) {
- resolve(res.model_list);
- }
- })
- .catch(reject);
- });
-}
export function ModelConfigList(props: {
modelConfig: ModelConfig;
updateConfig: (updater: (config: ModelConfig) => void) => void;
}) {
- const config = useAppConfig();
- const [modelList, setModelList] = useState(config.allModels());
- useEffect(() => {
- (async () => {
- let model_list = await loadModelList();
- if (model_list && model_list.length > 0) {
- setModelList(model_list);
- }
- })();
- }, []);
+ const allModels = useAllModels();
return (
<>
@@ -66,7 +25,7 @@ export function ModelConfigList(props: {
);
}}
>
- {modelList.map((v, i) => (
+ {allModels.map((v, i) => (
diff --git a/app/config/server.ts b/app/config/server.ts
index 289363ca42a..5760a875379 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -1,4 +1,5 @@
import md5 from "spark-md5";
+import { DEFAULT_MODELS } from "../constant";
declare global {
namespace NodeJS {
@@ -8,6 +9,7 @@ declare global {
BASE_URL?: string;
MODEL_LIST?: string;
PROXY_URL?: string;
+ OPENAI_ORG_ID?: string;
VERCEL?: string;
VERCEL_ANALYTICS?: string; // vercel web analytics
HIDE_USER_API_KEY?: string; // disable user's api key input
@@ -15,6 +17,9 @@ declare global {
BUILD_MODE?: "standalone" | "export";
BUILD_APP?: string; // is building desktop app
HIDE_BALANCE_QUERY?: string; // allow user to query balance or not
+      ENABLE_BALANCE_QUERY?: string; // allow users to query balance
+      DISABLE_FAST_LINK?: string; // disallow parsing settings from the url
+      CUSTOM_MODELS?: string; // custom model list, e.g. "+llama,-gpt-3.5-turbo"
}
}
}
@@ -39,6 +44,16 @@ export const getServerSideConfig = () => {
);
}
+  const disableGPT4 = !!process.env.DISABLE_GPT4;
+ let customModels = process.env.CUSTOM_MODELS ?? "";
+
+ if (disableGPT4) {
+ if (customModels) customModels += ",";
+ customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
+ .map((m) => "-" + m.name)
+ .join(",");
+ }
+
return {
apiKey: process.env.OPENAI_API_KEY,
code: process.env.CODE,
@@ -46,10 +61,13 @@ export const getServerSideConfig = () => {
needCode: ACCESS_CODES.size > 0,
baseUrl: process.env.BASE_URL,
proxyUrl: process.env.PROXY_URL,
+ openaiOrgId: process.env.OPENAI_ORG_ID,
isVercel: !!process.env.VERCEL,
isVercelWebAnalytics: !!process.env.VERCEL_ANALYTICS,
hideUserApiKey: !!process.env.HIDE_USER_API_KEY,
- disableGPT4: !!process.env.DISABLE_GPT4,
- hideBalanceQuery: !!process.env.HIDE_BALANCE_QUERY,
+ disableGPT4,
+ hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
+ disableFastLink: !!process.env.DISABLE_FAST_LINK,
+ customModels,
};
};
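`DISABLE_GPT4=1` is thus folded into the same mechanism: it appends a `-<name>` entry for every default model whose name starts with `gpt-4`. A sketch with stand-in values (the real `DEFAULT_MODELS` list is longer):

```ts
// Stand-ins for DEFAULT_MODELS and the two environment variables.
const DEFAULT_MODELS = [
  { name: "gpt-3.5-turbo" },
  { name: "gpt-4" },
  { name: "gpt-4-32k" },
];
let customModels = "+llama"; // process.env.CUSTOM_MODELS ?? ""
const disableGPT4 = true; // !!process.env.DISABLE_GPT4

if (disableGPT4) {
  if (customModels) customModels += ",";
  customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
    .map((m) => "-" + m.name)
    .join(",");
}

console.log(customModels); // "+llama,-gpt-4,-gpt-4-32k"
```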
diff --git a/app/constant.ts b/app/constant.ts
index 576d1751ef3..070dd33ba4b 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -10,6 +10,7 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
export const DEFAULT_CORS_HOST = "https://chatgpt.btz.sh";
export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
+export const OPENAI_BASE_URL = "https://api.openai.com";
export enum Path {
Home = "/",
diff --git a/app/store/access.ts b/app/store/access.ts
index 9eaa81e5ea3..f87e44a2ac4 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -16,6 +16,8 @@ const DEFAULT_ACCESS_STATE = {
hideUserApiKey: false,
hideBalanceQuery: false,
disableGPT4: false,
+ disableFastLink: false,
+ customModels: "",
openaiUrl: DEFAULT_OPENAI_URL,
};
@@ -29,15 +31,6 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
- updateCode(code: string) {
- set(() => ({ accessCode: code?.trim() }));
- },
- updateToken(token: string) {
- set(() => ({ token: token?.trim() }));
- },
- updateOpenAiUrl(url: string) {
- set(() => ({ openaiUrl: url?.trim() }));
- },
isAuthorized() {
this.fetch();
@@ -60,12 +53,6 @@ export const useAccessStore = createPersistStore(
.then((res: DangerConfig) => {
console.log("[Config] got config from server", res);
set(() => ({ ...res }));
-
- if (res.disableGPT4) {
- DEFAULT_MODELS.forEach(
- (m: any) => (m.available = !m.name.startsWith("gpt-4")),
- );
- }
})
.catch(() => {
console.error("[Config] failed to fetch config");
diff --git a/app/store/config.ts b/app/store/config.ts
index 0fbc26dfe0e..5fcd6ff514c 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -128,15 +128,7 @@ export const useAppConfig = createPersistStore(
}));
},
- allModels() {
- const customModels = get()
- .customModels.split(",")
- .filter((v) => !!v && v.length > 0)
- .map((m) => ({ name: m, available: true }));
- const allModels = get().models.concat(customModels);
- allModels.sort((a, b) => (a.name < b.name ? -1 : 1));
- return allModels;
- },
+ allModels() {},
}),
{
name: StoreKey.Config,
diff --git a/app/utils/hooks.ts b/app/utils/hooks.ts
new file mode 100644
index 00000000000..f6bfae67323
--- /dev/null
+++ b/app/utils/hooks.ts
@@ -0,0 +1,16 @@
+import { useMemo } from "react";
+import { useAccessStore, useAppConfig } from "../store";
+import { collectModels } from "./model";
+
+export function useAllModels() {
+ const accessStore = useAccessStore();
+ const configStore = useAppConfig();
+ const models = useMemo(() => {
+ return collectModels(
+ configStore.models,
+ [accessStore.customModels, configStore.customModels].join(","),
+ );
+ }, [accessStore.customModels, configStore.customModels, configStore.models]);
+
+ return models;
+}
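A hypothetical consumer of the new hook, mirroring the `ChatActions` usage above (`ModelPicker` is illustrative and not part of this change):

```tsx
import { useAllModels } from "../utils/hooks";

// The hook merges server-side customModels (access store) with client-side
// customModels (app config), so consumers only filter and render.
export function ModelPicker() {
  const models = useAllModels()
    .filter((m) => m.available)
    .map((m) => m.name);

  return (
    <select>
      {models.map((name) => (
        <option key={name} value={name}>
          {name}
        </option>
      ))}
    </select>
  );
}
```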
diff --git a/app/utils/model.ts b/app/utils/model.ts
new file mode 100644
index 00000000000..23090f9d2f3
--- /dev/null
+++ b/app/utils/model.ts
@@ -0,0 +1,40 @@
+import { LLMModel } from "../client/api";
+
+export function collectModelTable(
+ models: readonly LLMModel[],
+ customModels: string,
+) {
+  const modelTable: Record<string, boolean> = {};
+
+ // default models
+ models.forEach((m) => (modelTable[m.name] = m.available));
+
+ // server custom models
+ customModels
+ .split(",")
+ .filter((v) => !!v && v.length > 0)
+    .forEach((m) => {
+ if (m.startsWith("+")) {
+ modelTable[m.slice(1)] = true;
+ } else if (m.startsWith("-")) {
+ modelTable[m.slice(1)] = false;
+ } else modelTable[m] = true;
+ });
+ return modelTable;
+}
+
+/**
+ * Generate full model table.
+ */
+export function collectModels(
+ models: readonly LLMModel[],
+ customModels: string,
+) {
+ const modelTable = collectModelTable(models, customModels);
+ const allModels = Object.keys(modelTable).map((m) => ({
+ name: m,
+ available: modelTable[m],
+ }));
+
+ return allModels;
+}