Merge pull request zhayujie#2300 from 6vision/master
feat: support o1-preview and o1-mini model
zhayujie authored Sep 13, 2024
2 parents 3c5db32 + 3a85ccb commit a5ccc8d
Showing 2 changed files with 16 additions and 5 deletions.
16 changes: 12 additions & 4 deletions bot/chatgpt/chat_gpt_bot.py
@@ -5,7 +5,7 @@
import openai
import openai.error
import requests

from common import const
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config

from bot.baidu.baidu_wenxin_session import BaiduWenxinSession

# OpenAI chat model API (available)
class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,12 @@ def __init__(self):
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))

conf_model = conf().get("model") or "gpt-3.5-turbo"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
# o1 models do not support the system prompt; temporarily reuse the Wenxin model's session

self.args = {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"model": conf_model, # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": conf().get("top_p", 1),
@@ -42,6 +44,12 @@
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
# o1 models fix some of these parameters, so drop them for now
if conf_model in [const.O1, const.O1_MINI]:
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
for key in remove_keys:
self.args.pop(key, None) # use a None default so a missing key does not raise an error

def reply(self, query, context=None):
# acquire reply content
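A minimal sketch (not part of the commit) of the parameter handling introduced above: when the configured model is o1-preview or o1-mini, the sampling parameters are dropped from the request arguments because, per the comment in the patch, the o1 models fix them. build_args and the hard-coded defaults are illustrative stand-ins for the conf()-driven dict in ChatGPTBot.__init__.

# Illustrative sketch; build_args is a hypothetical helper, not code from the repository.
O1 = "o1-preview"   # mirrors common/const.py below
O1_MINI = "o1-mini"

def build_args(conf_model: str) -> dict:
    args = {
        "model": conf_model,
        "temperature": 0.9,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    if conf_model in (O1, O1_MINI):
        # the o1 models fix these parameters, so the patch removes them from the request
        for key in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
            args.pop(key, None)  # None default avoids KeyError if a key is absent
    return args

print(build_args("o1-mini"))        # {'model': 'o1-mini'}
print(build_args("gpt-3.5-turbo"))  # keeps all sampling parameters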
5 changes: 4 additions & 1 deletion common/const.py
@@ -38,6 +38,9 @@
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"

O1 = "o1-preview"
O1_MINI = "o1-mini"

WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"
@@ -59,7 +62,7 @@

MODEL_LIST = [
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
WEN_XIN, WEN_XIN_4,
XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
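For reference, a small hypothetical sketch of how the extended MODEL_LIST might be used to check whether a configured model name is supported. is_supported_model is an illustrative helper, not a function from common/const.py, and the list is truncated to the names relevant here.

# Hypothetical usage sketch; only the constant names come from the diff above.
O1 = "o1-preview"
O1_MINI = "o1-mini"
GPT35 = "gpt-3.5-turbo"
MODEL_LIST = [O1, O1_MINI, GPT35]  # truncated; the real list holds every supported model name

def is_supported_model(name: str) -> bool:
    # simple membership check against the constant list, as a config-validation step might do
    return name in MODEL_LIST

assert is_supported_model("o1-mini") and not is_supported_model("o1-unknown")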
