fixed bug: chat context not working when chatting with images

RockYang 2024-11-12 18:23:27 +08:00
parent 9e8f1ed6bf
commit 97e81a7dcc
5 changed files with 18 additions and 21 deletions
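
Why the fix requires retyping the context: an image chat sends an OpenAI vision-style message whose `content` is an array of text/image parts, not a plain string, so a context cache typed as `[]types.Message` (with a string `Content` field) cannot carry it. Below is a minimal sketch of the two message shapes, assuming the standard OpenAI vision format; the struct names are illustrative, not the project's:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Plain chat message: content is a single string.
type TextMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// Vision-style message: content is an array of typed parts,
// which a string-content message struct cannot represent.
type VisionMessage struct {
	Role    string        `json:"role"`
	Content []ContentPart `json:"content"`
}

type ContentPart struct {
	Type     string    `json:"type"` // "text" or "image_url"
	Text     string    `json:"text,omitempty"`
	ImageURL *ImageURL `json:"image_url,omitempty"`
}

type ImageURL struct {
	URL string `json:"url"`
}

func main() {
	// Both shapes can live side by side in one []interface{} slice,
	// which is what this commit switches the context cache to.
	ctx := []interface{}{
		TextMessage{Role: "user", Content: "hello"},
		VisionMessage{Role: "user", Content: []ContentPart{
			{Type: "text", Text: "what is in this picture?"},
			{Type: "image_url", ImageURL: &ImageURL{URL: "https://example.com/cat.png"}},
		}},
	}
	raw, _ := json.Marshal(ctx)
	fmt.Println(string(raw))
}
```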

View File

@@ -4,7 +4,8 @@
 * Bug fix: music and video files failing to download, and mind-map text being unreadable after download [#IB0N2E](https://gitee.com/blackfox/geekai/issues/IB0N2E)
 * Enhancement: store the raw information of every AIGC task; after the program starts, unexecuted tasks are automatically pushed back into the redis queue
 * Enhancement: failed tasks now refund compute credits automatically, instead of refunding only when the task is deleted
-* New feature: support configuring a dedicated model to translate prompts, and provide a meta-prompt generation feature
+* New feature: support configuring a dedicated model to translate prompts, and provide a Meta prompt generation feature
+* Bug fix: fixed the chat context not taking effect when chatting with images
 ## v4.1.6
 * New feature: **OpenAI real-time voice chat** :rocket: :rocket: :rocket:, beta; compute-credit billing is not implemented yet, so it is currently limited to VIP users.

View File

@@ -16,7 +16,7 @@ type MKey interface {
 	string | int | uint
 }
 type MValue interface {
-	*WsClient | *ChatSession | context.CancelFunc | []Message
+	*WsClient | *ChatSession | context.CancelFunc | []interface{}
 }
 type LMap[K MKey, T MValue] struct {
 	lock sync.RWMutex
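
Go constraint unions are closed sets: `LMap` can only be instantiated with value types listed in `MValue`, so `[]interface{}` has to be added to the union before the map can cache contexts of mixed message shapes. A self-contained sketch of the same pattern, with illustrative names rather than the project's:

```go
package main

import (
	"fmt"
	"sync"
)

// Value is a closed union constraint: only the listed types may be
// used as map values, mirroring how MValue restricts LMap.
type Value interface {
	[]interface{} | string
}

// LockedMap is an RWMutex-guarded generic map, the same shape as LMap.
type LockedMap[K comparable, V Value] struct {
	lock sync.RWMutex
	data map[K]V
}

func NewLockedMap[K comparable, V Value]() *LockedMap[K, V] {
	return &LockedMap[K, V]{data: make(map[K]V)}
}

func (m *LockedMap[K, V]) Put(key K, value V) {
	m.lock.Lock()
	defer m.lock.Unlock()
	m.data[key] = value
}

func (m *LockedMap[K, V]) Get(key K) V {
	m.lock.RLock()
	defer m.lock.RUnlock()
	return m.data[key]
}

func main() {
	// A []interface{} value can hold plain-text and vision messages alike.
	contexts := NewLockedMap[string, []interface{}]()
	contexts.Put("chat-42", []interface{}{
		map[string]string{"role": "user", "content": "hello"},
	})
	fmt.Printf("%+v\n", contexts.Get("chat-42"))
}
```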

View File

@@ -40,7 +40,7 @@ type ChatHandler struct {
 	uploadManager  *oss.UploaderManager
 	licenseService *service.LicenseService
 	ReqCancelFunc  *types.LMap[string, context.CancelFunc] // cancel handles for in-flight HttpClient requests
-	ChatContexts   *types.LMap[string, []types.Message]    // chat context map: [chatId] => []Message
+	ChatContexts   *types.LMap[string, []interface{}]      // chat context map: [chatId] => messages
 	userService    *service.UserService
 }
@@ -51,7 +51,7 @@ func NewChatHandler(app *core.AppServer, db *gorm.DB, redis *redis.Client, manag
 		uploadManager:  manager,
 		licenseService: licenseService,
 		ReqCancelFunc:  types.NewLMap[string, context.CancelFunc](),
-		ChatContexts:   types.NewLMap[string, []types.Message](),
+		ChatContexts:   types.NewLMap[string, []interface{}](),
 		userService:    userService,
 	}
 }
@@ -143,8 +143,8 @@ func (h *ChatHandler) sendMessage(ctx context.Context, session *types.ChatSessio
 	}
 	// load the chat context
-	chatCtx := make([]types.Message, 0)
-	messages := make([]types.Message, 0)
+	chatCtx := make([]interface{}, 0)
+	messages := make([]interface{}, 0)
 	if h.App.SysConfig.EnableContext {
 		if h.ChatContexts.Has(session.ChatId) {
 			messages = h.ChatContexts.Get(session.ChatId)
@@ -174,7 +174,7 @@ func (h *ChatHandler) sendMessage(ctx context.Context, session *types.ChatSessio
 	for i := len(messages) - 1; i >= 0; i-- {
 		v := messages[i]
-		tks, _ = utils.CalcTokens(v.Content, req.Model)
+		tks, _ = utils.CalcTokens(utils.JsonEncode(v), req.Model)
 		// stop when the context tokens exceed the model's maximum context length
 		if tokens+tks >= session.Model.MaxContext {
 			break
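
With context entries now typed `interface{}`, there is no `v.Content` string to measure, so the commit counts tokens over the JSON-encoded message instead, which works whether the content is a plain string or an array of text/image parts. The exact signatures of `utils.JsonEncode` and `utils.CalcTokens` are not shown in this diff; the sketch below assumes their behavior from the call site and uses a crude whitespace split as a stand-in tokenizer:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// messageTokens sketches the new counting path: serialize the whole
// message to JSON, then count tokens over the encoded string.
// strings.Fields is only a stand-in for a real model tokenizer.
func messageTokens(msg interface{}) (int, error) {
	raw, err := json.Marshal(msg)
	if err != nil {
		return 0, err
	}
	return len(strings.Fields(string(raw))), nil
}

func main() {
	// A vision-style message has no single string content to count.
	vision := map[string]interface{}{
		"role": "user",
		"content": []interface{}{
			map[string]string{"type": "text", "text": "describe this image"},
		},
	}
	n, _ := messageTokens(vision)
	fmt.Println("approx tokens:", n)
}
```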
@@ -192,8 +192,9 @@ func (h *ChatHandler) sendMessage(ctx context.Context, session *types.ChatSessio
 		logger.Debugf("聊天上下文:%+v", chatCtx)
 	}
 	reqMgs := make([]interface{}, 0)
-	for _, m := range chatCtx {
-		reqMgs = append(reqMgs, m)
+	for i := len(chatCtx) - 1; i >= 0; i-- {
+		reqMgs = append(reqMgs, chatCtx[i])
 	}
 	fullPrompt := prompt
@@ -258,7 +259,7 @@ func (h *ChatHandler) sendMessage(ctx context.Context, session *types.ChatSessio
 	logger.Debugf("%+v", req.Messages)
-	return h.sendOpenAiMessage(chatCtx, req, userVo, ctx, session, role, prompt, ws)
+	return h.sendOpenAiMessage(req, userVo, ctx, session, role, prompt, ws)
 }
 // Tokens counts the number of tokens used
@@ -399,17 +400,15 @@ func (h *ChatHandler) saveChatHistory(
 	req types.ApiRequest,
 	usage Usage,
 	message types.Message,
-	chatCtx []types.Message,
 	session *types.ChatSession,
 	role model.ChatRole,
 	userVo vo.User,
 	promptCreatedAt time.Time,
 	replyCreatedAt time.Time) {
-	useMsg := types.Message{Role: "user", Content: usage.Prompt}
-	// update the context messages; no context update is needed for function calls
+	// update the context messages
 	if h.App.SysConfig.EnableContext {
-		chatCtx = append(chatCtx, useMsg)  // the question message
+		chatCtx := req.Messages // the question messages
 		chatCtx = append(chatCtx, message) // the reply message
 		h.ChatContexts.Put(session.ChatId, chatCtx)
 	}
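
This hunk is the heart of the bug fix: the old code re-synthesized the cached user turn as a plain-text `types.Message` built from `usage.Prompt`, which dropped any image parts, so the next turn's context no longer contained the image. The new code caches `req.Messages`, the message list actually sent, verbatim. A hypothetical before/after illustration (message shapes assumed from the OpenAI vision format, values invented for the demo):

```go
package main

import "fmt"

func main() {
	// The multimodal prompt that was actually sent to the API.
	sent := []interface{}{
		map[string]interface{}{
			"role": "user",
			"content": []interface{}{
				map[string]string{"type": "text", "text": "what is in this picture?"},
				map[string]interface{}{
					"type":      "image_url",
					"image_url": map[string]string{"url": "https://example.com/cat.png"},
				},
			},
		},
	}
	reply := map[string]string{"role": "assistant", "content": "A cat sitting on a sofa."}

	// Old behavior (the bug): the cached user turn was rebuilt as plain
	// text, so the image part never reached the next request.
	oldCtx := []interface{}{
		map[string]string{"role": "user", "content": "what is in this picture?"},
		reply,
	}

	// New behavior: cache the request messages verbatim, image included.
	newCtx := append(sent, reply)

	fmt.Printf("old cached turn: %+v\n", oldCtx[0]) // image URL is gone
	fmt.Printf("new cached turn: %+v\n", newCtx[0]) // image URL preserved
}
```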

View File

@@ -51,7 +51,6 @@ type OpenAIResVo struct {
 // OpenAI message sending implementation
 func (h *ChatHandler) sendOpenAiMessage(
-	chatCtx []types.Message,
 	req types.ApiRequest,
 	userVo vo.User,
 	ctx context.Context,
@@ -201,7 +200,7 @@ func (h *ChatHandler) sendOpenAiMessage(
 				TotalTokens:  0,
 			}
 			message.Content = usage.Content
-			h.saveChatHistory(req, usage, message, chatCtx, session, role, userVo, promptCreatedAt, replyCreatedAt)
+			h.saveChatHistory(req, usage, message, session, role, userVo, promptCreatedAt, replyCreatedAt)
 		}
 	} else { // non-streaming output
 		var respVo OpenAIResVo
@@ -220,7 +219,7 @@ func (h *ChatHandler) sendOpenAiMessage(
 		utils.SendChunkMsg(ws, content)
 		respVo.Usage.Prompt = prompt
 		respVo.Usage.Content = content
-		h.saveChatHistory(req, respVo.Usage, respVo.Choices[0].Message, chatCtx, session, role, userVo, promptCreatedAt, time.Now())
+		h.saveChatHistory(req, respVo.Usage, respVo.Choices[0].Message, session, role, userVo, promptCreatedAt, time.Now())
 	}
 	return nil

View File

@@ -169,9 +169,9 @@
 					</el-button>
 				</el-tooltip>
-				<el-popover placement="right" :width="400" trigger="click" :visible="popoverVisible">
+				<el-popover placement="right" :width="400" trigger="click">
 					<template #reference>
-						<el-button type="primary" circle size="small" class="icon-btn" @click="popoverVisible = true">
+						<el-button type="primary" circle size="small" class="icon-btn">
 							<i class="iconfont icon-linggan"></i>
 						</el-button>
 					</template>
@@ -392,13 +392,11 @@ const uploadImg = (file) => {
 }
 const isGenerating = ref(false)
-const popoverVisible = ref(false)
 const metaPrompt = ref("")
 const generatePrompt = (row) => {
 	if (metaPrompt.value === "") {
 		return showMessageError("请输入元提示词")
 	}
-	popoverVisible.value = false
 	isGenerating.value = true
 	httpPost("/api/prompt/meta", {prompt: metaPrompt.value}).then(res => {
 		row.content = res.data