diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml index 30d9b85b4..b98845243 100644 --- a/.github/workflows/deploy_preview.yml +++ b/.github/workflows/deploy_preview.yml @@ -3,9 +3,7 @@ name: VercelPreviewDeployment on: pull_request_target: types: - - opened - - synchronize - - reopened + - review_requested env: VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 000000000..faf7205d9 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,39 @@ +name: Run Tests + +on: + push: + branches: + - main + tags: + - "!*" + pull_request: + types: + - review_requested + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v3 + with: + node-version: 18 + cache: "yarn" + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: node_modules + key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }} + restore-keys: | + ${{ runner.os }}-node_modules- + + - name: Install dependencies + run: yarn install + + - name: Run Jest tests + run: yarn test:ci diff --git a/README.md b/README.md index d370000fa..dda896cbf 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,17 @@
-<a href="…">
-<img src="…" alt="icon"/>
+<a href="…">
+<img src="…" alt="icon"/>
+</a>

NextChat (ChatGPT Next Web)

English / [简体中文](./README_CN.md) -One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. +One-Click to get a well-designed cross-platform ChatGPT web UI, with Claude, GPT4 & Gemini Pro support. -一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 +一键免费部署你的跨平台私人 ChatGPT 应用, 支持 Claude, GPT4 & Gemini Pro 模型。 [![Saas][Saas-image]][saas-url] [![Web][Web-image]][web-url] @@ -18,9 +19,9 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[NextChatAI](https://nextchat.dev/chat) / [自部署网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) [saas-url]: https://nextchat.dev/chat?utm_source=readme [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge @@ -31,7 +32,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) +[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [BT Deply Install](https://www.bt.cn/new/download.html) [](https://monica.im/?utm=nxcrp) @@ -96,10 +97,11 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) - [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - [x] network search, calculator, any other apis etc. 
[#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) +- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) - [ ] local knowledge base ## What's New - +- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) - 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) - 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 Now supports Artifacts & SD @@ -134,10 +136,11 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) - [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] 支持 Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) - [ ] 本地知识库 ## 最新动态 - +- 🚀 v2.15.8 现在支持Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672) - 🚀 v2.15.4 客户端支持Tauri本地直接调用大模型API,更安全![#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) - 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 现在支持 Artifacts & SD 了。 @@ -301,6 +304,22 @@ iflytek Api Key. iflytek Api Secret. +### `CHATGLM_API_KEY` (optional) + +ChatGLM Api Key. + +### `CHATGLM_URL` (optional) + +ChatGLM Api Url. + +### `DEEPSEEK_API_KEY` (optional) + +DeepSeek Api Key. + +### `DEEPSEEK_URL` (optional) + +DeepSeek Api Url. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty @@ -345,6 +364,13 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name Change default model +### `VISION_MODELS` (optional) + +> Default: Empty +> Example: `gpt-4-vision,claude-3-opus,my-custom-model` means add vision capabilities to these models in addition to the default pattern matches (which detect models containing keywords like "vision", "claude-3", "gemini-1.5", etc). + +Add additional models to have vision capabilities, beyond the default pattern matching. Multiple models should be separated by commas. + ### `WHITE_WEBDAV_ENDPOINTS` (optional) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: @@ -397,6 +423,9 @@ yarn dev > [简体中文 > 如何部署到私人服务器](./README_CN.md#部署) +### BT Install +> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) + ### Docker (Recommended) ```shell diff --git a/README_CN.md b/README_CN.md index 3f339ea61..aa95d6b5c 100644 --- a/README_CN.md +++ b/README_CN.md @@ -184,6 +184,21 @@ ByteDance Api Url. 讯飞星火Api Secret. +### `CHATGLM_API_KEY` (可选) + +ChatGLM Api Key. + +### `CHATGLM_URL` (可选) + +ChatGLM Api Url. + +### `DEEPSEEK_API_KEY` (可选) + +DeepSeek Api Key. + +### `DEEPSEEK_URL` (可选) + +DeepSeek Api Url. ### `HIDE_USER_API_KEY` (可选) @@ -228,6 +243,13 @@ ByteDance Api Url. 
更改默认模型 +### `VISION_MODELS` (可选) + +> 默认值:空 +> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。 + +在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。 + ### `DEFAULT_INPUT_TEMPLATE` (可选) 自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项 @@ -264,6 +286,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy ## 部署 +### 宝塔面板部署 +> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) + ### 容器部署 (推荐) > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。 diff --git a/README_JA.md b/README_JA.md index 062c11262..29eb0d275 100644 --- a/README_JA.md +++ b/README_JA.md @@ -217,6 +217,13 @@ ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデ デフォルトのモデルを変更します。 +### `VISION_MODELS` (オプション) + +> デフォルト:空 +> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。 + +デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。 + ### `DEFAULT_INPUT_TEMPLATE` (オプション) 『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index dffb3e9da..3b5833d7e 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -10,6 +10,9 @@ import { handle as alibabaHandler } from "../../alibaba"; import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; import { handle as iflytekHandler } from "../../iflytek"; +import { handle as deepseekHandler } from "../../deepseek"; +import { handle as xaiHandler } from "../../xai"; +import { handle as chatglmHandler } from "../../glm"; import { handle as proxyHandler } from "../../proxy"; async function handle( @@ -38,6 +41,12 @@ async function handle( return stabilityHandler(req, { params }); case ApiPath.Iflytek: return iflytekHandler(req, { params }); + case ApiPath.DeepSeek: + return deepseekHandler(req, { params }); + case ApiPath.XAI: + return xaiHandler(req, { params }); + case ApiPath.ChatGLM: + return chatglmHandler(req, { params }); case ApiPath.OpenAI: return openaiHandler(req, { params }); default: diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts index 894b1ae4c..20f6caefa 100644 --- a/app/api/alibaba.ts +++ b/app/api/alibaba.ts @@ -8,7 +8,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; const serverConfig = getServerSideConfig(); @@ -89,7 +89,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Alibaba as string, diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts index 7a4444371..b96637b2c 100644 --- a/app/api/anthropic.ts +++ b/app/api/anthropic.ts @@ -9,7 +9,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "./auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); @@ -122,7 +122,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + 
isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Anthropic as string, diff --git a/app/api/auth.ts b/app/api/auth.ts index 95965ceec..1760c249c 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -92,6 +92,15 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { systemApiKey = serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; break; + case ModelProvider.DeepSeek: + systemApiKey = serverConfig.deepseekApiKey; + break; + case ModelProvider.XAI: + systemApiKey = serverConfig.xaiApiKey; + break; + case ModelProvider.ChatGLM: + systemApiKey = serverConfig.chatglmApiKey; + break; case ModelProvider.GPT: default: if (req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/baidu.ts b/app/api/baidu.ts index 0408b43c5..0f4e05ee8 100644 --- a/app/api/baidu.ts +++ b/app/api/baidu.ts @@ -8,7 +8,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; import { getAccessToken } from "@/app/utils/baidu"; const serverConfig = getServerSideConfig(); @@ -104,7 +104,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Baidu as string, diff --git a/app/api/bytedance.ts b/app/api/bytedance.ts index cb65b1061..51b39ceb7 100644 --- a/app/api/bytedance.ts +++ b/app/api/bytedance.ts @@ -8,7 +8,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; const serverConfig = getServerSideConfig(); @@ -88,7 +88,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.ByteDance as string, diff --git a/app/api/common.ts b/app/api/common.ts index b4c792d6f..b7e41fa26 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,8 +1,8 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; import { OPENAI_BASE_URL, ServiceProvider } from "../constant"; -import { isModelAvailableInServer } from "../utils/model"; import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; +import { getModelProvider, isModelNotavailableInServer } from "../utils/model"; const serverConfig = getServerSideConfig(); @@ -71,7 +71,7 @@ export async function requestOpenai(req: NextRequest) { .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName)) .forEach((m) => { const [fullName, displayName] = m.split("="); - const [_, providerName] = fullName.split("@"); + const [_, providerName] = getModelProvider(fullName); if (providerName === "azure" && !displayName) { const [_, deployId] = (serverConfig?.azureUrl ?? 
"").split( "deployments/", @@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, - ServiceProvider.OpenAI as string, - ) || - isModelAvailableInServer( - serverConfig.customModels, - jsonBody?.model as string, - ServiceProvider.Azure as string, + [ + ServiceProvider.OpenAI, + ServiceProvider.Azure, + jsonBody?.model as string, // support provider-unspecified model + ], ) ) { return NextResponse.json( diff --git a/app/api/config/route.ts b/app/api/config/route.ts index b0d9da031..855a5db01 100644 --- a/app/api/config/route.ts +++ b/app/api/config/route.ts @@ -14,6 +14,7 @@ const DANGER_CONFIG = { disableFastLink: serverConfig.disableFastLink, customModels: serverConfig.customModels, defaultModel: serverConfig.defaultModel, + visionModels: serverConfig.visionModels, }; declare global { diff --git a/app/api/deepseek.ts b/app/api/deepseek.ts new file mode 100644 index 000000000..a9879eced --- /dev/null +++ b/app/api/deepseek.ts @@ -0,0 +1,128 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + DEEPSEEK_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelNotavailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[DeepSeek Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.DeepSeek); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[DeepSeek] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, ""); + + let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelNotavailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.DeepSeek as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[DeepSeek] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/glm.ts b/app/api/glm.ts new file mode 100644 index 000000000..8431c5db5 --- /dev/null +++ b/app/api/glm.ts @@ -0,0 +1,129 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + CHATGLM_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelNotavailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[GLM Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.ChatGLM); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[GLM] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, ""); + + let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + console.log("[Fetch Url] ", fetchUrl); + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelNotavailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ChatGLM as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[GLM] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts index 8b8227dce..6624f74e9 100644 --- a/app/api/iflytek.ts +++ b/app/api/iflytek.ts @@ -8,7 +8,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; // iflytek const serverConfig = getServerSideConfig(); @@ -89,7 +89,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Iflytek as string, diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts index 5bf4807e3..792d14d33 100644 --- a/app/api/moonshot.ts +++ b/app/api/moonshot.ts @@ -8,7 +8,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; +import { isModelNotavailableInServer } from "@/app/utils/model"; const serverConfig = getServerSideConfig(); @@ -88,7 +88,7 @@ async function request(req: NextRequest) { // not undefined and is false if ( - isModelAvailableInServer( + isModelNotavailableInServer( serverConfig.customModels, jsonBody?.model as string, ServiceProvider.Moonshot as string, diff --git a/app/api/openai.ts b/app/api/openai.ts index bbba69e56..2b5deca8b 100644 --- a/app/api/openai.ts +++ b/app/api/openai.ts @@ -14,7 +14,7 @@ function getModels(remoteModelRes: OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( (m) => - !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) || + !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1")) || m.id.startsWith("gpt-4o-mini"), ); } diff --git a/app/api/proxy.ts b/app/api/proxy.ts index 731003aa1..b3e5e7b7b 100644 --- a/app/api/proxy.ts +++ b/app/api/proxy.ts @@ -1,4 +1,5 @@ import { NextRequest, NextResponse } from "next/server"; +import { getServerSideConfig } from "@/app/config/server"; export async function handle( req: NextRequest, @@ -9,6 +10,7 @@ export async function handle( if (req.method === 
"OPTIONS") { return NextResponse.json({ body: "OK" }, { status: 200 }); } + const serverConfig = getServerSideConfig(); // remove path params from searchParams req.nextUrl.searchParams.delete("path"); @@ -31,6 +33,18 @@ export async function handle( return true; }), ); + // if dalle3 use openai api key + const baseUrl = req.headers.get("x-base-url"); + if (baseUrl?.includes("api.openai.com")) { + if (!serverConfig.apiKey) { + return NextResponse.json( + { error: "OpenAI API key not configured" }, + { status: 500 }, + ); + } + headers.set("Authorization", `Bearer ${serverConfig.apiKey}`); + } + const controller = new AbortController(); const fetchOptions: RequestInit = { headers, diff --git a/app/api/xai.ts b/app/api/xai.ts new file mode 100644 index 000000000..4aad5e5fb --- /dev/null +++ b/app/api/xai.ts @@ -0,0 +1,128 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + XAI_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelNotavailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[XAI Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.XAI); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[XAI] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, ""); + + let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelNotavailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.XAI as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[XAI] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index 7a242ea99..8f263763b 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -20,6 +20,9 @@ import { QwenApi } from "./platforms/alibaba"; import { HunyuanApi } from "./platforms/tencent"; import { MoonshotApi } from "./platforms/moonshot"; import { SparkApi } from "./platforms/iflytek"; +import { DeepSeekApi } from "./platforms/deepseek"; +import { XAIApi } from "./platforms/xai"; +import { ChatGLMApi } from "./platforms/glm"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -68,7 +71,7 @@ export interface ChatOptions { config: LLMConfig; onUpdate?: (message: string, chunk: string) => void; - onFinish: (message: string) => void; + onFinish: (message: string, responseRes: Response) => void; onError?: (err: Error) => void; onController?: (controller: AbortController) => void; onBeforeTool?: (tool: ChatMessageTool) => void; @@ -152,6 +155,15 @@ export class ClientApi { case ModelProvider.Iflytek: this.llm = new SparkApi(); break; + case ModelProvider.DeepSeek: + this.llm = new DeepSeekApi(); + break; + case ModelProvider.XAI: + this.llm = new XAIApi(); + break; + case ModelProvider.ChatGLM: + this.llm = new ChatGLMApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -239,6 +251,9 @@ export function getHeaders(ignoreHeaders: boolean = false) { const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; + const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek; + const isXAI = modelConfig.providerName === ServiceProvider.XAI; + const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -252,6 +267,12 @@ export function getHeaders(ignoreHeaders: boolean = false) { ? accessStore.alibabaApiKey : isMoonshot ? accessStore.moonshotApiKey + : isXAI + ? accessStore.xaiApiKey + : isDeepSeek + ? accessStore.deepseekApiKey + : isChatGLM + ? accessStore.chatglmApiKey : isIflytek ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret ? 
accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret @@ -266,6 +287,9 @@ export function getHeaders(ignoreHeaders: boolean = false) { isAlibaba, isMoonshot, isIflytek, + isDeepSeek, + isXAI, + isChatGLM, apiKey, isEnabledAccessControl, }; @@ -286,6 +310,13 @@ export function getHeaders(ignoreHeaders: boolean = false) { isAzure, isAnthropic, isBaidu, + isByteDance, + isAlibaba, + isMoonshot, + isIflytek, + isDeepSeek, + isXAI, + isChatGLM, apiKey, isEnabledAccessControl, } = getConfig(); @@ -328,6 +359,12 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.Moonshot); case ServiceProvider.Iflytek: return new ClientApi(ModelProvider.Iflytek); + case ServiceProvider.DeepSeek: + return new ClientApi(ModelProvider.DeepSeek); + case ServiceProvider.XAI: + return new ClientApi(ModelProvider.XAI); + case ServiceProvider.ChatGLM: + return new ClientApi(ModelProvider.ChatGLM); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 86229a147..6fe69e87a 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -143,6 +143,7 @@ export class QwenApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -172,7 +173,7 @@ export class QwenApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -188,6 +189,7 @@ export class QwenApi implements LLMApi { "[Alibaba] request response content type: ", contentType, ); + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); @@ -254,7 +256,7 @@ export class QwenApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 1a83bd53a..6747221a8 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -13,6 +13,7 @@ import { getMessageTextContent, isVisionModel } from "@/app/utils"; import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; export type MultiBlockContent = { type: "image" | "text"; @@ -316,13 +317,14 @@ export class ClaudeApi implements LLMApi { }; try { - controller.signal.onabort = () => options.onFinish(""); + controller.signal.onabort = () => + options.onFinish("", new Response(null, { status: 400 })); const res = await fetch(path, payload); const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } catch (e) { console.error("failed to chat", e); options.onError?.(e as Error); diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 2511a696b..9e8c2f139 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -162,6 +162,7 @@ export class ErnieApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // 
animate response to make it looks smooth function animateResponseText() { @@ -191,7 +192,7 @@ export class ErnieApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -204,7 +205,7 @@ export class ErnieApi implements LLMApi { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Baidu] request response content type: ", contentType); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -267,7 +268,7 @@ export class ErnieApi implements LLMApi { const resJson = await res.json(); const message = resJson?.result; - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 000a9e278..a2f0660d8 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -130,6 +130,7 @@ export class DoubaoApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -159,7 +160,7 @@ export class DoubaoApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -175,7 +176,7 @@ export class DoubaoApi implements LLMApi { "[ByteDance] request response content type: ", contentType, ); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -241,7 +242,7 @@ export class DoubaoApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts new file mode 100644 index 000000000..e2ae645c6 --- /dev/null +++ b/app/client/platforms/deepseek.ts @@ -0,0 +1,200 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { + ApiPath, + DEEPSEEK_BASE_URL, + DeepSeek, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; + +export class DeepSeekApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.deepseekUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.DeepSeek; + baseUrl = isApp ? 
DEEPSEEK_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] openai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(DeepSeek.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + 
console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts new file mode 100644 index 000000000..a8d1869e3 --- /dev/null +++ b/app/client/platforms/glm.ts @@ -0,0 +1,293 @@ +"use client"; +import { + ApiPath, + CHATGLM_BASE_URL, + ChatGLM, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; +import { preProcessImageContent } from "@/app/utils/chat"; + +interface BasePayload { + model: string; +} + +interface ChatPayload extends BasePayload { + messages: ChatOptions["messages"]; + stream?: boolean; + temperature?: number; + presence_penalty?: number; + frequency_penalty?: number; + top_p?: number; +} + +interface ImageGenerationPayload extends BasePayload { + prompt: string; + size?: string; + user_id?: string; +} + +interface VideoGenerationPayload extends BasePayload { + prompt: string; + duration?: number; + resolution?: string; + user_id?: string; +} + +type ModelType = "chat" | "image" | "video"; + +export class ChatGLMApi implements LLMApi { + private disableListModels = true; + + private getModelType(model: string): ModelType { + if (model.startsWith("cogview-")) return "image"; + if (model.startsWith("cogvideo-")) return "video"; + return "chat"; + } + + private getModelPath(type: ModelType): string { + switch (type) { + case "image": + return ChatGLM.ImagePath; + case "video": + return ChatGLM.VideoPath; + default: + return ChatGLM.ChatPath; + } + } + + private createPayload( + messages: ChatOptions["messages"], + modelConfig: any, + options: ChatOptions, + ): BasePayload { + const modelType = this.getModelType(modelConfig.model); + const lastMessage = messages[messages.length - 1]; + const prompt = + typeof lastMessage.content === "string" + ? lastMessage.content + : lastMessage.content.map((c) => c.text).join("\n"); + + switch (modelType) { + case "image": + return { + model: modelConfig.model, + prompt, + size: options.config.size, + } as ImageGenerationPayload; + default: + return { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + } as ChatPayload; + } + } + + private parseResponse(modelType: ModelType, json: any): string { + switch (modelType) { + case "image": { + const imageUrl = json.data?.[0]?.url; + return imageUrl ? `![Generated Image](${imageUrl})` : ""; + } + case "video": { + const videoUrl = json.data?.[0]?.url; + return videoUrl ? 
`` : ""; + } + default: + return this.extractMessage(json); + } + } + + path(path: string): string { + const accessStore = useAccessStore.getState(); + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.chatglmUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.ChatGLM; + baseUrl = isApp ? CHATGLM_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = visionModel + ? await preProcessImageContent(v.content) + : getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + const modelType = this.getModelType(modelConfig.model); + const requestPayload = this.createPayload(messages, modelConfig, options); + const path = this.path(this.getModelPath(modelType)); + + console.log(`[Request] glm ${modelType} payload: `, requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (modelType === "image" || modelType === "video") { + const res = await fetch(path, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + console.log(`[Response] glm ${modelType}:`, resJson); + const message = this.parseResponse(modelType, resJson); + options.onFinish(message, res); + return; + } + + const shouldStream = !!options.config.stream; + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + path, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + 
requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(path, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 7265a500b..5ca8e1071 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -29,7 +29,7 @@ import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; export class GeminiProApi implements LLMApi { - path(path: string): string { + path(path: string, shouldStream = false): string { const accessStore = useAccessStore.getState(); let baseUrl = ""; @@ -51,15 +51,27 @@ export class GeminiProApi implements LLMApi { console.log("[Proxy Endpoint] ", baseUrl, path); let chatPath = [baseUrl, path].join("/"); + if (shouldStream) { + chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; + } - chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; return chatPath; } extractMessage(res: any) { console.log("[Response] gemini-pro response: ", res); + const getTextFromParts = (parts: any[]) => { + if (!Array.isArray(parts)) return ""; + + return parts + .map((part) => part?.text || "") + .filter((text) => text.trim() !== "") + .join("\n\n"); + }; + return ( - res?.candidates?.at(0)?.content?.parts.at(0)?.text || + getTextFromParts(res?.candidates?.at(0)?.content?.parts) || + getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) || res?.error?.message || "" ); @@ -166,7 +178,10 @@ export class GeminiProApi implements LLMApi { options.onController?.(controller); try { // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb - const chatPath = this.path(Google.ChatPath(modelConfig.model)); + const chatPath = this.path( + Google.ChatPath(modelConfig.model), + shouldStream, + ); const chatPayload = { method: "POST", @@ -192,7 +207,10 @@ export class GeminiProApi implements LLMApi { requestPayload, getHeaders(), // @ts-ignore - [{ functionDeclarations: tools.map((tool) => tool.function) }], + tools.length > 0 + ? 
// @ts-ignore + [{ functionDeclarations: tools.map((tool) => tool.function) }] + : [], funcs, controller, // parseSSE @@ -214,7 +232,10 @@ export class GeminiProApi implements LLMApi { }, }); } - return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text; + return chunkJson?.candidates + ?.at(0) + ?.content.parts?.map((part: { text: string }) => part.text) + .join("\n\n"); }, // processToolMessage, include tool_calls message and tool call results ( @@ -271,7 +292,7 @@ export class GeminiProApi implements LLMApi { ); } const message = apiClient.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index 55a39d0cc..cfc37b3b2 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -117,6 +117,7 @@ export class SparkApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // Animate response text to make it look smooth function animateResponseText() { @@ -143,7 +144,7 @@ export class SparkApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -156,7 +157,7 @@ export class SparkApi implements LLMApi { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Spark] request response content type: ", contentType); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -231,7 +232,7 @@ export class SparkApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index e0ef3494f..b6812c0d7 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -24,6 +24,7 @@ import { import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; export class MoonshotApi implements LLMApi { private disableListModels = true; @@ -179,7 +180,7 @@ export class MoonshotApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index a22633611..5a110b84b 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -24,7 +24,7 @@ import { stream, } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; -import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing"; +import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing"; import { ChatOptions, @@ -42,6 +42,7 @@ import { isVisionModel, isDalle3 as _isDalle3, } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -64,6 +65,7 @@ export interface RequestPayload { frequency_penalty: number; top_p: number; max_tokens?: number; + 
max_completion_tokens?: number; } export interface DalleRequestPayload { @@ -71,7 +73,7 @@ export interface DalleRequestPayload { prompt: string; response_format: "url" | "b64_json"; n: number; - size: DalleSize; + size: ModelSize; quality: DalleQuality; style: DalleStyle; } @@ -222,7 +224,7 @@ export class ChatGPTApi implements LLMApi { // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet. requestPayload = { messages, - stream: !isO1 ? options.config.stream : false, + stream: options.config.stream, model: modelConfig.model, temperature: !isO1 ? modelConfig.temperature : 1, presence_penalty: !isO1 ? modelConfig.presence_penalty : 0, @@ -232,6 +234,11 @@ export class ChatGPTApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. }; + // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs) + if (isO1) { + requestPayload["max_completion_tokens"] = modelConfig.max_tokens; + } + // add max_tokens to vision model if (visionModel) { requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); @@ -240,7 +247,7 @@ export class ChatGPTApi implements LLMApi { console.log("[Request] openai payload: ", requestPayload); - const shouldStream = !isDalle3 && !!options.config.stream && !isO1; + const shouldStream = !isDalle3 && !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); @@ -352,7 +359,7 @@ export class ChatGPTApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. + isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. 
); const res = await fetch(chatPath, chatPayload); @@ -360,7 +367,7 @@ export class ChatGPTApi implements LLMApi { const resJson = await res.json(); const message = await this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 3610fac0a..580844a5b 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -142,6 +142,7 @@ export class HunyuanApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -171,7 +172,7 @@ export class HunyuanApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -187,7 +188,7 @@ export class HunyuanApi implements LLMApi { "[Tencent] request response content type: ", contentType, ); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -253,7 +254,7 @@ export class HunyuanApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts new file mode 100644 index 000000000..06dbaaa29 --- /dev/null +++ b/app/client/platforms/xai.ts @@ -0,0 +1,193 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; + +export class XAIApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.xaiUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.XAI; + baseUrl = isApp ? XAI_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] xai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(XAI.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/components/auth.tsx b/app/components/auth.tsx index e19512d87..5375bda3f 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -11,12 +11,15 @@ import Logo from "../icons/logo.svg"; import { useMobileScreen } from "@/app/utils"; import BotIcon from "../icons/bot.svg"; import { getClientConfig } from "../config/client"; +import { PasswordInput } from "./ui-lib"; 
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index e19512d87..5375bda3f 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -11,12 +11,15 @@ import Logo from "../icons/logo.svg";
 import { useMobileScreen } from "@/app/utils";
 import BotIcon from "../icons/bot.svg";
 import { getClientConfig } from "../config/client";
+import { PasswordInput } from "./ui-lib";
 import LeftIcon from "@/app/icons/left.svg";
 import { safeLocalStorage } from "@/app/utils";
 import {
   trackSettingsPageGuideToCPaymentClick,
   trackAuthorizationPageButtonToCPaymentClick,
 } from "../utils/auth-settings-events";
+import clsx from "clsx";
+
 const storage = safeLocalStorage();

 export function AuthPage() {
@@ -53,43 +56,50 @@ export function AuthPage() {
         onClick={() => navigate(Path.Home)}
       >
-
+
{Locale.Auth.Title}
{Locale.Auth.Tips}
- { accessStore.update( (access) => (access.accessCode = e.currentTarget.value), ); }} /> + {!accessStore.hideUserApiKey ? ( <>
{Locale.Auth.SubTips}
- { accessStore.update( (access) => (access.openaiApiKey = e.currentTarget.value), ); }} /> - { accessStore.update( (access) => (access.googleApiKey = e.currentTarget.value), @@ -155,7 +165,7 @@ function TopBanner() { onMouseEnter={handleMouseEnter} onMouseLeave={handleMouseLeave} > -
+
{Locale.Auth.TopTips} diff --git a/app/components/button.tsx b/app/components/button.tsx index 87b4abd30..157d5d73d 100644 --- a/app/components/button.tsx +++ b/app/components/button.tsx @@ -2,6 +2,7 @@ import * as React from "react"; import styles from "./button.module.scss"; import { CSSProperties } from "react"; +import clsx from "clsx"; export type ButtonType = "primary" | "danger" | null; @@ -22,12 +23,16 @@ export function IconButton(props: { }) { return (
diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx
index 03b1a5c88..63dc4d5ff 100644
--- a/app/components/chat-list.tsx
+++ b/app/components/chat-list.tsx
@@ -18,6 +18,7 @@ import { Mask } from "../store/mask";
 import { useRef, useEffect } from "react";
 import { showConfirm } from "./ui-lib";
 import { useMobileScreen } from "../utils";
+import clsx from "clsx";

 export function ChatItem(props: {
   onClick?: () => void;
@@ -45,11 +46,11 @@ export function ChatItem(props: {
       {(provided) => (
{ draggableRef.current = ele; @@ -63,7 +64,7 @@ export function ChatItem(props: { > {props.narrow ? (
-
+
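The `clsx` imports added to button.tsx and chat-list.tsx above replace hand-built template strings for class names. A quick illustration of the pattern; the class names here are made up for the example, not the repo's style keys:

```ts
import clsx from "clsx";

// clsx drops falsy entries, so conditional classes read declaratively;
// "chat-item" etc. are illustrative names, not the repo's exact ones.
function chatItemClassName(selected: boolean, narrow: boolean) {
  return clsx("chat-item", {
    "chat-item-selected": selected,
    "chat-item-narrow": narrow,
  });
}

// chatItemClassName(true, false) -> "chat-item chat-item-selected"
```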
diff --git a/app/components/chat.module.scss b/app/components/chat.module.scss
--- a/app/components/chat.module.scss
+++ b/app/components/chat.module.scss
-.chat-message-user>.chat-message-container {
+.chat-message-user > .chat-message-container {
   align-items: flex-end;
 }
@@ -443,6 +449,25 @@
   transition: all ease 0.3s;
 }

+.chat-message-audio {
+  display: flex;
+  align-items: center;
+  justify-content: space-between;
+  border-radius: 10px;
+  background-color: rgba(0, 0, 0, 0.05);
+  border: var(--border-in-light);
+  position: relative;
+  transition: all ease 0.3s;
+  margin-top: 10px;
+  font-size: 14px;
+  user-select: text;
+  word-break: break-word;
+  box-sizing: border-box;
+  audio {
+    height: 30px; /* adjust the player height */
+  }
+}
+
 .chat-message-item-image {
   width: 100%;
   margin-top: 10px;
@@ -471,23 +496,27 @@
     border: rgba($color: #888, $alpha: 0.2) 1px solid;
   }

-
 @media only screen and (max-width: 600px) {
-  $calc-image-width: calc(100vw/3*2/var(--image-count));
+  $calc-image-width: calc(100vw / 3 * 2 / var(--image-count));

   .chat-message-item-image-multi {
     width: $calc-image-width;
     height: $calc-image-width;
   }
-
+
   .chat-message-item-image {
-    max-width: calc(100vw/3*2);
+    max-width: calc(100vw / 3 * 2);
   }
 }

 @media screen and (min-width: 600px) {
-  $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count));
-  $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count));
+  $max-image-width: calc(
+    calc(1200px - var(--sidebar-width)) / 3 * 2 / var(--image-count)
+  );
+  $image-width: calc(
+    calc(var(--window-width) - var(--sidebar-width)) / 3 * 2 /
+      var(--image-count)
+  );

   .chat-message-item-image-multi {
     width: $image-width;
@@ -497,7 +526,7 @@
   }

   .chat-message-item-image {
-    max-width: calc(calc(1200px - var(--sidebar-width))/3*2);
+    max-width: calc(calc(1200px - var(--sidebar-width)) / 3 * 2);
   }
 }

@@ -515,7 +544,7 @@
   z-index: 1;
 }

-.chat-message-user>.chat-message-container>.chat-message-item {
+.chat-message-user > .chat-message-container > .chat-message-item {
   background-color: var(--second);

   &:hover {
@@ -626,7 +655,8 @@
   min-height: 68px;
 }

-.chat-input:focus {}
+.chat-input:focus {
+}

 .chat-input-send {
   background-color: var(--primary);
@@ -693,4 +723,31 @@
 .shortcut-key span {
   font-size: 12px;
   color: var(--black);
-}
\ No newline at end of file
+}
+
+.chat-main {
+  display: flex;
+  height: 100%;
+  width: 100%;
+  position: relative;
+  overflow: hidden;
+  .chat-body-container {
+    height: 100%;
+    display: flex;
+    flex-direction: column;
+    flex: 1;
+    width: 100%;
+  }
+  .chat-side-panel {
+    position: absolute;
+    inset: 0;
+    background: var(--white);
+    overflow: hidden;
+    z-index: 10;
+    transform: translateX(100%);
+    transition: all ease 0.3s;
+    &-show {
+      transform: translateX(0);
+    }
+  }
+}
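The new `.chat-side-panel` rules slide the realtime panel in by toggling a `-show` modifier over a `translateX` transition. A minimal TSX sketch of driving those classes; the component shape and button are assumptions for illustration, not the repo's markup:

```tsx
import clsx from "clsx";
import { useState } from "react";

// Minimal sketch of toggling .chat-side-panel / .chat-side-panel-show above;
// ChatMain's internals here are hypothetical, not the repo's component.
function ChatMain() {
  const [showSidePanel, setShowSidePanel] = useState(false);
  return (
    <div className="chat-main">
      <div className="chat-body-container">
        <button onClick={() => setShowSidePanel(true)}>Realtime Chat</button>
      </div>
      <div
        className={clsx("chat-side-panel", {
          // adding -show moves the panel from translateX(100%) to 0
          "chat-side-panel-show": showSidePanel,
        })}
      >
        {/* side panel content goes here */}
      </div>
    </div>
  );
}
```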
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index b45d36f95..9990a359e 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -46,7 +46,7 @@ import StyleIcon from "../icons/palette.svg";
 import PluginIcon from "../icons/plugin.svg";
 import ShortcutkeyIcon from "../icons/shortcutkey.svg";
 import ReloadIcon from "../icons/reload.svg";
-
+import HeadphoneIcon from "../icons/headphone.svg";
 import {
   ChatMessage,
   SubmitKey,
@@ -72,6 +72,8 @@ import {
   isDalle3,
   showPlugins,
   safeLocalStorage,
+  getModelSizes,
+  supportsCustomSize,
 } from "../utils";

 import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
@@ -79,7 +81,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
 import dynamic from "next/dynamic";

 import { ChatControllerPool } from "../client/controller";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
 import { Prompt, usePromptStore } from "../store/prompt";
 import Locale from "../locales";
@@ -115,11 +117,17 @@ import { getClientConfig } from "../config/client";
 import { useAllModels } from "../utils/hooks";
 import { MultimodalContent } from "../client/api";

-const localStorage = safeLocalStorage();
 import { ClientApi } from "../client/api";
 import { createTTSPlayer } from "../utils/audio";
 import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts";
+import { isEmpty } from "lodash-es";
+import { getModelProvider } from "../utils/model";
+import { RealtimeChat } from "@/app/components/realtime-chat";
+import clsx from "clsx";
+
+const localStorage = safeLocalStorage();
+
 const ttsPlayer = createTTSPlayer();

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
@@ -145,7 +153,8 @@ export function SessionConfigModel(props: { onClose: () => void }) {
             text={Locale.Chat.Config.Reset}
             onClick={async () => {
               if (await showConfirm(Locale.Memory.ResetConfirm)) {
-                chatStore.updateCurrentSession(
+                chatStore.updateTargetSession(
+                  session,
                   (session) => (session.memoryPrompt = ""),
                 );
               }
@@ -170,7 +179,10 @@ export function SessionConfigModel(props: { onClose: () => void }) {
         updateMask={(updater) => {
           const mask = { ...session.mask };
           updater(mask);
-          chatStore.updateCurrentSession((session) => (session.mask = mask));
+          chatStore.updateTargetSession(
+            session,
+            (session) => (session.mask = mask),
+          );
         }}
         shouldSyncFromGlobal
         extraListItems={
@@ -203,7 +215,7 @@ function PromptToast(props: {
{props.showToast && context.length > 0 && (
props.setShowModal(true)} > @@ -324,10 +336,9 @@ export function PromptHints(props: { {props.prompts.map((prompt, i) => (
props.onPromptSelect(prompt)} onMouseEnter={() => setSelectIndex(i)} @@ -342,12 +353,14 @@ export function PromptHints(props: { function ClearContextDivider() { const chatStore = useChatStore(); + const session = chatStore.currentSession(); return (
-          chatStore.updateCurrentSession(
+          chatStore.updateTargetSession(
+            session,
             (session) => (session.clearContextIndex = undefined),
           )
         }
@@ -385,7 +398,7 @@ export function ChatAction(props: {
   return (
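The recurring `updateCurrentSession` → `updateTargetSession(session, ...)` rewrite throughout this file pins each write to the session captured at render time, so a late callback cannot land in whichever session happens to be "current" by then. A reduced sketch of the two shapes; the store and session types are assumptions for illustration:

```ts
// Reduced sketch of why updateTargetSession takes the session explicitly;
// ChatSession and ChatStore here are assumed shapes, not the repo's.
interface ChatSession {
  id: string;
  topic: string;
}

interface ChatStore {
  sessions: ChatSession[];
  currentSessionIndex: number;
}

// old shape: always mutates whatever is current *right now*
function updateCurrentSession(
  store: ChatStore,
  updater: (s: ChatSession) => void,
) {
  updater(store.sessions[store.currentSessionIndex]);
}

// new shape: mutates the session the caller captured, looked up by identity,
// so switching sessions mid-request cannot redirect the write
function updateTargetSession(
  store: ChatStore,
  target: ChatSession,
  updater: (s: ChatSession) => void,
) {
  const index = store.sessions.findIndex((s) => s.id === target.id);
  if (index >= 0) updater(store.sessions[index]);
}
```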
{ props.onClick(); setTimeout(updateWidth, 1); @@ -452,11 +465,13 @@ export function ChatActions(props: { uploading: boolean; setShowShortcutKeyModal: React.Dispatch>; setUserInput: (input: string) => void; + setShowChatSidePanel: React.Dispatch>; }) { const config = useAppConfig(); const navigate = useNavigate(); const chatStore = useChatStore(); const pluginStore = usePluginStore(); + const session = chatStore.currentSession(); // switch themes const theme = config.theme; @@ -473,10 +488,9 @@ export function ChatActions(props: { const stopAll = () => ChatControllerPool.stopAll(); // switch model - const currentModel = chatStore.currentSession().mask.modelConfig.model; + const currentModel = session.mask.modelConfig.model; const currentProviderName = - chatStore.currentSession().mask.modelConfig?.providerName || - ServiceProvider.OpenAI; + session.mask.modelConfig?.providerName || ServiceProvider.OpenAI; const allModels = useAllModels(); const models = useMemo(() => { const filteredModels = allModels.filter((m) => m.available); @@ -507,15 +521,13 @@ export function ChatActions(props: { const [showSizeSelector, setShowSizeSelector] = useState(false); const [showQualitySelector, setShowQualitySelector] = useState(false); const [showStyleSelector, setShowStyleSelector] = useState(false); - const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"]; + const modelSizes = getModelSizes(currentModel); const dalle3Qualitys: DalleQuality[] = ["standard", "hd"]; const dalle3Styles: DalleStyle[] = ["vivid", "natural"]; const currentSize = - chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024"; - const currentQuality = - chatStore.currentSession().mask.modelConfig?.quality ?? "standard"; - const currentStyle = - chatStore.currentSession().mask.modelConfig?.style ?? "vivid"; + session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize); + const currentQuality = session.mask.modelConfig?.quality ?? "standard"; + const currentStyle = session.mask.modelConfig?.style ?? "vivid"; const isMobileScreen = useMobileScreen(); @@ -533,7 +545,7 @@ export function ChatActions(props: { if (isUnavailableModel && models.length > 0) { // show next model to default model if exist let nextModel = models.find((model) => model.isDefault) || models[0]; - chatStore.updateCurrentSession((session) => { + chatStore.updateTargetSession(session, (session) => { session.mask.modelConfig.model = nextModel.name; session.mask.modelConfig.providerName = nextModel?.provider ?.providerName as ServiceProvider; @@ -544,242 +556,254 @@ export function ChatActions(props: { : nextModel.name, ); } - }, [chatStore, currentModel, models]); + }, [chatStore, currentModel, models, session]); return (
- {couldStop && ( + <> + {couldStop && ( + } + /> + )} + {!props.hitBottom && ( + } + /> + )} + {props.hitBottom && ( + } + /> + )} + + {showUploadImage && ( + : } + /> + )} } + onClick={nextTheme} + text={Locale.Chat.InputActions.Theme[theme]} + icon={ + <> + {theme === Theme.Auto ? ( + + ) : theme === Theme.Light ? ( + + ) : theme === Theme.Dark ? ( + + ) : null} + + } /> - )} - {!props.hitBottom && ( + } + onClick={props.showPromptHints} + text={Locale.Chat.InputActions.Prompt} + icon={} /> - )} - {props.hitBottom && ( - } - /> - )} - {showUploadImage && ( - : } - /> - )} - - {theme === Theme.Auto ? ( - - ) : theme === Theme.Light ? ( - - ) : theme === Theme.Dark ? ( - - ) : null} - - } - /> - - } - /> - - { - navigate(Path.Masks); - }} - text={Locale.Chat.InputActions.Masks} - icon={} - /> - - } - onClick={() => { - chatStore.updateCurrentSession((session) => { - if (session.clearContextIndex === session.messages.length) { - session.clearContextIndex = undefined; - } else { - session.clearContextIndex = session.messages.length; - session.memoryPrompt = ""; // will clear memory - } - }); - }} - /> - - setShowModelSelector(true)} - text={currentModelName} - icon={} - /> - - {showModelSelector && ( - ({ - title: `${m.displayName}${ - m?.provider?.providerName - ? " (" + m?.provider?.providerName + ")" - : "" - }`, - value: `${m.name}@${m?.provider?.providerName}`, - }))} - onClose={() => setShowModelSelector(false)} - onSelection={(s) => { - if (s.length === 0) return; - const [model, providerName] = s[0].split("@"); - chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.model = model as ModelType; - session.mask.modelConfig.providerName = - providerName as ServiceProvider; - session.mask.syncGlobalConfig = false; - }); - if (providerName == "ByteDance") { - const selectedModel = models.find( - (m) => - m.name == model && m?.provider?.providerName == providerName, - ); - showToast(selectedModel?.displayName ?? 
""); - } else { - showToast(model); - } - }} - /> - )} - - {isDalle3(currentModel) && ( - setShowSizeSelector(true)} - text={currentSize} - icon={} - /> - )} - - {showSizeSelector && ( - ({ - title: m, - value: m, - }))} - onClose={() => setShowSizeSelector(false)} - onSelection={(s) => { - if (s.length === 0) return; - const size = s[0]; - chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.size = size; - }); - showToast(size); - }} - /> - )} - - {isDalle3(currentModel) && ( - setShowQualitySelector(true)} - text={currentQuality} - icon={} - /> - )} - - {showQualitySelector && ( - ({ - title: m, - value: m, - }))} - onClose={() => setShowQualitySelector(false)} - onSelection={(q) => { - if (q.length === 0) return; - const quality = q[0]; - chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.quality = quality; - }); - showToast(quality); - }} - /> - )} - - {isDalle3(currentModel) && ( - setShowStyleSelector(true)} - text={currentStyle} - icon={} - /> - )} - - {showStyleSelector && ( - ({ - title: m, - value: m, - }))} - onClose={() => setShowStyleSelector(false)} - onSelection={(s) => { - if (s.length === 0) return; - const style = s[0]; - chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.style = style; - }); - showToast(style); - }} - /> - )} - - {showPlugins(currentProviderName, currentModel) && ( { - if (pluginStore.getAll().length == 0) { - navigate(Path.Plugins); - } else { - setShowPluginSelector(true); - } + navigate(Path.Masks); }} - text={Locale.Plugin.Name} - icon={} + text={Locale.Chat.InputActions.Masks} + icon={} /> - )} - {showPluginSelector && ( - ({ - title: `${item?.title}@${item?.version}`, - value: item?.id, - }))} - onClose={() => setShowPluginSelector(false)} - onSelection={(s) => { - chatStore.updateCurrentSession((session) => { - session.mask.plugin = s as string[]; + + } + onClick={() => { + chatStore.updateTargetSession(session, (session) => { + if (session.clearContextIndex === session.messages.length) { + session.clearContextIndex = undefined; + } else { + session.clearContextIndex = session.messages.length; + session.memoryPrompt = ""; // will clear memory + } }); }} /> - )} - {!isMobileScreen && ( props.setShowShortcutKeyModal(true)} - text={Locale.Chat.ShortcutKey.Title} - icon={} + onClick={() => setShowModelSelector(true)} + text={currentModelName} + icon={} /> - )} + + {showModelSelector && ( + ({ + title: `${m.displayName}${ + m?.provider?.providerName + ? " (" + m?.provider?.providerName + ")" + : "" + }`, + value: `${m.name}@${m?.provider?.providerName}`, + }))} + onClose={() => setShowModelSelector(false)} + onSelection={(s) => { + if (s.length === 0) return; + const [model, providerName] = getModelProvider(s[0]); + chatStore.updateTargetSession(session, (session) => { + session.mask.modelConfig.model = model as ModelType; + session.mask.modelConfig.providerName = + providerName as ServiceProvider; + session.mask.syncGlobalConfig = false; + }); + if (providerName == "ByteDance") { + const selectedModel = models.find( + (m) => + m.name == model && + m?.provider?.providerName == providerName, + ); + showToast(selectedModel?.displayName ?? 
""); + } else { + showToast(model); + } + }} + /> + )} + + {supportsCustomSize(currentModel) && ( + setShowSizeSelector(true)} + text={currentSize} + icon={} + /> + )} + + {showSizeSelector && ( + ({ + title: m, + value: m, + }))} + onClose={() => setShowSizeSelector(false)} + onSelection={(s) => { + if (s.length === 0) return; + const size = s[0]; + chatStore.updateTargetSession(session, (session) => { + session.mask.modelConfig.size = size; + }); + showToast(size); + }} + /> + )} + + {isDalle3(currentModel) && ( + setShowQualitySelector(true)} + text={currentQuality} + icon={} + /> + )} + + {showQualitySelector && ( + ({ + title: m, + value: m, + }))} + onClose={() => setShowQualitySelector(false)} + onSelection={(q) => { + if (q.length === 0) return; + const quality = q[0]; + chatStore.updateTargetSession(session, (session) => { + session.mask.modelConfig.quality = quality; + }); + showToast(quality); + }} + /> + )} + + {isDalle3(currentModel) && ( + setShowStyleSelector(true)} + text={currentStyle} + icon={} + /> + )} + + {showStyleSelector && ( + ({ + title: m, + value: m, + }))} + onClose={() => setShowStyleSelector(false)} + onSelection={(s) => { + if (s.length === 0) return; + const style = s[0]; + chatStore.updateTargetSession(session, (session) => { + session.mask.modelConfig.style = style; + }); + showToast(style); + }} + /> + )} + + {showPlugins(currentProviderName, currentModel) && ( + { + if (pluginStore.getAll().length == 0) { + navigate(Path.Plugins); + } else { + setShowPluginSelector(true); + } + }} + text={Locale.Plugin.Name} + icon={} + /> + )} + {showPluginSelector && ( + ({ + title: `${item?.title}@${item?.version}`, + value: item?.id, + }))} + onClose={() => setShowPluginSelector(false)} + onSelection={(s) => { + chatStore.updateTargetSession(session, (session) => { + session.mask.plugin = s as string[]; + }); + }} + /> + )} + + {!isMobileScreen && ( + props.setShowShortcutKeyModal(true)} + text={Locale.Chat.ShortcutKey.Title} + icon={} + /> + )} + +
+ {config.realtimeConfig.enable && ( + props.setShowChatSidePanel(true)} + text={"Realtime Chat"} + icon={} + /> + )} +
); } @@ -809,7 +833,8 @@ export function EditMessageModal(props: { onClose: () => void }) { icon={} key="ok" onClick={() => { - chatStore.updateCurrentSession( + chatStore.updateTargetSession( + session, (session) => (session.messages = messages), ); props.onClose(); @@ -826,7 +851,8 @@ export function EditMessageModal(props: { onClose: () => void }) { type="text" value={session.topic} onInput={(e) => - chatStore.updateCurrentSession( + chatStore.updateTargetSession( + session, (session) => (session.topic = e.currentTarget.value), ) } @@ -874,6 +900,12 @@ export function ShortcutKeyModal(props: { onClose: () => void }) { title: Locale.Chat.ShortcutKey.showShortcutKey, keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"], }, + { + title: Locale.Chat.ShortcutKey.clearContext, + keys: isMac + ? ["⌘", "Shift", "backspace"] + : ["Ctrl", "Shift", "backspace"], + }, ]; return (
@@ -937,9 +969,24 @@ function _Chat() {
           (scrollRef.current.scrollTop + scrollRef.current.clientHeight),
       ) <= 1
     : false;
+  const isAttachWithTop = useMemo(() => {
+    const lastMessage = scrollRef.current?.lastElementChild as HTMLElement;
+    // if scrollRef is not ready or there is no message, return false
+    if (!scrollRef?.current || !lastMessage) return false;
+    const topDistance =
+      lastMessage!.getBoundingClientRect().top -
+      scrollRef.current.getBoundingClientRect().top;
+    // leave some space for the user question
+    return topDistance < 100;
+  }, [scrollRef?.current?.scrollHeight]);
+
+  const isTyping = userInput !== "";
+
+  // if the user is typing, should not auto scroll to bottom
+  // if the user is not typing, should auto scroll to bottom only if already at bottom
   const { setAutoScroll, scrollDomToBottom } = useScrollToBottom(
     scrollRef,
-    isScrolledToBottom,
+    (isScrolledToBottom || isAttachWithTop) && !isTyping,
   );
   const [hitBottom, setHitBottom] = useState(true);
   const isMobileScreen = useMobileScreen();
@@ -987,7 +1034,8 @@ function _Chat() {
     prev: () => chatStore.nextSession(-1),
     next: () => chatStore.nextSession(1),
     clear: () =>
-      chatStore.updateCurrentSession(
+      chatStore.updateTargetSession(
+        session,
         (session) => (session.clearContextIndex = session.messages.length),
       ),
     fork: () => chatStore.forkSession(),
@@ -1015,7 +1063,7 @@ function _Chat() {
   };

   const doSubmit = (userInput: string) => {
-    if (userInput.trim() === "") return;
+    if (userInput.trim() === "" && isEmpty(attachImages)) return;
     const matchCommand = chatCommands.match(userInput);
     if (matchCommand.matched) {
       setUserInput("");
@@ -1058,7 +1106,7 @@ function _Chat() {
   };

   useEffect(() => {
-    chatStore.updateCurrentSession((session) => {
+    chatStore.updateTargetSession(session, (session) => {
       const stopTiming = Date.now() - REQUEST_TIMEOUT_MS;
       session.messages.forEach((m) => {
         // check if should stop all stale messages
@@ -1084,7 +1132,7 @@ function _Chat() {
       }
     });
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, []);
+  }, [session]);

   // check if should send message
   const onInputKeyDown = (e: React.KeyboardEvent) => {
@@ -1115,7 +1163,8 @@ function _Chat() {
   };

   const deleteMessage = (msgId?: string) => {
-    chatStore.updateCurrentSession(
+    chatStore.updateTargetSession(
+      session,
       (session) =>
         (session.messages = session.messages.filter((m) => m.id !== msgId)),
     );
@@ -1182,7 +1231,7 @@ function _Chat() {
   };

   const onPinMessage = (message: ChatMessage) => {
-    chatStore.updateCurrentSession((session) =>
+    chatStore.updateTargetSession(session, (session) =>
       session.mask.context.push(message),
     );
@@ -1509,7 +1558,7 @@ function _Chat() {
   const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);

   useEffect(() => {
-    const handleKeyDown = (event: any) => {
+    const handleKeyDown = (event: KeyboardEvent) => {
      // open a new chat: command + shift + o
       if (
         (event.metaKey || event.ctrlKey) &&
@@ -1560,424 +1609,487 @@ function _Chat() {
         event.preventDefault();
         setShowShortcutKeyModal(true);
       }
+      // clear context: command + shift + backspace
+      else if (
+        (event.metaKey || event.ctrlKey) &&
+        event.shiftKey &&
+        event.key.toLowerCase() === "backspace"
+      ) {
+        event.preventDefault();
+        chatStore.updateTargetSession(session, (session) => {
+          if (session.clearContextIndex === session.messages.length) {
+            session.clearContextIndex = undefined;
+          } else {
+            session.clearContextIndex = session.messages.length;
+            session.memoryPrompt = ""; // will clear memory
+          }
+        });
+      }
     };
-    window.addEventListener("keydown", handleKeyDown);
+    document.addEventListener("keydown", handleKeyDown);
     return () => {
-      window.removeEventListener("keydown", handleKeyDown);
+      document.removeEventListener("keydown", handleKeyDown);
    };
-  }, [messages, chatStore, navigate]);
+  }, [messages, chatStore, navigate, session]);
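The scroll wiring at the top of this hunk reduces to a small predicate: auto-scroll only when the user is not typing, and either the view is already pinned to the bottom or the last message still starts near the top of the viewport. Restated as a pure function; the names are descriptive, not the repo's:

```ts
// Pure restatement of the auto-scroll condition wired into
// useScrollToBottom above; names are descriptive, not repo identifiers.
function shouldAutoScroll(
  isScrolledToBottom: boolean,
  lastMessageTopDistance: number,
  isTyping: boolean,
): boolean {
  // "attached to top": the latest message begins within 100px of the
  // viewport top, leaving room to keep the user's question visible
  const isAttachWithTop = lastMessageTopDistance < 100;
  return (isScrolledToBottom || isAttachWithTop) && !isTyping;
}
```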
+
+  const [showChatSidePanel, setShowChatSidePanel] = useState(false);

   return (
-
-
- {isMobileScreen && ( -
-
- } - bordered - title={Locale.Chat.Actions.ChatList} - onClick={() => navigate(Path.Home)} - /> -
-
- )} - -
-
setIsEditingMessage(true)} - > - {!session.topic ? DEFAULT_TOPIC : session.topic} -
-
- {Locale.Chat.SubTitle(session.messages.length)} -
-
-
-
- } - bordered - title={Locale.Chat.Actions.RefreshTitle} - onClick={() => { - showToast(Locale.Chat.Actions.RefreshToast); - chatStore.summarizeSession(true); - }} - /> -
- {!isMobileScreen && ( -
- } - bordered - title={Locale.Chat.EditMessage.Title} - aria={Locale.Chat.EditMessage.Title} - onClick={() => setIsEditingMessage(true)} - /> + <> +
+
+ {isMobileScreen && ( +
+
+ } + bordered + title={Locale.Chat.Actions.ChatList} + onClick={() => navigate(Path.Home)} + /> +
)} -
- } - bordered - title={Locale.Chat.Actions.Export} - onClick={() => { - setShowExport(true); - }} - /> + +
+
setIsEditingMessage(true)} + > + {!session.topic ? DEFAULT_TOPIC : session.topic} +
+
+ {Locale.Chat.SubTitle(session.messages.length)} +
- {showMaxIcon && ( +
: } + icon={} bordered - title={Locale.Chat.Actions.FullScreen} - aria={Locale.Chat.Actions.FullScreen} + title={Locale.Chat.Actions.RefreshTitle} onClick={() => { - config.update( - (config) => (config.tightBorder = !config.tightBorder), - ); + showToast(Locale.Chat.Actions.RefreshToast); + chatStore.summarizeSession(true, session); }} />
- )} + {!isMobileScreen && ( +
+ } + bordered + title={Locale.Chat.EditMessage.Title} + aria={Locale.Chat.EditMessage.Title} + onClick={() => setIsEditingMessage(true)} + /> +
+ )} +
+ } + bordered + title={Locale.Chat.Actions.Export} + onClick={() => { + setShowExport(true); + }} + /> +
+ {showMaxIcon && ( +
+ : } + bordered + title={Locale.Chat.Actions.FullScreen} + aria={Locale.Chat.Actions.FullScreen} + onClick={() => { + config.update( + (config) => (config.tightBorder = !config.tightBorder), + ); + }} + /> +
+ )} +
+ +
+
+
+
onChatBodyScroll(e.currentTarget)} + onMouseDown={() => inputRef.current?.blur()} + onTouchStart={() => { + inputRef.current?.blur(); + setAutoScroll(false); + }} + > + {messages.map((message, i) => { + const isUser = message.role === "user"; + const isContext = i < context.length; + const showActions = + i > 0 && + !(message.preview || message.content.length === 0) && + !isContext; + const showTyping = message.preview || message.streaming; - -
+ const shouldShowClearContextDivider = + i === clearContextIndex - 1; -
onChatBodyScroll(e.currentTarget)} - onMouseDown={() => inputRef.current?.blur()} - onTouchStart={() => { - inputRef.current?.blur(); - setAutoScroll(false); - }} - > - {messages.map((message, i) => { - const isUser = message.role === "user"; - const isContext = i < context.length; - const showActions = - i > 0 && - !(message.preview || message.content.length === 0) && - !isContext; - const showTyping = message.preview || message.streaming; - - const shouldShowClearContextDivider = i === clearContextIndex - 1; - - return ( - -
-
-
-
-
- } - aria={Locale.Chat.Actions.Edit} - onClick={async () => { - const newMessage = await showPrompt( - Locale.Chat.Actions.Edit, - getMessageTextContent(message), - 10, - ); - let newContent: string | MultimodalContent[] = - newMessage; - const images = getMessageImages(message); - if (images.length > 0) { - newContent = [{ type: "text", text: newMessage }]; - for (let i = 0; i < images.length; i++) { - newContent.push({ - type: "image_url", - image_url: { - url: images[i], - }, - }); - } - } - chatStore.updateCurrentSession((session) => { - const m = session.mask.context - .concat(session.messages) - .find((m) => m.id === message.id); - if (m) { - m.content = newContent; - } - }); - }} - > -
- {isUser ? ( - - ) : ( - <> - {["system"].includes(message.role) ? ( - - ) : ( - - )} - - )} -
- {!isUser && ( -
- {message.model} -
- )} - - {showActions && ( -
-
- {message.streaming ? ( - } - onClick={() => onUserStop(message.id ?? i)} - /> - ) : ( - <> - } - onClick={() => onResend(message)} - /> - - } - onClick={() => onDelete(message.id ?? i)} - /> - - } - onClick={() => onPinMessage(message)} - /> - } - onClick={() => - copyToClipboard( - getMessageTextContent(message), - ) - } - /> - {config.ttsConfig.enable && ( - - ) : ( - - ) - } - onClick={() => - openaiSpeech(getMessageTextContent(message)) - } - /> - )} - - )} -
-
- )} -
- {message?.tools?.length == 0 && showTyping && ( -
- {Locale.Chat.Typing} -
- )} - {/*@ts-ignore*/} - {message?.tools?.length > 0 && ( -
- {message?.tools?.map((tool) => ( -
- {tool.isError === false ? ( - - ) : tool.isError === true ? ( - - ) : ( - - )} - {tool?.function?.name} -
- ))} -
- )} -
- +
onRightClick(e, message)} // hard to use - onDoubleClickCapture={() => { - if (!isMobileScreen) return; - setUserInput(getMessageTextContent(message)); - }} - fontSize={fontSize} - fontFamily={fontFamily} - parentRef={scrollRef} - defaultShow={i >= messages.length - 6} - /> - {getMessageImages(message).length == 1 && ( - - )} - {getMessageImages(message).length > 1 && ( -
- {getMessageImages(message).map((image, index) => { - return ( + > +
+
+
+
+ } + aria={Locale.Chat.Actions.Edit} + onClick={async () => { + const newMessage = await showPrompt( + Locale.Chat.Actions.Edit, + getMessageTextContent(message), + 10, + ); + let newContent: string | MultimodalContent[] = + newMessage; + const images = getMessageImages(message); + if (images.length > 0) { + newContent = [ + { type: "text", text: newMessage }, + ]; + for (let i = 0; i < images.length; i++) { + newContent.push({ + type: "image_url", + image_url: { + url: images[i], + }, + }); + } + } + chatStore.updateTargetSession( + session, + (session) => { + const m = session.mask.context + .concat(session.messages) + .find((m) => m.id === message.id); + if (m) { + m.content = newContent; + } + }, + ); + }} + > +
+ {isUser ? ( + + ) : ( + <> + {["system"].includes(message.role) ? ( + + ) : ( + + )} + + )} +
+ {!isUser && ( +
+ {message.model} +
+ )} + + {showActions && ( +
+
+ {message.streaming ? ( + } + onClick={() => onUserStop(message.id ?? i)} + /> + ) : ( + <> + } + onClick={() => onResend(message)} + /> + + } + onClick={() => onDelete(message.id ?? i)} + /> + + } + onClick={() => onPinMessage(message)} + /> + } + onClick={() => + copyToClipboard( + getMessageTextContent(message), + ) + } + /> + {config.ttsConfig.enable && ( + + ) : ( + + ) + } + onClick={() => + openaiSpeech( + getMessageTextContent(message), + ) + } + /> + )} + + )} +
+
+ )} +
+ {message?.tools?.length == 0 && showTyping && ( +
+ {Locale.Chat.Typing} +
+ )} + {/*@ts-ignore*/} + {message?.tools?.length > 0 && ( +
+ {message?.tools?.map((tool) => ( +
+ {tool.isError === false ? ( + + ) : tool.isError === true ? ( + + ) : ( + + )} + {tool?.function?.name} +
+ ))} +
+ )} +
+ onRightClick(e, message)} // hard to use + onDoubleClickCapture={() => { + if (!isMobileScreen) return; + setUserInput(getMessageTextContent(message)); + }} + fontSize={fontSize} + fontFamily={fontFamily} + parentRef={scrollRef} + defaultShow={i >= messages.length - 6} + /> + {getMessageImages(message).length == 1 && ( - ); - })} + )} + {getMessageImages(message).length > 1 && ( +
+ {getMessageImages(message).map((image, index) => { + return ( + + ); + })} +
+ )} +
+ {message?.audio_url && ( +
+
+ )} + +
+ {isContext + ? Locale.Chat.IsContext + : message.date.toLocaleString()} +
- )} -
- -
- {isContext - ? Locale.Chat.IsContext - : message.date.toLocaleString()} -
-
-
- {shouldShowClearContextDivider && } - - ); - })} -
- -
- - - setShowPromptModal(true)} - scrollToBottom={scrollToBottom} - hitBottom={hitBottom} - uploading={uploading} - showPromptHints={() => { - // Click again to close - if (promptHints.length > 0) { - setPromptHints([]); - return; - } - - inputRef.current?.focus(); - setUserInput("/"); - onSearch(""); - }} - setShowShortcutKeyModal={setShowShortcutKeyModal} - setUserInput={setUserInput} - /> -