diff --git a/.env.template b/.env.template
index 3e3290369..89bab2cb1 100644
--- a/.env.template
+++ b/.env.template
@@ -8,6 +8,16 @@ CODE=your-password
 # You can start service behind a proxy
 PROXY_URL=http://localhost:7890
+# (optional)
+# Default: Empty
+# Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
+GOOGLE_API_KEY=
+
+# (optional)
+# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
+# Google Gemini Pro API url, set if you want to customize Google Gemini Pro API url.
+GOOGLE_URL=
+
 # Override openai api request base url. (optional)
 # Default: https://api.openai.com
 # Examples: http://your-openai-proxy.com
@@ -36,3 +46,4 @@ ENABLE_BALANCE_QUERY=
 # Default: Empty
 # If you want to disable parse settings from url, set this value to 1.
 DISABLE_FAST_LINK=
+
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
index e04e30adb..a25799a21 100644
--- a/.github/workflows/sync.yml
+++ b/.github/workflows/sync.yml
@@ -1,40 +1,40 @@
-name: Upstream Sync
-
-permissions:
-  contents: write
-
-on:
-  schedule:
-    - cron: "0 0 * * *" # every day
-  workflow_dispatch:
-
-jobs:
-  sync_latest_from_upstream:
-    name: Sync latest commits from upstream repo
-    runs-on: ubuntu-latest
-    if: ${{ github.event.repository.fork }}
-
-    steps:
-      # Step 1: run a standard checkout action
-      - name: Checkout target repo
-        uses: actions/checkout@v3
-
-      # Step 2: run the sync action
-      - name: Sync upstream changes
-        id: sync
-        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
-        with:
-          upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
-          upstream_sync_branch: main
-          target_sync_branch: main
-          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
-
-          # Set test_mode true to run tests instead of the true action!!
-          test_mode: false
-
-      - name: Sync check
-        if: failure()
-        run: |
          echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,详细教程请查看:https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0"
-          echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates"
-          exit 1
+#name: Upstream Sync
+#
+#permissions:
+#  contents: write
+#
+#on:
+#  schedule:
+#    - cron: "0 0 * * *" # every day
+#  workflow_dispatch:
+#
+#jobs:
+#  sync_latest_from_upstream:
+#    name: Sync latest commits from upstream repo
+#    runs-on: ubuntu-latest
+#    if: ${{ github.event.repository.fork }}
+#
+#    steps:
+#      # Step 1: run a standard checkout action
+#      - name: Checkout target repo
+#        uses: actions/checkout@v3
+#
+#      # Step 2: run the sync action
+#      - name: Sync upstream changes
+#        id: sync
+#        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
+#        with:
+#          upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
+#          upstream_sync_branch: main
+#          target_sync_branch: main
+#          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
+#
+#          # Set test_mode true to run tests instead of the true action!!
+# test_mode: false +# +# - name: Sync check +# if: failure() +# run: | +# echo "[Error] 由于上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,详细教程请查看:https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E6%89%93%E5%BC%80%E8%87%AA%E5%8A%A8%E6%9B%B4%E6%96%B0" +# echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed tutorial for instructions: https://github.com/Yidadaa/ChatGPT-Next-Web#enable-automatic-updates" +# exit 1 diff --git a/Dockerfile b/Dockerfile index ae37461a0..16c0e3bb8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,6 +24,7 @@ RUN yarn install FROM base AS builder ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" WORKDIR /app @@ -39,6 +40,7 @@ RUN apk add proxychains-ng ENV PROXY_URL="" ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" COPY --from=builder /app/public ./public @@ -52,22 +54,22 @@ EXPOSE 3000 ENV KEEP_ALIVE_TIMEOUT=30 CMD if [ -n "$PROXY_URL" ]; then \ - export HOSTNAME="127.0.0.1"; \ - protocol=$(echo $PROXY_URL | cut -d: -f1); \ - host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ - port=$(echo $PROXY_URL | cut -d: -f3); \ - conf=/etc/proxychains.conf; \ - echo "strict_chain" > $conf; \ - echo "proxy_dns" >> $conf; \ - echo "remote_dns_subnet 224" >> $conf; \ - echo "tcp_read_time_out 15000" >> $conf; \ - echo "tcp_connect_time_out 8000" >> $conf; \ - echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ - echo "localnet ::1/128" >> $conf; \ - echo "[ProxyList]" >> $conf; \ - echo "$protocol $host $port" >> $conf; \ - cat /etc/proxychains.conf; \ - proxychains -f $conf node server.js; \ + export HOSTNAME="127.0.0.1"; \ + protocol=$(echo $PROXY_URL | cut -d: -f1); \ + host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ + port=$(echo $PROXY_URL | cut -d: -f3); \ + conf=/etc/proxychains.conf; \ + echo "strict_chain" > $conf; \ + echo "proxy_dns" >> $conf; \ + echo "remote_dns_subnet 224" >> $conf; \ + echo "tcp_read_time_out 15000" >> $conf; \ + echo "tcp_connect_time_out 8000" >> $conf; \ + echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ + echo "localnet ::1/128" >> $conf; \ + echo "[ProxyList]" >> $conf; \ + echo "$protocol $host $port" >> $conf; \ + cat /etc/proxychains.conf; \ + proxychains -f $conf node server.js; \ else \ - node server.js; \ + node server.js; \ fi diff --git a/README.md b/README.md index f9c35227f..69b649926 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,22 @@
 <img src="./docs/images/icon.svg" alt="icon"/>
 
-<h1 align="center">ChatGPT Next Web</h1>
+<h1 align="center">NextChat (ChatGPT Next Web)</h1>
 
English / [简体中文](./README_CN.md) -One-Click to get well-designed cross-platform ChatGPT web UI. +One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. -一键免费部署你的跨平台私人 ChatGPT 应用。 +一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 [![Web][Web-image]][web-url] [![Windows][Windows-image]][download-url] [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://chatgpt.nextweb.fun/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa) +[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa) -[网页版](https://chatgpt.nextweb.fun/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) +[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) [web-url]: https://chatgpt.nextweb.fun [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases @@ -25,7 +25,7 @@ One-Click to get well-designed cross-platform ChatGPT web UI. [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) +[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) [![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) @@ -191,6 +191,14 @@ Azure Api Key. Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions). +### `GOOGLE_API_KEY` (optional) + +Google Gemini Pro Api Key. + +### `GOOGLE_URL` (optional) + +Google Gemini Pro Api Url. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty diff --git a/README_CN.md b/README_CN.md index 770212406..0f390a51c 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,9 +1,9 @@
 <img src="./docs/images/icon.svg" alt="预览"/>
 
-<h1 align="center">ChatGPT Next Web</h1>
+<h1 align="center">NextChat</h1>
 
-一键免费部署你的私人 ChatGPT 网页应用。 +一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt) @@ -21,7 +21,7 @@ 1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys); 2. 点击右侧按钮开始部署: - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE; + [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE; 3. 部署完毕后,即可开始使用; 4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。 @@ -106,6 +106,14 @@ Azure 密钥。 Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。 +### `GOOGLE_API_KEY` (optional) + +Google Gemini Pro 密钥. + +### `GOOGLE_URL` (optional) + +Google Gemini Pro Api Url. + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 diff --git a/app/api/auth.ts b/app/api/auth.ts index 2f06a31d9..e52e9c360 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -1,7 +1,7 @@ import { NextRequest } from "next/server"; import { getServerSideConfig } from "../config/server"; import md5 from "spark-md5"; -import { ACCESS_CODE_PREFIX } from "../constant"; +import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant"; export function getIP(req: NextRequest) { let ip = req.headers.get("x-real-ip") ?? req.ip; @@ -17,15 +17,19 @@ export function getIP(req: NextRequest) { function parseApiKey(bearToken: string) { const token = bearToken.trim().replaceAll("Bearer ", "").trim(); - const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX); + const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX); return { - accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length), - apiKey: isOpenAiKey ? token : "", + accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length), + apiKey: isApiKey ? token : "", }; } -export function auth(req: NextRequest, isAzure?: boolean) { +export function auth( + req: NextRequest, + modelProvider: ModelProvider, + isAzure?: boolean, +) { const authToken = req.headers.get("Authorization") ?? ""; // check if it is openai api key or user token @@ -50,22 +54,23 @@ export function auth(req: NextRequest, isAzure?: boolean) { if (serverConfig.hideUserApiKey && !!apiKey) { return { error: true, - msg: "you are not allowed to access openai with your own api key", + msg: "you are not allowed to access with your own api key", }; } // if user does not provide an api key, inject system api key if (!apiKey) { - const serverApiKey = isAzure - ? 
serverConfig.azureApiKey - : serverConfig.apiKey; + const serverConfig = getServerSideConfig(); - if (serverApiKey) { + const systemApiKey = + modelProvider === ModelProvider.GeminiPro + ? serverConfig.googleApiKey + : isAzure + ? serverConfig.azureApiKey + : serverConfig.apiKey; + if (systemApiKey) { console.log("[Auth] use system api key"); - req.headers.set( - "Authorization", - `${isAzure ? "" : "Bearer "}${serverApiKey}`, - ); + req.headers.set("Authorization", `Bearer ${systemApiKey}`); } else { console.log("[Auth] admin did not provide an api key"); } diff --git a/app/api/common.ts b/app/api/common.ts index a17af4127..946125c6f 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,6 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant"; +import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant"; import { collectModelTable } from "../utils/model"; import { makeAzurePath } from "../azure"; @@ -14,8 +14,23 @@ export async function requestOpenai( ) { const controller = new AbortController(); - const authValue = req.headers.get("Authorization") ?? ""; - const authHeaderName = isAzure ? "api-key" : "Authorization"; + var authValue, + authHeaderName = ""; + if (isAzure) { + authValue = + req.headers + .get("Authorization") + ?.trim() + .replaceAll("Bearer ", "") + .trim() ?? ""; + + authHeaderName = "api-key"; + } else { + authValue = req.headers.get("Authorization") ?? ""; + authHeaderName = "Authorization"; + } + // const authValue = req.headers.get("Authorization") ?? ""; + // const authHeaderName = isAzure ? "api-key" : "Authorization"; let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( "/api/openai/", @@ -100,6 +115,12 @@ export async function requestOpenai( // to disable nginx buffering newHeaders.set("X-Accel-Buffering", "no"); + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts new file mode 100644 index 000000000..869bd5076 --- /dev/null +++ b/app/api/google/[...path]/route.ts @@ -0,0 +1,121 @@ +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { getServerSideConfig } from "@/app/config/server"; +import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Google Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const controller = new AbortController(); + + const serverConfig = getServerSideConfig(); + + let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + 
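// Allow up to ten minutes for slow Gemini generations before aborting the upstream request. + 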
const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const authResult = auth(req, ModelProvider.GeminiPro); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + const bearToken = req.headers.get("Authorization") ?? ""; + const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + + const key = token ? token : serverConfig.googleApiKey; + + if (!key) { + return NextResponse.json( + { + error: true, + message: `missing GOOGLE_API_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + const fetchUrl = `${baseUrl}/${path}?key=${key}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + "Cache-Control": "no-store", + }, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/midjourney/[...path]/route.ts b/app/api/midjourney/[...path]/route.ts index e0a4fa743..ce40a8aa3 100644 --- a/app/api/midjourney/[...path]/route.ts +++ b/app/api/midjourney/[...path]/route.ts @@ -1,5 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; +import { ModelProvider } from "@/app/constant"; const BASE_URL = process.env.MIDJOURNEY_PROXY_URL ?? null; const MIDJOURNEY_PROXY_KEY = process.env.MIDJOURNEY_PROXY_KEY ?? 
null;
@@ -40,7 +41,7 @@ async function handle(
     jsonBody = {};
   }
 
-  const authResult = auth(req);
+  const authResult = auth(req, ModelProvider.GPT);
   // if (authResult.error) {
   //   return NextResponse.json(authResult, {
   //     status: 401,
   //   });
   // }
diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts
index 3807d3a4e..e153138aa 100644
--- a/app/api/openai/[...path]/route.ts
+++ b/app/api/openai/[...path]/route.ts
@@ -1,6 +1,11 @@
 import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
 import { getServerSideConfig } from "@/app/config/server";
-import { OpenaiPath, AZURE_PATH, AZURE_MODELS } from "@/app/constant";
+import {
+  ModelProvider,
+  OpenaiPath,
+  AZURE_PATH,
+  AZURE_MODELS,
+} from "@/app/constant";
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth, getIP } from "../../auth";
@@ -54,6 +59,13 @@ async function handle(
       },
     );
   }
+
+  // const authResult = auth(req, ModelProvider.GPT);
+  // if (authResult.error) {
+  //   return NextResponse.json(authResult, {
+  //     status: 401,
+  //   });
+  // }
 
   let cloneBody, jsonBody;
 
   try {
@@ -96,7 +108,7 @@ async function handle(
   const isAzure = AZURE_MODELS.includes(jsonBody?.model as string);
   // console.log("[Models]", jsonBody?.model);
 
-  const authResult = auth(req, isAzure);
+  const authResult = auth(req, ModelProvider.GPT, isAzure);
   // if (authResult.error) {
   //   return NextResponse.json(authResult, {
   //     status: 401,
diff --git a/app/client/api.ts b/app/client/api.ts
index e5c8e6284..48deb0fae 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -3,11 +3,12 @@ import {
   ACCESS_CODE_PREFIX,
   Azure,
   AZURE_MODELS,
+  ModelProvider,
   ServiceProvider,
 } from "../constant";
-import { ChatMessage, ModelType, useAccessStore } from "../store";
+import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
-
+import { GeminiProApi } from "./platforms/google";
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
 
@@ -16,6 +17,7 @@ export const Models = [
   "gpt-4-0613",
   "gpt-4-32k",
   "midjourney",
+  "gemini-pro",
 ] as const;
 export type ChatModel = ModelType;
 
@@ -52,6 +54,13 @@ export interface LLMModel {
   name: string;
   describe: string;
   available: boolean;
+  provider: LLMModelProvider;
+}
+
+export interface LLMModelProvider {
+  id: string;
+  providerName: string;
+  providerType: string;
 }
 
 export abstract class LLMApi {
@@ -84,7 +93,11 @@ interface ChatProvider {
 export class ClientApi {
   public llm: LLMApi;
 
-  constructor() {
+  constructor(provider: ModelProvider = ModelProvider.GPT) {
+    if (provider === ModelProvider.GeminiPro) {
+      this.llm = new GeminiProApi();
+      return;
+    }
     this.llm = new ChatGPTApi();
   }
 
@@ -104,7 +117,7 @@ export class ClientApi {
       {
         from: "human",
         value:
-          "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web",
+          "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
       },
     ]);
     // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
@@ -134,19 +147,22 @@ export class ClientApi {
   }
 }
 
-export const api = new ClientApi();
-
 export function getHeaders(isAzure?: boolean) {
   const accessStore = useAccessStore.getState();
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     "x-requested-with": "XMLHttpRequest",
+    Accept: "application/json",
   };
-  // const isAzure = AZURE_MODELS.includes(jsonBody?.model as string)
+  const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
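+  // gemini-pro carries its own key, so the credential is chosen per provider below.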
+  const isGoogle = modelConfig.model === "gemini-pro";
   // const isAzure = accessStore.provider === ServiceProvider.Azure;
-  const authHeader = isAzure ? "api-key" : "Authorization";
-  const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
+  const apiKey = isGoogle
+    ? accessStore.googleApiKey
+    : isAzure
+    ? accessStore.azureApiKey
+    : accessStore.openaiApiKey;
 
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
new file mode 100644
index 000000000..c35e93cb3
--- /dev/null
+++ b/app/client/platforms/google.ts
@@ -0,0 +1,222 @@
+import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import Locale from "../../locales";
+import { getServerSideConfig } from "@/app/config/server";
+export class GeminiProApi implements LLMApi {
+  extractMessage(res: any) {
+    console.log("[Response] gemini-pro response: ", res);
+
+    return (
+      res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+      res?.error?.message ||
+      ""
+    );
+  }
+  async chat(options: ChatOptions): Promise<void> {
+    const messages = options.messages.map((v) => ({
+      role: v.role.replace("assistant", "model").replace("system", "user"),
+      parts: [{ text: v.content }],
+    }));
+
+    // google requires that roles of neighboring messages must not be the same
+    for (let i = 0; i < messages.length - 1; ) {
+      // Check if the current and next item share the same role
+      if (messages[i].role === messages[i + 1].role) {
+        // Concatenate the 'parts' of the current and next item
+        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
+        // Remove the next item
+        messages.splice(i + 1, 1);
+      } else {
+        // Move to the next item
+        i++;
+      }
+    }
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
+    };
+    const requestPayload = {
+      contents: messages,
+      generationConfig: {
+        // stopSequences: [
+        //   "Title"
+        // ],
+        temperature: modelConfig.temperature,
+        maxOutputTokens: modelConfig.max_tokens,
+        topP: modelConfig.top_p,
+        // "topK": modelConfig.top_k,
+      },
+    };
+
+    console.log("[Request] google payload: ", requestPayload);
+
+    // todo: support stream later
+    const shouldStream = false;
+    const controller = new AbortController();
+    options.onController?.(controller);
+    try {
+      const chatPath = this.path(Google.ChatPath);
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+      if (shouldStream) {
+        let responseText = "";
+        let remainText = "";
+        let finished = false;
+
+        // animate response to make it look smooth
+        function animateResponseText() {
+          if (finished || controller.signal.aborted) {
+            responseText += remainText;
+            console.log("[Response Animation] finished");
+            return;
+          }
+
+          if (remainText.length > 0) {
+            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+            const fetchText = remainText.slice(0, fetchCount);
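+            // flush a chunk of the pending buffer into the rendered response text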
+            responseText += fetchText;
+            remainText = remainText.slice(fetchCount);
+            options.onUpdate?.(responseText, fetchText);
+          }
+
+          requestAnimationFrame(animateResponseText);
+        }
+
+        // start animation
+        animateResponseText();
+
+        const finish = () => {
+          if (!finished) {
+            finished = true;
+            options.onFinish(responseText + remainText);
+          }
+        };
+
+        controller.signal.onabort = finish;
+
+        fetchEventSource(chatPath, {
+          ...chatPayload,
+          async onopen(res) {
+            clearTimeout(requestTimeoutId);
+            const contentType = res.headers.get("content-type");
+            console.log(
+              "[Google] request response content type: ",
+              contentType,
+            );
+
+            if (contentType?.startsWith("text/plain")) {
+              responseText = await res.clone().text();
+              return finish();
+            }
+
+            if (
+              !res.ok ||
+              !res.headers
+                .get("content-type")
+                ?.startsWith(EventStreamContentType) ||
+              res.status !== 200
+            ) {
+              const responseTexts = [responseText];
+              let extraInfo = await res.clone().text();
+              try {
+                const resJson = await res.clone().json();
+                extraInfo = prettyObject(resJson);
+              } catch {}
+
+              if (res.status === 401) {
+                responseTexts.push(Locale.Error.Unauthorized);
+              }
+
+              if (extraInfo) {
+                responseTexts.push(extraInfo);
+              }
+
+              responseText = responseTexts.join("\n\n");
+
+              return finish();
+            }
+          },
+          onmessage(msg) {
+            if (msg.data === "[DONE]" || finished) {
+              return finish();
+            }
+            const text = msg.data;
+            try {
+              const json = JSON.parse(text) as {
+                choices: Array<{
+                  delta: {
+                    content: string;
+                  };
+                }>;
+              };
+              const delta = json.choices[0]?.delta?.content;
+              if (delta) {
+                remainText += delta;
+              }
+            } catch (e) {
+              console.error("[Request] parse error", text);
+            }
+          },
+          onclose() {
+            finish();
+          },
+          onerror(e) {
+            options.onError?.(e);
+            throw e;
+          },
+          openWhenHidden: true,
+        });
+      } else {
+        const res = await fetch(chatPath, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+
+        if (resJson?.promptFeedback?.blockReason) {
+          // being blocked
+          options.onError?.(
+            new Error(
+              "Message is being blocked for reason: " +
+                resJson.promptFeedback.blockReason,
+            ),
+          );
+        }
+        const message = this.extractMessage(resJson);
+        options.onFinish(message);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+  usage(): Promise<LLMUsage> {
+    throw new Error("Method not implemented.");
+  }
+  async models(): Promise<LLMModel[]> {
+    return [];
+  }
+  path(path: string): string {
+    return "/api/google/" + path;
+  }
+}
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index f1ce369a8..c18f35a19 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -333,6 +333,11 @@ export class ChatGPTApi implements LLMApi {
     return chatModels.map((m) => ({
       name: m.id,
       available: true,
+      provider: {
+        id: "openai",
+        providerName: "OpenAI",
+        providerType: "openai",
+      },
       describe: "",
     }));
   }
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index 7962d46be..57118349b 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -64,6 +64,17 @@ export function AuthPage() {
             );
           }}
         />
+        <input
+          className={styles["auth-input"]}
+          type="password"
+          placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
+          value={accessStore.googleApiKey}
+          onChange={(e) => {
+            accessStore.update(
+              (access) => (access.googleApiKey = e.currentTarget.value),
+            );
+          }}
+        />
       ) : null}
diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index 03aac05f2..a2a50320d 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -10,7 +10,10 @@ import BotIcon from "../icons/bot.svg";
 import BlackBotIcon from "../icons/black-bot.svg";
 
 export function getEmojiUrl(unified: string, style: EmojiStyle) {
-  return `https://cdn.staticfile.org/emoji-datasource-apple/14.0.0/img/${style}/64/${unified}.png`;
+  // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
+  // Old CDN broken, so I had to switch to this one
+  // Author: https://github.com/H0llyW00dzZ
+  return `https://cdn.jsdelivr.net/npm/emoji-datasource-apple/img/${style}/64/${unified}.png`;
 }
 
 export function AvatarPicker(props: {
diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx
index 4ca6427a7..dff17e4ab 100644
--- a/app/components/exporter.tsx
+++ b/app/components/exporter.tsx
@@ -29,10 +29,11 @@ import NextImage from "next/image";
 import { toBlob, toPng } from "html-to-image";
 
 import { DEFAULT_MASK_AVATAR } from "../store/mask";
-import { api } from "../client/api";
+
 import { prettyObject } from "../utils/format";
-import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
+import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
 import { getClientConfig } from "../config/client";
+import { ClientApi } from "../client/api";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
 });
@@ -301,10 +302,17 @@ export function PreviewActions(props: {
 }) {
   const [loading, setLoading] = useState(false);
   const [shouldExport, setShouldExport] = useState(false);
-
+  const config = useAppConfig();
   const onRenderMsgs = (msgs: ChatMessage[]) => {
     setShouldExport(false);
+    var api: ClientApi;
+    if (config.modelConfig.model === "gemini-pro") {
+      api = new ClientApi(ModelProvider.GeminiPro);
+    } else {
+      api = new ClientApi(ModelProvider.GPT);
+    }
+
     api
       .share(msgs)
       .then((res) => {
@@ -530,7 +538,7 @@ export function ImagePreviewer(props: {
-            <div className={styles["main-title"]}>ChatGPT Next Web</div>
+            <div className={styles["main-title"]}>NextChat</div>
             <div className={styles["sub-title"]}>
github.com/Yidadaa/ChatGPT-Next-Web
diff --git a/app/components/home.tsx b/app/components/home.tsx index 22f4a4b26..7c6a3f423 100644 --- a/app/components/home.tsx +++ b/app/components/home.tsx @@ -12,7 +12,7 @@ import LoadingIcon from "../icons/three-dots.svg"; import { getCSSVar, useMobileScreen } from "../utils"; import dynamic from "next/dynamic"; -import { Path, SlotID } from "../constant"; +import { ModelProvider, Path, SlotID } from "../constant"; import { ErrorBoundary } from "./error"; import { getISOLang, getLang } from "../locales"; @@ -27,7 +27,7 @@ import { SideBar } from "./sidebar"; import { useAppConfig } from "@/app/store"; import { AuthPage } from "./auth"; import { getClientConfig } from "../config/client"; -import { api } from "../client/api"; +import { ClientApi } from "../client/api"; import { useAccessStore } from "../store"; export function Loading(props: { noLogo?: boolean }) { @@ -173,6 +173,12 @@ function Screen() { export function useLoadData() { const config = useAppConfig(); + var api: ClientApi; + if (config.modelConfig.model === "gemini-pro") { + api = new ClientApi(ModelProvider.GeminiPro); + } else { + api = new ClientApi(ModelProvider.GPT); + } useEffect(() => { (async () => { const models = await api.llm.models(); diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx index 214a18c79..b9f811674 100644 --- a/app/components/model-config.tsx +++ b/app/components/model-config.tsx @@ -29,7 +29,7 @@ export function ModelConfigList(props: { .filter((v) => v.available) .map((v, i) => ( ))} @@ -91,79 +91,84 @@ export function ModelConfigList(props: { } > - - { - props.updateConfig( - (config) => - (config.presence_penalty = - ModalConfigValidator.presence_penalty( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - { - props.updateConfig( - (config) => - (config.frequency_penalty = - ModalConfigValidator.frequency_penalty( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - + {props.modelConfig.model === "gemini-pro" ? null : ( + <> + + { + props.updateConfig( + (config) => + (config.presence_penalty = + ModalConfigValidator.presence_penalty( + e.currentTarget.valueAsNumber, + )), + ); + }} + > + - - - props.updateConfig( - (config) => - (config.enableInjectSystemPrompts = e.currentTarget.checked), - ) - } - > - + + { + props.updateConfig( + (config) => + (config.frequency_penalty = + ModalConfigValidator.frequency_penalty( + e.currentTarget.valueAsNumber, + )), + ); + }} + > + - - - props.updateConfig( - (config) => (config.template = e.currentTarget.value), - ) - } - > - + + + props.updateConfig( + (config) => + (config.enableInjectSystemPrompts = + e.currentTarget.checked), + ) + } + > + + + + props.updateConfig( + (config) => (config.template = e.currentTarget.value), + ) + } + > + + + )} { const isOpenAiUrl = accessStore.openaiUrl.includes(OPENAI_BASE_URL); + return ( accessStore.hideBalanceQuery || isOpenAiUrl || @@ -959,109 +961,168 @@ export function Settings() { {/* */} {/* */} - {/* {accessStore.provider === "OpenAI" ? 
(*/} - {/* <>*/} - {/* */} + {/* */} + {/* */} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.openaiUrl = e.currentTarget.value),*/} + {/* )*/} {/* }*/} - {/* >*/} - {/* */} - {/* accessStore.update(*/} - {/* (access) =>*/} - {/* (access.openaiUrl = e.currentTarget.value),*/} - {/* )*/} - {/* }*/} - {/* >*/} - {/* */} - {/* */} - {/* {*/} - {/* accessStore.update(*/} - {/* (access) =>*/} - {/* (access.openaiApiKey = e.currentTarget.value),*/} - {/* );*/} - {/* }}*/} - {/* />*/} - {/* */} - {/* */} - {/* ) : (*/} - {/* <>*/} - {/* */} + {/* */} + {/* */} + {/* */} - {/* */} - {/* accessStore.update(*/} - {/* (access) =>*/} - {/* (access.azureUrl = e.currentTarget.value),*/} - {/* )*/} - {/* }*/} - {/* >*/} - {/* */} - {/* */} - {/* {*/} - {/* accessStore.update(*/} - {/* (access) =>*/} - {/* (access.azureApiKey = e.currentTarget.value),*/} - {/* );*/} - {/* }}*/} - {/* />*/} - {/* */} - {/* {*/} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.openaiApiKey = e.currentTarget.value),*/} + {/* );*/} + {/* }}*/} + {/* />*/} + {/* */} + {/* */} + {/* ) : accessStore.provider === "Azure" ? (*/} + {/* <>*/} + {/* */} + {/* */} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.azureUrl = e.currentTarget.value),*/} + {/* )*/} {/* }*/} - {/* >*/} - {/* */} - {/* accessStore.update(*/} - {/* (access) =>*/} - {/* (access.azureApiVersion =*/} - {/* e.currentTarget.value),*/} - {/* )*/} - {/* }*/} - {/* >*/} - {/* */} - {/* */} - {/* )}*/} - {/* */} - {/* )}*/} - {/* */} - {/* )}*/} + {/* >*/} + {/* */} + {/* */} + {/* {*/} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.azureApiKey = e.currentTarget.value),*/} + {/* );*/} + {/* }}*/} + {/* />*/} + {/* */} + {/* */} + {/* */} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.azureApiVersion =*/} + {/* e.currentTarget.value),*/} + {/* )*/} + {/* }*/} + {/* >*/} + {/* */} + {/* */} + {/* ) : accessStore.provider === "Google" ? (*/} + {/* <>*/} + {/* */} + {/* */} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.googleUrl = e.currentTarget.value),*/} + {/* )*/} + {/* }*/} + {/* >*/} + {/* */} + {/* */} + {/* {*/} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.googleApiKey = e.currentTarget.value),*/} + {/* );*/} + {/* }}*/} + {/* />*/} + {/* */} + {/* */} + {/* */} + {/* accessStore.update(*/} + {/* (access) =>*/} + {/* (access.googleApiVersion =*/} + {/* e.currentTarget.value),*/} + {/* )*/} + {/* }*/} + {/* >*/} + {/* */} + {/* */} + {/* ) : null}*/} + {/* */} + {/* )}*/} + {/* */} + {/*)}*/} {/*{!shouldHideBalanceQuery && !clientConfig?.isApp ? (*/} {/* { .join(","); } + // const isAzure = !!process.env.AZURE_URL; + const isGoogle = !!process.env.GOOGLE_API_KEY; // 需要一个函数来判断请求中模型是否为微软的。 // 当前逻辑,gpt-4-32k模型为微软,别的不是 // const isAzure = !!process.env.AZURE_URL; @@ -85,6 +91,10 @@ export const getServerSideConfig = () => { azureApiKey: process.env.AZURE_API_KEY ?? "", azureApiVersion: process.env.AZURE_API_VERSION ?? 
"", + isGoogle, + googleApiKey: process.env.GOOGLE_API_KEY, + googleUrl: process.env.GOOGLE_URL, + needCode: ACCESS_CODES.size > 0, code: process.env.CODE, codes: ACCESS_CODES, diff --git a/app/constant.ts b/app/constant.ts index ae3dae370..b64a6cd58 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -12,6 +12,8 @@ export const DEFAULT_CORS_HOST = "https://a.nextweb.fun"; export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`; export const OPENAI_BASE_URL = "https://api.openai.com"; +export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/"; + export enum Path { Home = "/", Chat = "/chat", @@ -66,6 +68,12 @@ export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown"; export enum ServiceProvider { OpenAI = "OpenAI", Azure = "Azure", + Google = "Google", +} + +export enum ModelProvider { + GPT = "GPT", + GeminiPro = "GeminiPro", } export const OpenaiPath = { @@ -82,6 +90,14 @@ export const Azure = { ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}", }; +export const Google = { + ExampleEndpoint: + "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent", + ChatPath: "v1beta/models/gemini-pro:generateContent", + + // /api/openai/v1/chat/completions +}; + export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang export const DEFAULT_SYSTEM_TEMPLATE = ` You are ChatGPT, a large language model trained by OpenAI. @@ -105,37 +121,57 @@ export const DEFAULT_MODELS = [ // name: "gpt-4", // available: true, // }, - { - name: "gpt-3.5-turbo-16k", - describe: "GPT-3,最快,笨", - available: false, - }, + // { + // name: "gpt-3.5-turbo-16k", + // describe: "GPT-3,最快,笨", + // available: false, + // }, { name: "gpt-3.5-turbo-1106", describe: "GPT-3,最快,笨,最便宜", available: true, + provider: { + id: "openai", + providerName: "OpenAI", + providerType: "openai", + }, }, - { - name: "gpt-4", - describe: "GPT-4,聪明,贵,慢", - available: false, - }, + // { + // name: "gpt-4", + // describe: "GPT-4,聪明,贵,慢", + // available: false, + // }, { name: "gpt-4-1106-preview", describe: "GPT-4,又强又快,推荐", available: true, + provider: { + id: "openai", + providerName: "OpenAI", + providerType: "openai", + }, }, { - name: "gpt-4-32k", - describe: "GPT-4,聪明,慢,但是白嫖", - available: false, - }, - { - name: "gpt-4-all", - describe: "GPT-4全能版,联网绘图多模态,又慢又贵", + name: "gemini-pro", available: false, + describe: "谷歌的,不知道杂用", + provider: { + id: "google", + providerName: "Google", + providerType: "google", + }, }, // { + // name: "gpt-4-32k", + // describe: "GPT-4,聪明,慢,但是白嫖", + // available: false, + // }, + // { + // name: "gpt-4-all", + // describe: "GPT-4全能版,联网绘图多模态,又慢又贵", + // available: false, + // }, + // { // name: "gpt-4v", // describe: "GPT-4,官方网页版,最聪明,贵且慢", // available: true, @@ -149,6 +185,11 @@ export const DEFAULT_MODELS = [ name: "midjourney", describe: "绘图用,不用选", available: false, + provider: { + id: "openai", + providerName: "OpenAI", + providerType: "openai", + }, }, ] as const; diff --git a/app/locales/cn.ts b/app/locales/cn.ts index c9e6c5681..04ffb5b7a 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -15,7 +15,7 @@ const cn = { Auth: { Title: "需要密码", Tips: "管理员开启了密码验证,请在下方填入访问码", - SubTips: "或者输入你的 OpenAI API 密钥", + SubTips: "或者输入你的 OpenAI 或 Google API 密钥", Input: "在此处填写访问码", Confirm: "确认", Later: "稍后再说", @@ -353,6 +353,23 @@ const cn = { SubTitle: "选择指定的部分版本", }, }, + Google: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义 Google AI Studio API Key 绕过密码访问限制", + Placeholder: "Google AI Studio API Key", + }, + + 
Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + + ApiVerion: { + Title: "接口版本 (gemini-pro api version)", + SubTitle: "选择指定的部分版本", + }, + }, CustomModel: { Title: "自定义模型名", SubTitle: "增加自定义模型可选项,使用英文逗号隔开", @@ -389,7 +406,7 @@ const cn = { Prompt: { History: (content: string) => "这是历史聊天总结作为前情提要:" + content, Topic: - "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,如果没有主题,请直接返回“闲聊”", + "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,不要加粗,如果没有主题,请直接返回“闲聊”", Summarize: "简要总结一下对话内容,用作后续的上下文提示 prompt,控制在 200 字以内", }, diff --git a/app/locales/en.ts b/app/locales/en.ts index 79bd1438c..fbe6c3d5c 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -17,7 +17,7 @@ const en: LocaleType = { Auth: { Title: "Need Access Code", Tips: "Please enter access code below", - SubTips: "Or enter your OpenAI API Key", + SubTips: "Or enter your OpenAI or Google API Key", Input: "access code", Confirm: "Confirm", Later: "Later", @@ -360,6 +360,24 @@ const en: LocaleType = { Title: "Custom Models", SubTitle: "Custom model options, seperated by comma", }, + Google: { + ApiKey: { + Title: "API Key", + SubTitle: + "Bypass password access restrictions using a custom Google AI Studio API Key", + Placeholder: "Google AI Studio API Key", + }, + + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example:", + }, + + ApiVerion: { + Title: "API Version (gemini-pro api version)", + SubTitle: "Select a specific part version", + }, + }, }, Model: "Model", @@ -395,7 +413,7 @@ const en: LocaleType = { History: (content: string) => "This is a summary of the chat history as a recap: " + content, Topic: - "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, or additional text. Remove enclosing quotation marks.", + "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, bold text, or additional text. 
Remove enclosing quotation marks.", Summarize: "Summarize the discussion briefly in 200 words or less to use as a prompt for future context.", }, diff --git a/app/masks/cn.ts b/app/masks/cn.ts index 86a073813..efeecf802 100644 --- a/app/masks/cn.ts +++ b/app/masks/cn.ts @@ -58,7 +58,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -84,7 +84,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -110,7 +110,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -136,7 +136,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -162,7 +162,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -188,7 +188,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -214,7 +214,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -240,7 +240,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -272,7 +272,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 0.5, max_tokens: 2000, presence_penalty: 0, @@ -298,7 +298,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -331,7 +331,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -364,7 +364,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -422,7 +422,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 1, max_tokens: 2000, presence_penalty: 0, @@ -454,7 +454,7 @@ export const CN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-4", + model: "gpt-4-1106-preview", temperature: 0.5, max_tokens: 2000, presence_penalty: 0, diff --git a/app/masks/en.ts b/app/masks/en.ts index 6e8678df6..0a3939de5 100644 --- a/app/masks/en.ts +++ b/app/masks/en.ts @@ -14,7 +14,7 @@ export const EN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-4", + model: "gpt-4-1106-preview", temperature: 0.3, max_tokens: 2000, presence_penalty: 0, @@ -60,7 +60,7 @@ export const EN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-4", + model: "gpt-4-1106-preview", temperature: 0.5, max_tokens: 2000, presence_penalty: 0, @@ -86,7 +86,7 @@ export 
const EN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-3.5-turbo-16k", + model: "gpt-3.5-turbo-1106", temperature: 0.5, max_tokens: 2000, presence_penalty: 0, @@ -118,7 +118,7 @@ export const EN_MASKS: BuiltinMask[] = [ }, ], modelConfig: { - model: "gpt-4", + model: "gpt-4-1106-preview", temperature: 0.5, max_tokens: 2000, presence_penalty: 0, diff --git a/app/store/access.ts b/app/store/access.ts index 881853737..8f8af562c 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -29,6 +29,11 @@ const DEFAULT_ACCESS_STATE = { azureApiKey: "", azureApiVersion: "2023-05-15", + // google ai studio + googleUrl: "", + googleApiKey: "", + googleApiVersion: "v1", + // server config needCode: true, hideUserApiKey: false, @@ -59,6 +64,10 @@ export const useAccessStore = createPersistStore( // return ensure(get(), ["azureUrl", "azureApiKey", "azureApiVersion"]); }, + isValidGoogle() { + return ensure(get(), ["googleApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -66,6 +75,7 @@ export const useAccessStore = createPersistStore( return ( this.isValidOpenAI() || this.isValidAzure() || + this.isValidGoogle() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) ); @@ -93,6 +103,7 @@ export const useAccessStore = createPersistStore( token: string; openaiApiKey: string; azureApiVersion: string; + googleApiKey: string; }; state.openaiApiKey = state.token; state.azureApiVersion = "2023-05-15"; diff --git a/app/store/chat.ts b/app/store/chat.ts index 11d36a8ba..af6b75372 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -8,13 +8,14 @@ import { DEFAULT_INPUT_TEMPLATE, DEFAULT_SYSTEM_TEMPLATE, KnowledgeCutOffDate, + ModelProvider, StoreKey, SUMMARIZE_MODEL, } from "../constant"; import { - api, getHeaders, useGetMidjourneySelfProxyUrl, + ClientApi, RequestMessage, } from "../client/api"; import { ChatControllerPool } from "../client/controller"; @@ -440,7 +441,6 @@ export const useChatStore = createPersistStore( botMessage, ]); }); - if ( content.toLowerCase().startsWith("/mj") || content.toLowerCase().startsWith("/MJ") @@ -604,6 +604,13 @@ export const useChatStore = createPersistStore( set(() => ({})); extAttr?.setAutoScroll(true); } else { + var api: ClientApi; + if (modelConfig.model === "gemini-pro") { + api = new ClientApi(ModelProvider.GeminiPro); + } else { + api = new ClientApi(ModelProvider.GPT); + } + // make request api.llm.chat({ messages: sendMessages, @@ -627,12 +634,11 @@ export const useChatStore = createPersistStore( }, onError(error) { const isAborted = error.message.includes("aborted"); - botMessage.content = + botMessage.content += "\n\n" + prettyObject({ error: true, message: error.message, - message2: "用上面刷新按钮试试。", }); botMessage.streaming = false; userMessage.isError = !isAborted; @@ -684,7 +690,9 @@ export const useChatStore = createPersistStore( // system prompts, to get close to OpenAI Web ChatGPT const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts; - const systemPrompts = shouldInjectSystemPrompts + + var systemPrompts: ChatMessage[] = []; + systemPrompts = shouldInjectSystemPrompts ? 
[ createMessage({ role: "system", @@ -778,6 +786,14 @@ export const useChatStore = createPersistStore( summarizeSession() { const config = useAppConfig.getState(); const session = get().currentSession(); + const modelConfig = session.mask.modelConfig; + + var api: ClientApi; + if (modelConfig.model === "gemini-pro") { + api = new ClientApi(ModelProvider.GeminiPro); + } else { + api = new ClientApi(ModelProvider.GPT); + } // remove error messages if any const messages = session.messages; @@ -809,8 +825,6 @@ export const useChatStore = createPersistStore( }, }); } - - const modelConfig = session.mask.modelConfig; const summarizeIndex = Math.max( session.lastSummarizeIndex, session.clearContextIndex ?? 0, diff --git a/app/store/config.ts b/app/store/config.ts index a24e898e2..4a5e40c36 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -137,7 +137,7 @@ export const useAppConfig = createPersistStore( }), { name: StoreKey.Config, - version: 3.89, + version: 3.891, migrate(persistedState, version) { const state = persistedState as ChatConfig; @@ -168,7 +168,7 @@ export const useAppConfig = createPersistStore( if (version < 3.8) { state.lastUpdate = Date.now(); } - if (version < 3.89) { + if (version < 3.891) { state.lastUpdate = Date.now(); return { ...DEFAULT_CONFIG }; } diff --git a/app/store/update.ts b/app/store/update.ts index 86b0d37b9..7253caffc 100644 --- a/app/store/update.ts +++ b/app/store/update.ts @@ -1,9 +1,16 @@ -import { FETCH_COMMIT_URL, FETCH_TAG_URL, StoreKey } from "../constant"; -import { api } from "../client/api"; +import { + FETCH_COMMIT_URL, + FETCH_TAG_URL, + ModelProvider, + StoreKey, +} from "../constant"; import { getClientConfig } from "../config/client"; import { createPersistStore } from "../utils/store"; import ChatGptIcon from "../icons/chatgpt.png"; import Locale from "../locales"; +import { use } from "react"; +import { useAppConfig } from "."; +import { ClientApi } from "../client/api"; const ONE_MINUTE = 60 * 1000; const isApp = !!getClientConfig()?.isApp; @@ -99,7 +106,7 @@ export const useUpdateStore = createPersistStore( if (version === remoteId) { // Show a notification using Tauri window.__TAURI__?.notification.sendNotification({ - title: "ChatGPT Next Web", + title: "NextChat", body: `${Locale.Settings.Update.IsLatest}`, icon: `${ChatGptIcon.src}`, sound: "Default", @@ -109,7 +116,7 @@ export const useUpdateStore = createPersistStore( Locale.Settings.Update.FoundUpdate(`${remoteId}`); // Show a notification for the new version using Tauri window.__TAURI__?.notification.sendNotification({ - title: "ChatGPT Next Web", + title: "NextChat", body: updateMessage, icon: `${ChatGptIcon.src}`, sound: "Default", @@ -127,6 +134,7 @@ export const useUpdateStore = createPersistStore( }, async updateUsage(force = false) { + // only support openai for now const overOneMinute = Date.now() - get().lastUpdateUsage >= ONE_MINUTE; if (!overOneMinute && !force) return; @@ -135,6 +143,7 @@ export const useUpdateStore = createPersistStore( })); try { + const api = new ClientApi(ModelProvider.GPT); const usage = await api.llm.usage(); if (usage) { diff --git a/app/utils/model.ts b/app/utils/model.ts index a236fa560..9d2d81e50 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -6,23 +6,28 @@ export function collectModelTable( ) { const modelTable: Record< string, - { available: boolean; name: string; displayName: string; describe: string } + { + available: boolean; + name: string; + displayName: string; + describe: string; + provider?: LLMModel["provider"]; 
// Marked as optional + } > = {}; // default models - models.forEach( - (m) => - (modelTable[m.name] = { - ...m, - displayName: m.name, - }), - ); + models.forEach((m) => { + modelTable[m.name] = { + ...m, + displayName: m.name, // 'provider' is copied over if it exists + }; + }); // server custom models customModels .split(",") .filter((v) => !!v && v.length > 0) - .map((m) => { + .forEach((m) => { const available = !m.startsWith("-"); const nameConfig = m.startsWith("+") || m.startsWith("-") ? m.slice(1) : m; @@ -30,15 +35,18 @@ export function collectModelTable( // enable or disable all models if (name === "all") { - Object.values(modelTable).forEach((m) => (m.available = available)); + Object.values(modelTable).forEach( + (model) => (model.available = available), + ); + } else { + modelTable[name] = { + name, + displayName: displayName || name, + available, + describe: "", + provider: modelTable[name]?.provider, // Use optional chaining + }; } - - modelTable[name] = { - name, - displayName: displayName || name, - available, - describe: "", - }; }); return modelTable; } diff --git a/docker-compose.yml b/docker-compose.yml index 3edb7ca01..85848c378 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -16,6 +16,7 @@ services: environment: - TZ=Asia/Shanghai - OPENAI_API_KEY=$OPENAI_API_KEY + - GOOGLE_API_KEY=$GOOGLE_API_KEY - CODE=$CODE - BASE_URL=$BASE_URL - OPENAI_ORG_ID=$OPENAI_ORG_ID diff --git a/docs/faq-cn.md b/docs/faq-cn.md index bf79ef7d9..06a96852b 100644 --- a/docs/faq-cn.md +++ b/docs/faq-cn.md @@ -23,7 +23,7 @@ Docker 版本相当于稳定版,latest Docker 总是与 latest release version ## 如何修改 Vercel 环境变量 - 进入 vercel 的控制台页面; -- 选中你的 chatgpt next web 项目; +- 选中你的 NextChat 项目; - 点击页面头部的 Settings 选项; - 找到侧边栏的 Environment Variables 选项; - 修改对应的值即可。 diff --git a/docs/faq-ko.md b/docs/faq-ko.md index 9eb6bbbb2..b0d28917f 100644 --- a/docs/faq-ko.md +++ b/docs/faq-ko.md @@ -23,7 +23,7 @@ Docker 버전은 사실상 안정된 버전과 같습니다. latest Docker는 ## Vercel 환경 변수를 어떻게 수정하나요? - Vercel의 제어판 페이지로 이동합니다. -- chatgpt next web 프로젝트를 선택합니다. +- NextChat 프로젝트를 선택합니다. - 페이지 상단의 Settings 옵션을 클릭합니다. - 사이드바의 Environment Variables 옵션을 찾습니다. - 해당 값을 수정합니다. diff --git a/docs/user-manual-cn.md b/docs/user-manual-cn.md index 883bbc23e..6109fcf57 100644 --- a/docs/user-manual-cn.md +++ b/docs/user-manual-cn.md @@ -2,7 +2,7 @@ > No english version yet, please read this doc with ChatGPT or other translation tools. -本文档用于解释 ChatGPT Next Web 的部分功能介绍和设计原则。 +本文档用于解释 NextChat 的部分功能介绍和设计原则。 ## 面具 (Mask) @@ -22,7 +22,7 @@ 编辑步骤如下: -1. 在 ChatGPT Next Web 中配置好一个面具; +1. 在 NextChat 中配置好一个面具; 2. 使用面具编辑页面的下载按钮,将面具保存为 JSON 格式; 3. 让 ChatGPT 帮你将 json 文件格式化为对应的 ts 代码; 4. 
放入对应的 .ts 文件。 diff --git a/public/site.webmanifest b/public/site.webmanifest index e698afeb8..233c8e384 100644 --- a/public/site.webmanifest +++ b/public/site.webmanifest @@ -1,21 +1,20 @@ { - "name": "ChatGPT Next Web", - "short_name": "ChatGPT", - "icons": [ - { - "src": "https://cos.xiaosi.cc/next/public/android-chrome-192x192.png", - "sizes": "192x192", - "type": "image/png" - }, - { - "src": "https://cos.xiaosi.cc/next/public/android-chrome-512x512.png", - "sizes": "512x512", - "type": "image/png" - } - ], - "start_url": "/", - "theme_color": "#ffffff", - "background_color": "#ffffff", - "display": "standalone" - } - \ No newline at end of file + "name": "NextChat", + "short_name": "NextChat", + "icons": [ + { + "src": "https://cos.xiaosi.cc/next/public/android-chrome-192x192.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "https://cos.xiaosi.cc/next/public/android-chrome-512x512.png", + "sizes": "512x512", + "type": "image/png" + } + ], + "start_url": "/", + "theme_color": "#ffffff", + "background_color": "#ffffff", + "display": "standalone" +} \ No newline at end of file diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index bb72a88e7..d93210fc5 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -56,6 +56,128 @@ version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +[[package]] +name = "async-broadcast" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c48ccdbf6ca6b121e0f586cbc0e73ae440e56c67c30fa0873b4e110d9c26d2b" +dependencies = [ + "event-listener", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-executor" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0c4a4f319e45986f347ee47fef8bf5e81c9abc3f6f58dc2391439f30df65f0" +dependencies = [ + "async-lock", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +dependencies = [ + "async-lock", + "autocfg", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite", + "log", + "parking", + "polling", + "rustix", + "slab", + "socket2", + "waker-fn", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-process" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" +dependencies = [ + "async-io", + "async-lock", + "autocfg", + "blocking", + "cfg-if", + "event-listener", + "futures-lite", + 
"rustix", + "signal-hook", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-recursion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + +[[package]] +name = "async-task" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d90cd0b264dfdd8eb5bad0a2c217c1f88fa96a8573f40e7b12de23fb468f46" + +[[package]] +name = "async-trait" +version = "0.1.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2d0f03b3640e3a630367e40c468cb7f309529c708ed1d88597047b0e7c6ef7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "atk" version = "0.15.1" @@ -80,6 +202,12 @@ dependencies = [ "system-deps 6.1.0", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.22.0" @@ -150,6 +278,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c36a4d0d48574b3dd360b4b7d95cc651d2b6557b6402848a27d4b228a473e2a" +dependencies = [ + "async-channel", + "async-lock", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite", + "piper", + "tracing", +] + [[package]] name = "brotli" version = "3.3.4" @@ -358,6 +502,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "convert_case" version = "0.4.0" @@ -530,6 +683,17 @@ dependencies = [ "syn 2.0.16", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -629,6 +793,27 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enumflags2" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5998b4f30320c9d93aed72f63af821bfdac50465b75428fce77b48ec482c3939" +dependencies = [ + "enumflags2_derive", + "serde", +] + +[[package]] +name = "enumflags2_derive" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f95e2801cd355d4a1a3e3953ce6ee5ae9603a5c833455343a8bfe3f44d418246" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.16", +] + [[package]] name = "errno" version = "0.3.1" @@ -650,6 +835,12 @@ dependencies = [ "libc", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "fastrand" version = "1.9.0" @@ -659,6 +850,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + [[package]] name = "fdeflate" version = "0.3.0" @@ -674,7 
+871,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3cf3a800ff6e860c863ca6d4b16fd999db8b752819c1606884047b73e468535" dependencies = [ - "memoffset", + "memoffset 0.8.0", "rustc_version", ] @@ -772,6 +969,21 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.28" @@ -783,6 +995,12 @@ dependencies = [ "syn 2.0.16", ] +[[package]] +name = "futures-sink" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" + [[package]] name = "futures-task" version = "0.3.28" @@ -796,8 +1014,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-core", + "futures-io", "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", @@ -1451,6 +1672,19 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" +[[package]] +name = "mac-notification-sys" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51fca4d74ff9dbaac16a01b924bc3693fa2bba0862c2c633abc73f9a8ea21f64" +dependencies = [ + "cc", + "dirs-next", + "objc-foundation", + "objc_id", + "time", +] + [[package]] name = "malloc_buf" version = "0.0.6" @@ -1495,6 +1729,15 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "memoffset" version = "0.8.0" @@ -1504,6 +1747,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + [[package]] name = "minisign-verify" version = "0.2.1" @@ -1572,12 +1824,37 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.7.1", +] + [[package]] name = "nodrop" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" +[[package]] +name = "notify-rust" +version = "4.10.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "827c5edfa80235ded4ab3fe8e9dc619b4f866ef16fe9b1c6b8a7f8692c0f2226" +dependencies = [ + "log", + "mac-notification-sys", + "serde", + "tauri-winrt-notification", + "zbus", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1757,6 +2034,16 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-stream" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa2b01e1d916879f73a53d01d1d6cee68adbb31d6d9177a8cfce093cced1d50" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "overload" version = "0.1.1" @@ -1788,6 +2075,12 @@ dependencies = [ "system-deps 6.1.0", ] +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + [[package]] name = "parking_lot" version = "0.12.1" @@ -1933,6 +2226,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkg-config" version = "0.3.27" @@ -1948,7 +2252,7 @@ dependencies = [ "base64 0.21.0", "indexmap", "line-wrap", - "quick-xml", + "quick-xml 0.28.2", "serde", "time", ] @@ -1966,6 +2270,22 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -2027,6 +2347,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quick-xml" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11bafc859c6815fbaffbbbf4229ecb767ac913fecb27f9ad4343662e9ef099ea" +dependencies = [ + "memchr", +] + [[package]] name = "quick-xml" version = "0.28.2" @@ -2466,6 +2795,17 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha2" version = "0.10.6" @@ -2486,6 +2826,25 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + [[package]] name = "simd-adler32" version = "0.3.5" @@ -2513,6 +2872,16 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +[[package]] +name = "socket2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "soup2" version = "0.2.1" @@ -2556,6 +2925,12 @@ dependencies = [ "loom", ] +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "string_cache" version = "0.8.7" @@ -2733,6 +3108,7 @@ dependencies = [ "http", "ignore", "minisign-verify", + "notify-rust", "objc", "once_cell", "open", @@ -2915,6 +3291,16 @@ dependencies = [ "toml 0.7.3", ] +[[package]] +name = "tauri-winrt-notification" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5bff1d532fead7c43324a0fa33643b8621a47ce2944a633be4cb6c0240898f" +dependencies = [ + "quick-xml 0.23.1", + "windows 0.39.0", +] + [[package]] name = "tempfile" version = "3.5.0" @@ -2922,7 +3308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", - "fastrand", + "fastrand 1.9.0", "redox_syscall 0.3.5", "rustix", "windows-sys 0.45.0", @@ -3135,6 +3521,17 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +[[package]] +name = "uds_windows" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89daebc3e6fd160ac4aa9fc8b3bf71e1f74fbf92367ae71fb83a037e8bf164b9" +dependencies = [ + "memoffset 0.9.0", + "tempfile", + "winapi", +] + [[package]] name = "unicode-bidi" version = "0.3.13" @@ -3239,6 +3636,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + [[package]] name = "walkdir" version = "2.3.3" @@ -3815,6 +4218,82 @@ dependencies = [ "libc", ] +[[package]] +name = "xdg-home" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2769203cd13a0c6015d515be729c526d041e9cf2c0cc478d57faee85f40c6dcd" +dependencies = [ + "nix", + "winapi", +] + +[[package]] +name = "zbus" +version = "3.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31de390a2d872e4cd04edd71b425e29853f786dc99317ed72d73d6fcf5ebb948" +dependencies = [ + "async-broadcast", + "async-executor", + "async-fs", + "async-io", + "async-lock", + "async-process", + "async-recursion", + "async-task", + "async-trait", + "blocking", + "byteorder", + "derivative", + "enumflags2", + "event-listener", + "futures-core", + "futures-sink", + "futures-util", + "hex", + "nix", + "once_cell", + "ordered-stream", + "rand 0.8.5", + "serde", + "serde_repr", + "sha1", + "static_assertions", + "tracing", + "uds_windows", + "winapi", + "xdg-home", + "zbus_macros", + "zbus_names", + "zvariant", +] + +[[package]] +name = "zbus_macros" +version = "3.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1794a946878c0e807f55a397187c11fc7a038ba5d868e7db4f3bd7760bc9d" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + 
"regex", + "syn 1.0.109", + "zvariant_utils", +] + +[[package]] +name = "zbus_names" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb80bb776dbda6e23d705cf0123c3b95df99c4ebeaec6c2599d4a5419902b4a9" +dependencies = [ + "serde", + "static_assertions", + "zvariant", +] + [[package]] name = "zip" version = "0.6.6" @@ -3825,3 +4304,41 @@ dependencies = [ "crc32fast", "crossbeam-utils", ] + +[[package]] +name = "zvariant" +version = "3.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44b291bee0d960c53170780af148dca5fa260a63cdd24f1962fa82e03e53338c" +dependencies = [ + "byteorder", + "enumflags2", + "libc", + "serde", + "static_assertions", + "zvariant_derive", +] + +[[package]] +name = "zvariant_derive" +version = "3.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "934d7a7dfc310d6ee06c87ffe88ef4eca7d3e37bb251dece2ef93da8f17d8ecd" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", + "zvariant_utils", +] + +[[package]] +name = "zvariant_utils" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7234f0d811589db492d16893e3f21e8e2fd282e6d01b0cddee310322062cc200" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 182d00792..30546227d 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -8,8 +8,8 @@ "withGlobalTauri": true }, "package": { - "productName": "ChatGPT Next Web", - "version": "2.9.12" + "productName": "NextChat", + "version": "2.9.13" }, "tauri": { "allowlist": { @@ -68,7 +68,7 @@ "icons/icon.ico" ], "identifier": "com.yida.chatgpt.next.web", - "longDescription": "ChatGPT Next Web is a cross-platform ChatGPT client, including Web/Win/Linux/OSX/PWA.", + "longDescription": "NextChat is a cross-platform ChatGPT client, including Web/Win/Linux/OSX/PWA.", "macOS": { "entitlements": null, "exceptionDomain": "", @@ -77,7 +77,7 @@ "signingIdentity": null }, "resources": [], - "shortDescription": "ChatGPT Next Web App", + "shortDescription": "NextChat App", "targets": "all", "windows": { "certificateThumbprint": null, @@ -104,11 +104,11 @@ "fullscreen": false, "height": 600, "resizable": true, - "title": "ChatGPT Next Web", + "title": "NextChat", "width": 960, "hiddenTitle": true, "titleBarStyle": "Overlay" } ] } -} +} \ No newline at end of file