diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..60da41dd8
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+# local env files
+.env*.local
+
+# docker-compose env files
+.env
+
+*.key
+*.key.pub
\ No newline at end of file
diff --git a/.env.template b/.env.template
index 3e3290369..89bab2cb1 100644
--- a/.env.template
+++ b/.env.template
@@ -8,6 +8,16 @@ CODE=your-password
# You can start service behind a proxy
PROXY_URL=http://localhost:7890
+# (optional)
+# Default: Empty
+# Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
+GOOGLE_API_KEY=
+
+# (optional)
+# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
+# Google Gemini Pro API url, set if you want to customize Google Gemini Pro API url.
+GOOGLE_URL=
+
# Override openai api request base url. (optional)
# Default: https://api.openai.com
# Examples: http://your-openai-proxy.com
@@ -36,3 +46,4 @@ ENABLE_BALANCE_QUERY=
# Default: Empty
# If you want to disable parse settings from url, set this value to 1.
DISABLE_FAST_LINK=
+
diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml
index ebf5587d0..e04e30adb 100644
--- a/.github/workflows/sync.yml
+++ b/.github/workflows/sync.yml
@@ -24,7 +24,7 @@ jobs:
id: sync
uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
with:
- upstream_sync_repo: Yidadaa/ChatGPT-Next-Web
+ upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web
upstream_sync_branch: main
target_sync_branch: main
target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
diff --git a/Dockerfile b/Dockerfile
index 720a0cfe9..436d39d82 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,6 +16,7 @@ FROM base AS builder
RUN apk update && apk add --no-cache git
ENV OPENAI_API_KEY=""
+ENV GOOGLE_API_KEY=""
ENV CODE=""
WORKDIR /app
@@ -31,6 +32,7 @@ RUN apk add proxychains-ng
ENV PROXY_URL=""
ENV OPENAI_API_KEY=""
+ENV GOOGLE_API_KEY=""
ENV CODE=""
COPY --from=builder /app/public ./public
@@ -41,22 +43,22 @@ COPY --from=builder /app/.next/server ./.next/server
EXPOSE 3000
CMD if [ -n "$PROXY_URL" ]; then \
- export HOSTNAME="127.0.0.1"; \
- protocol=$(echo $PROXY_URL | cut -d: -f1); \
- host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
- port=$(echo $PROXY_URL | cut -d: -f3); \
- conf=/etc/proxychains.conf; \
- echo "strict_chain" > $conf; \
- echo "proxy_dns" >> $conf; \
- echo "remote_dns_subnet 224" >> $conf; \
- echo "tcp_read_time_out 15000" >> $conf; \
- echo "tcp_connect_time_out 8000" >> $conf; \
- echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \
- echo "localnet ::1/128" >> $conf; \
- echo "[ProxyList]" >> $conf; \
- echo "$protocol $host $port" >> $conf; \
- cat /etc/proxychains.conf; \
- proxychains -f $conf node server.js; \
+ export HOSTNAME="127.0.0.1"; \
+ protocol=$(echo $PROXY_URL | cut -d: -f1); \
+ host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
+ port=$(echo $PROXY_URL | cut -d: -f3); \
+ conf=/etc/proxychains.conf; \
+ echo "strict_chain" > $conf; \
+ echo "proxy_dns" >> $conf; \
+ echo "remote_dns_subnet 224" >> $conf; \
+ echo "tcp_read_time_out 15000" >> $conf; \
+ echo "tcp_connect_time_out 8000" >> $conf; \
+ echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \
+ echo "localnet ::1/128" >> $conf; \
+ echo "[ProxyList]" >> $conf; \
+ echo "$protocol $host $port" >> $conf; \
+ cat /etc/proxychains.conf; \
+ proxychains -f $conf node server.js; \
else \
- node server.js; \
+ node server.js; \
fi
diff --git a/README.md b/README.md
index 7c7a6f243..69b649926 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,22 @@
-
ChatGPT Next Web
+
NextChat (ChatGPT Next Web)
English / [简体中文](./README_CN.md)
-One-Click to get well-designed cross-platform ChatGPT web UI.
+One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support.
-一键免费部署你的跨平台私人 ChatGPT 应用。
+一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
[![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url]
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
-[Web App](https://chatgpt.nextweb.fun/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)
+[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)
-[网页版](https://chatgpt.nextweb.fun/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg)
+[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg)
[web-url]: https://chatgpt.nextweb.fun
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
@@ -25,7 +25,9 @@ One-Click to get well-designed cross-platform ChatGPT web UI.
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
-[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
+[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
+
+[](https://zeabur.com/templates/ZBUEFA)
[](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
@@ -37,8 +39,8 @@ One-Click to get well-designed cross-platform ChatGPT web UI.
- **Deploy for free with one-click** on Vercel in under 1 minute
- Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
-- Fully compatible with self-deployed llms, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI)
-- Privacy first, all data stored locally in the browser
+- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI)
+- Privacy first, all data is stored locally in the browser
- Markdown support: LaTex, mermaid, code highlight, etc.
- Responsive design, dark mode and PWA
- Fast first screen loading speed (~100kb), support streaming response
@@ -189,6 +191,14 @@ Azure Api Key.
Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro Api Key.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro Api Url.
+
### `HIDE_USER_API_KEY` (optional)
> Default: Empty
diff --git a/README_CN.md b/README_CN.md
index 0e288151a..33acb44a3 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -3,12 +3,14 @@
SoulShellGPT
-一键免费部署你的私人 ChatGPT 网页应用。
+一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。
[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt)
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
+[](https://zeabur.com/templates/ZBUEFA)
+
[](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

@@ -19,7 +21,7 @@
1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. 点击右侧按钮开始部署:
- [](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
+ [](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
@@ -104,6 +106,14 @@ Azure 密钥。
Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro 密钥.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro Api Url.
+
### `HIDE_USER_API_KEY` (可选)
如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。
diff --git a/app/api/auth.ts b/app/api/auth.ts
index b41e34e05..16c8034eb 100644
--- a/app/api/auth.ts
+++ b/app/api/auth.ts
@@ -1,7 +1,7 @@
import { NextRequest } from "next/server";
import { getServerSideConfig } from "../config/server";
import md5 from "spark-md5";
-import { ACCESS_CODE_PREFIX } from "../constant";
+import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant";
function getIP(req: NextRequest) {
let ip = req.ip ?? req.headers.get("x-real-ip");
@@ -16,15 +16,15 @@ function getIP(req: NextRequest) {
function parseApiKey(bearToken: string) {
const token = bearToken.trim().replaceAll("Bearer ", "").trim();
- const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX);
+ const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
return {
- accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
- apiKey: isOpenAiKey ? token : "",
+ accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
+ apiKey: isApiKey ? token : "",
};
}
-export function auth(req: NextRequest) {
+export function auth(req: NextRequest, modelProvider: ModelProvider) {
const authToken = req.headers.get("Authorization") ?? "";
// check if it is openai api key or user token
@@ -49,22 +49,23 @@ export function auth(req: NextRequest) {
if (serverConfig.hideUserApiKey && !!apiKey) {
return {
error: true,
- msg: "you are not allowed to access openai with your own api key",
+ msg: "you are not allowed to access with your own api key",
};
}
// if user does not provide an api key, inject system api key
if (!apiKey) {
- const serverApiKey = serverConfig.isAzure
- ? serverConfig.azureApiKey
- : serverConfig.apiKey;
+ const serverConfig = getServerSideConfig();
- if (serverApiKey) {
+ const systemApiKey =
+ modelProvider === ModelProvider.GeminiPro
+ ? serverConfig.googleApiKey
+ : serverConfig.isAzure
+ ? serverConfig.azureApiKey
+ : serverConfig.apiKey;
+ if (systemApiKey) {
console.log("[Auth] use system api key");
- req.headers.set(
- "Authorization",
- `${serverConfig.isAzure ? "" : "Bearer "}${serverApiKey}`,
- );
+ req.headers.set("Authorization", `Bearer ${systemApiKey}`);
} else {
console.log("[Auth] admin did not provide an api key");
}
diff --git a/app/api/common.ts b/app/api/common.ts
index 6b0d619df..ca8406bb3 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,6 +1,6 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
-import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant";
import { collectModelTable } from "../utils/model";
import { makeAzurePath } from "../azure";
@@ -9,8 +9,21 @@ const serverConfig = getServerSideConfig();
export async function requestOpenai(req: NextRequest) {
const controller = new AbortController();
- const authValue = req.headers.get("Authorization") ?? "";
- const authHeaderName = serverConfig.isAzure ? "api-key" : "Authorization";
+ var authValue,
+ authHeaderName = "";
+ if (serverConfig.isAzure) {
+ authValue =
+ req.headers
+ .get("Authorization")
+ ?.trim()
+ .replaceAll("Bearer ", "")
+ .trim() ?? "";
+
+ authHeaderName = "api-key";
+ } else {
+ authValue = req.headers.get("Authorization") ?? "";
+ authHeaderName = "Authorization";
+ }
let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll(
"/api/openai/",
@@ -109,6 +122,12 @@ export async function requestOpenai(req: NextRequest) {
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
+ // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
+ // So if the streaming is disabled, we need to remove the content-encoding header
+ // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
+ // The browser will try to decode the response with brotli and fail
+ newHeaders.delete("content-encoding");
+
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts
new file mode 100644
index 000000000..869bd5076
--- /dev/null
+++ b/app/api/google/[...path]/route.ts
@@ -0,0 +1,121 @@
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "../../auth";
+import { getServerSideConfig } from "@/app/config/server";
+import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant";
+
+async function handle(
+ req: NextRequest,
+ { params }: { params: { path: string[] } },
+) {
+ console.log("[Google Route] params ", params);
+
+ if (req.method === "OPTIONS") {
+ return NextResponse.json({ body: "OK" }, { status: 200 });
+ }
+
+ const controller = new AbortController();
+
+ const serverConfig = getServerSideConfig();
+
+ let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;
+
+ if (!baseUrl.startsWith("http")) {
+ baseUrl = `https://${baseUrl}`;
+ }
+
+ if (baseUrl.endsWith("/")) {
+ baseUrl = baseUrl.slice(0, -1);
+ }
+
+ let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
+
+ console.log("[Proxy] ", path);
+ console.log("[Base Url]", baseUrl);
+
+ const timeoutId = setTimeout(
+ () => {
+ controller.abort();
+ },
+ 10 * 60 * 1000,
+ );
+
+ const authResult = auth(req, ModelProvider.GeminiPro);
+ if (authResult.error) {
+ return NextResponse.json(authResult, {
+ status: 401,
+ });
+ }
+
+ const bearToken = req.headers.get("Authorization") ?? "";
+ const token = bearToken.trim().replaceAll("Bearer ", "").trim();
+
+ const key = token ? token : serverConfig.googleApiKey;
+
+ if (!key) {
+ return NextResponse.json(
+ {
+ error: true,
+ message: `missing GOOGLE_API_KEY in server env vars`,
+ },
+ {
+ status: 401,
+ },
+ );
+ }
+
+ const fetchUrl = `${baseUrl}/${path}?key=${key}`;
+ const fetchOptions: RequestInit = {
+ headers: {
+ "Content-Type": "application/json",
+ "Cache-Control": "no-store",
+ },
+ method: req.method,
+ body: req.body,
+ // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
+ redirect: "manual",
+ // @ts-ignore
+ duplex: "half",
+ signal: controller.signal,
+ };
+
+ try {
+ const res = await fetch(fetchUrl, fetchOptions);
+ // to prevent browser prompt for credentials
+ const newHeaders = new Headers(res.headers);
+ newHeaders.delete("www-authenticate");
+ // to disable nginx buffering
+ newHeaders.set("X-Accel-Buffering", "no");
+
+ return new Response(res.body, {
+ status: res.status,
+ statusText: res.statusText,
+ headers: newHeaders,
+ });
+ } finally {
+ clearTimeout(timeoutId);
+ }
+}
+
+export const GET = handle;
+export const POST = handle;
+
+export const runtime = "edge";
+export const preferredRegion = [
+ "arn1",
+ "bom1",
+ "cdg1",
+ "cle1",
+ "cpt1",
+ "dub1",
+ "fra1",
+ "gru1",
+ "hnd1",
+ "iad1",
+ "icn1",
+ "kix1",
+ "lhr1",
+ "pdx1",
+ "sfo1",
+ "sin1",
+ "syd1",
+];
diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts
index 2addd53a5..77059c151 100644
--- a/app/api/openai/[...path]/route.ts
+++ b/app/api/openai/[...path]/route.ts
@@ -1,6 +1,6 @@
import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
import { getServerSideConfig } from "@/app/config/server";
-import { OpenaiPath } from "@/app/constant";
+import { ModelProvider, OpenaiPath } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth";
@@ -45,7 +45,7 @@ async function handle(
);
}
- const authResult = auth(req);
+ const authResult = auth(req, ModelProvider.GPT);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
@@ -75,4 +75,22 @@ export const GET = handle;
export const POST = handle;
export const runtime = "edge";
-export const preferredRegion = ['arn1', 'bom1', 'cdg1', 'cle1', 'cpt1', 'dub1', 'fra1', 'gru1', 'hnd1', 'iad1', 'icn1', 'kix1', 'lhr1', 'pdx1', 'sfo1', 'sin1', 'syd1'];
+export const preferredRegion = [
+ "arn1",
+ "bom1",
+ "cdg1",
+ "cle1",
+ "cpt1",
+ "dub1",
+ "fra1",
+ "gru1",
+ "hnd1",
+ "iad1",
+ "icn1",
+ "kix1",
+ "lhr1",
+ "pdx1",
+ "sfo1",
+ "sin1",
+ "syd1",
+];
diff --git a/app/client/api.ts b/app/client/api.ts
index eedd2c9ab..56fa32996 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -1,8 +1,13 @@
import { getClientConfig } from "../config/client";
-import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
-import { ChatMessage, ModelType, useAccessStore } from "../store";
+import {
+ ACCESS_CODE_PREFIX,
+ Azure,
+ ModelProvider,
+ ServiceProvider,
+} from "../constant";
+import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
-
+import { GeminiProApi } from "./platforms/google";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
@@ -41,6 +46,13 @@ export interface LLMUsage {
export interface LLMModel {
name: string;
available: boolean;
+ provider: LLMModelProvider;
+}
+
+export interface LLMModelProvider {
+ id: string;
+ providerName: string;
+ providerType: string;
}
export abstract class LLMApi {
@@ -73,7 +85,11 @@ interface ChatProvider {
export class ClientApi {
public llm: LLMApi;
- constructor() {
+ constructor(provider: ModelProvider = ModelProvider.GPT) {
+ if (provider === ModelProvider.GeminiPro) {
+ this.llm = new GeminiProApi();
+ return;
+ }
this.llm = new ChatGPTApi();
}
@@ -93,7 +109,7 @@ export class ClientApi {
{
from: "human",
value:
- "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web",
+ "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
},
]);
// 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
@@ -123,18 +139,22 @@ export class ClientApi {
}
}
-export const api = new ClientApi();
-
export function getHeaders() {
const accessStore = useAccessStore.getState();
const headers: Record = {
"Content-Type": "application/json",
"x-requested-with": "XMLHttpRequest",
+ "Accept": "application/json",
};
-
+ const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
+ const isGoogle = modelConfig.model === "gemini-pro";
const isAzure = accessStore.provider === ServiceProvider.Azure;
const authHeader = isAzure ? "api-key" : "Authorization";
- const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
+ const apiKey = isGoogle
+ ? accessStore.googleApiKey
+ : isAzure
+ ? accessStore.azureApiKey
+ : accessStore.openaiApiKey;
const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
const validString = (x: string) => x && x.length > 0;
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
new file mode 100644
index 000000000..c35e93cb3
--- /dev/null
+++ b/app/client/platforms/google.ts
@@ -0,0 +1,222 @@
+import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import {
+ EventStreamContentType,
+ fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
+import Locale from "../../locales";
+import { getServerSideConfig } from "@/app/config/server";
+export class GeminiProApi implements LLMApi {
+ extractMessage(res: any) {
+ console.log("[Response] gemini-pro response: ", res);
+
+ return (
+ res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+ res?.error?.message ||
+ ""
+ );
+ }
+ async chat(options: ChatOptions): Promise {
+ const messages = options.messages.map((v) => ({
+ role: v.role.replace("assistant", "model").replace("system", "user"),
+ parts: [{ text: v.content }],
+ }));
+
+ // google requires that role in neighboring messages must not be the same
+ for (let i = 0; i < messages.length - 1; ) {
+ // Check if current and next item both have the role "model"
+ if (messages[i].role === messages[i + 1].role) {
+ // Concatenate the 'parts' of the current and next item
+ messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
+ // Remove the next item
+ messages.splice(i + 1, 1);
+ } else {
+ // Move to the next item
+ i++;
+ }
+ }
+
+ const modelConfig = {
+ ...useAppConfig.getState().modelConfig,
+ ...useChatStore.getState().currentSession().mask.modelConfig,
+ ...{
+ model: options.config.model,
+ },
+ };
+ const requestPayload = {
+ contents: messages,
+ generationConfig: {
+ // stopSequences: [
+ // "Title"
+ // ],
+ temperature: modelConfig.temperature,
+ maxOutputTokens: modelConfig.max_tokens,
+ topP: modelConfig.top_p,
+ // "topK": modelConfig.top_k,
+ },
+ };
+
+ console.log("[Request] google payload: ", requestPayload);
+
+ // todo: support stream later
+ const shouldStream = false;
+ const controller = new AbortController();
+ options.onController?.(controller);
+ try {
+ const chatPath = this.path(Google.ChatPath);
+ const chatPayload = {
+ method: "POST",
+ body: JSON.stringify(requestPayload),
+ signal: controller.signal,
+ headers: getHeaders(),
+ };
+
+ // make a fetch request
+ const requestTimeoutId = setTimeout(
+ () => controller.abort(),
+ REQUEST_TIMEOUT_MS,
+ );
+ if (shouldStream) {
+ let responseText = "";
+ let remainText = "";
+ let finished = false;
+
+        // animate response to make it look smooth
+ function animateResponseText() {
+ if (finished || controller.signal.aborted) {
+ responseText += remainText;
+ console.log("[Response Animation] finished");
+ return;
+ }
+
+ if (remainText.length > 0) {
+ const fetchCount = Math.max(1, Math.round(remainText.length / 60));
+ const fetchText = remainText.slice(0, fetchCount);
+ responseText += fetchText;
+ remainText = remainText.slice(fetchCount);
+ options.onUpdate?.(responseText, fetchText);
+ }
+
+ requestAnimationFrame(animateResponseText);
+ }
+
+        // start animation
+ animateResponseText();
+
+ const finish = () => {
+ if (!finished) {
+ finished = true;
+ options.onFinish(responseText + remainText);
+ }
+ };
+
+ controller.signal.onabort = finish;
+
+ fetchEventSource(chatPath, {
+ ...chatPayload,
+ async onopen(res) {
+ clearTimeout(requestTimeoutId);
+ const contentType = res.headers.get("content-type");
+ console.log(
+ "[OpenAI] request response content type: ",
+ contentType,
+ );
+
+ if (contentType?.startsWith("text/plain")) {
+ responseText = await res.clone().text();
+ return finish();
+ }
+
+ if (
+ !res.ok ||
+ !res.headers
+ .get("content-type")
+ ?.startsWith(EventStreamContentType) ||
+ res.status !== 200
+ ) {
+ const responseTexts = [responseText];
+ let extraInfo = await res.clone().text();
+ try {
+ const resJson = await res.clone().json();
+ extraInfo = prettyObject(resJson);
+ } catch {}
+
+ if (res.status === 401) {
+ responseTexts.push(Locale.Error.Unauthorized);
+ }
+
+ if (extraInfo) {
+ responseTexts.push(extraInfo);
+ }
+
+ responseText = responseTexts.join("\n\n");
+
+ return finish();
+ }
+ },
+ onmessage(msg) {
+ if (msg.data === "[DONE]" || finished) {
+ return finish();
+ }
+ const text = msg.data;
+ try {
+ const json = JSON.parse(text) as {
+ choices: Array<{
+ delta: {
+ content: string;
+ };
+ }>;
+ };
+ const delta = json.choices[0]?.delta?.content;
+ if (delta) {
+ remainText += delta;
+ }
+ } catch (e) {
+ console.error("[Request] parse error", text);
+ }
+ },
+ onclose() {
+ finish();
+ },
+ onerror(e) {
+ options.onError?.(e);
+ throw e;
+ },
+ openWhenHidden: true,
+ });
+ } else {
+ const res = await fetch(chatPath, chatPayload);
+ clearTimeout(requestTimeoutId);
+
+ const resJson = await res.json();
+
+ if (resJson?.promptFeedback?.blockReason) {
+ // being blocked
+ options.onError?.(
+ new Error(
+ "Message is being blocked for reason: " +
+ resJson.promptFeedback.blockReason,
+ ),
+ );
+ }
+ const message = this.extractMessage(resJson);
+ options.onFinish(message);
+ }
+ } catch (e) {
+ console.log("[Request] failed to make a chat request", e);
+ options.onError?.(e as Error);
+ }
+ }
+ usage(): Promise {
+ throw new Error("Method not implemented.");
+ }
+ async models(): Promise {
+ return [];
+ }
+ path(path: string): string {
+ return "/api/google/" + path;
+ }
+}
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 8ea864692..68a0fda75 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -323,6 +323,11 @@ export class ChatGPTApi implements LLMApi {
return chatModels.map((m) => ({
name: m.id,
available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
}));
}
}
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index 7962d46be..57118349b 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -64,6 +64,17 @@ export function AuthPage() {
);
}}
/>
+ {
+ accessStore.update(
+ (access) => (access.googleApiKey = e.currentTarget.value),
+ );
+ }}
+ />
>
) : null}
diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index 03aac05f2..a2a50320d 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -10,7 +10,10 @@ import BotIcon from "../icons/bot.svg";
import BlackBotIcon from "../icons/black-bot.svg";
export function getEmojiUrl(unified: string, style: EmojiStyle) {
- return `https://cdn.staticfile.org/emoji-datasource-apple/14.0.0/img/${style}/64/${unified}.png`;
+ // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
+ // Old CDN broken, so I had to switch to this one
+ // Author: https://github.com/H0llyW00dzZ
+ return `https://cdn.jsdelivr.net/npm/emoji-datasource-apple/img/${style}/64/${unified}.png`;
}
export function AvatarPicker(props: {
diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx
index 4ca6427a7..dff17e4ab 100644
--- a/app/components/exporter.tsx
+++ b/app/components/exporter.tsx
@@ -29,10 +29,11 @@ import NextImage from "next/image";
import { toBlob, toPng } from "html-to-image";
import { DEFAULT_MASK_AVATAR } from "../store/mask";
-import { api } from "../client/api";
+
import { prettyObject } from "../utils/format";
-import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
+import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
import { getClientConfig } from "../config/client";
+import { ClientApi } from "../client/api";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => ,
@@ -301,10 +302,17 @@ export function PreviewActions(props: {
}) {
const [loading, setLoading] = useState(false);
const [shouldExport, setShouldExport] = useState(false);
-
+ const config = useAppConfig();
const onRenderMsgs = (msgs: ChatMessage[]) => {
setShouldExport(false);
+ var api: ClientApi;
+ if (config.modelConfig.model === "gemini-pro") {
+ api = new ClientApi(ModelProvider.GeminiPro);
+ } else {
+ api = new ClientApi(ModelProvider.GPT);
+ }
+
api
.share(msgs)
.then((res) => {
@@ -530,7 +538,7 @@ export function ImagePreviewer(props: {