diff --git a/.env.template b/.env.template
index 89bab2cb1..166cc4ef4 100644
--- a/.env.template
+++ b/.env.template
@@ -14,8 +14,8 @@ PROXY_URL=http://localhost:7890
GOOGLE_API_KEY=
# (optional)
-# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
-# Googel Gemini Pro API url, set if you want to customize Google Gemini Pro API url.
+# Default: https://generativelanguage.googleapis.com/
+# Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url.
GOOGLE_URL=
# Override openai api request base url. (optional)
diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml
new file mode 100644
index 000000000..02ee0f192
--- /dev/null
+++ b/.github/workflows/deploy_preview.yml
@@ -0,0 +1,83 @@
+name: VercelPreviewDeployment
+
+on:
+ pull_request_target:
+ types:
+ - opened
+ - synchronize
+
+env:
+ VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}
+ VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
+ VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
+ VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}
+ VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }}
+
+permissions:
+ contents: read
+ statuses: write
+ pull-requests: write
+
+jobs:
+ deploy-preview:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+
+ - name: Extract branch name
+ shell: bash
+ run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT"
+ id: extract_branch
+
+ - name: Hash branch name
+ uses: pplanel/hash-calculator-action@v1.3.1
+ id: hash_branch
+ with:
+ input: ${{ steps.extract_branch.outputs.branch }}
+ method: MD5
+
+ - name: Set Environment Variables
+ id: set_env
+ if: github.event_name == 'pull_request_target'
+ run: |
+ echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT
+
+ - name: Install Vercel CLI
+ run: npm install --global vercel@latest
+
+ - name: Cache dependencies
+ uses: actions/cache@v2
+ id: cache-npm
+ with:
+ path: ~/.npm
+ key: npm-${{ hashFiles('package-lock.json') }}
+ restore-keys: npm-
+
+ - name: Pull Vercel Environment Information
+ run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}
+
+ - name: Deploy Project Artifacts to Vercel
+ id: vercel
+ env:
+ META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}}
+ run: |
+ set -e
+ vercel pull --yes --environment=preview --token=${VERCEL_TOKEN}
+ vercel build --token=${VERCEL_TOKEN}
+ vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}
+
+ DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }})
+ ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}')
+
+ echo "New preview URL: ${DEFAULT_URL}"
+ echo "New alias URL: ${ALIAS_URL}"
+ echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT"
+
+ - uses: mshick/add-pr-comment@v2
+ with:
+ message: |
+ Your build has completed!
+
+ [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }})
diff --git a/.github/workflows/remove_deploy_preview.yml b/.github/workflows/remove_deploy_preview.yml
new file mode 100644
index 000000000..4846cda2d
--- /dev/null
+++ b/.github/workflows/remove_deploy_preview.yml
@@ -0,0 +1,40 @@
+name: Removedeploypreview
+
+permissions:
+ contents: read
+ statuses: write
+ pull-requests: write
+
+env:
+ VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }}
+ VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }}
+ VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }}
+
+on:
+ pull_request_target:
+ types:
+ - closed
+
+jobs:
+ delete-deployments:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Extract branch name
+ shell: bash
+ run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
+ id: extract_branch
+
+ - name: Hash branch name
+ uses: pplanel/hash-calculator-action@v1.3.1
+ id: hash_branch
+ with:
+ input: ${{ steps.extract_branch.outputs.branch }}
+ method: MD5
+
+ - name: Call the delete-deployment-preview.sh script
+ env:
+ META_TAG: ${{ steps.hash_branch.outputs.digest }}
+ run: |
+ bash ./scripts/delete-deployment-preview.sh
diff --git a/README.md b/README.md
index 69b649926..3ac537abc 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-

+
NextChat (ChatGPT Next Web)
@@ -14,9 +14,9 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
-[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)
+[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev)
-[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg)
+[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
[web-url]: https://chatgpt.nextweb.fun
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
@@ -61,10 +61,11 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
## What's New
-- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
-- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
-- 🚀 v2.8 now we have a client that runs across all platforms!
+- 🚀 v2.10.1 support Google Gemini Pro model.
- 🚀 v2.9.11 you can use azure endpoint now.
+- 🚀 v2.8 now we have a client that runs across all platforms!
+- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
+- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
## 主要功能
@@ -360,9 +361,11 @@ If you want to add a new translation, read this [document](./docs/translation.md
[@Licoy](https://github.com/Licoy)
[@shangmin2009](https://github.com/shangmin2009)
-### Contributor
+### Contributors
-[Contributors](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
+
+
+
## LICENSE
diff --git a/README_CN.md b/README_CN.md
index 33acb44a3..bc1453778 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -5,7 +5,7 @@
一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。
-[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt)
+[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
diff --git a/app/api/cors/[...path]/route.ts b/app/api/cors/[...path]/route.ts
index 0217b12b0..1f70d6630 100644
--- a/app/api/cors/[...path]/route.ts
+++ b/app/api/cors/[...path]/route.ts
@@ -40,4 +40,4 @@ export const POST = handle;
export const GET = handle;
export const OPTIONS = handle;
-export const runtime = "nodejs";
+export const runtime = "edge";
diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts
index 869bd5076..ebd192891 100644
--- a/app/api/google/[...path]/route.ts
+++ b/app/api/google/[...path]/route.ts
@@ -101,19 +101,14 @@ export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
- "arn1",
"bom1",
- "cdg1",
"cle1",
"cpt1",
- "dub1",
- "fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
- "lhr1",
"pdx1",
"sfo1",
"sin1",
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index c35e93cb3..f0f63659f 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -9,6 +9,7 @@ import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import Locale from "../../locales";
import { getServerSideConfig } from "@/app/config/server";
+import de from "@/app/locales/de";
export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
@@ -20,6 +21,7 @@ export class GeminiProApi implements LLMApi {
);
}
async chat(options: ChatOptions): Promise
{
+ const apiClient = this;
const messages = options.messages.map((v) => ({
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: [{ text: v.content }],
@@ -57,12 +59,29 @@ export class GeminiProApi implements LLMApi {
topP: modelConfig.top_p,
// "topK": modelConfig.top_k,
},
+ safetySettings: [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ ],
};
console.log("[Request] google payload: ", requestPayload);
- // todo: support stream later
- const shouldStream = false;
+ const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
@@ -82,13 +101,23 @@ export class GeminiProApi implements LLMApi {
if (shouldStream) {
let responseText = "";
let remainText = "";
+ let streamChatPath = chatPath.replace(
+ "generateContent",
+ "streamGenerateContent",
+ );
let finished = false;
+ let existingTexts: string[] = [];
+ const finish = () => {
+ finished = true;
+ options.onFinish(existingTexts.join(""));
+ };
+
+      // animate response to make it look smooth
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
- console.log("[Response Animation] finished");
+ finish();
return;
}
@@ -105,88 +134,56 @@ export class GeminiProApi implements LLMApi {
// start animaion
animateResponseText();
+ fetch(streamChatPath, chatPayload)
+ .then((response) => {
+ const reader = response?.body?.getReader();
+ const decoder = new TextDecoder();
+ let partialData = "";
- const finish = () => {
- if (!finished) {
- finished = true;
- options.onFinish(responseText + remainText);
- }
- };
+ return reader?.read().then(function processText({
+ done,
+ value,
+ }): Promise {
+ if (done) {
+ console.log("Stream complete");
+ // options.onFinish(responseText + remainText);
+ finished = true;
+ return Promise.resolve();
+ }
- controller.signal.onabort = finish;
+ partialData += decoder.decode(value, { stream: true });
- fetchEventSource(chatPath, {
- ...chatPayload,
- async onopen(res) {
- clearTimeout(requestTimeoutId);
- const contentType = res.headers.get("content-type");
- console.log(
- "[OpenAI] request response content type: ",
- contentType,
- );
-
- if (contentType?.startsWith("text/plain")) {
- responseText = await res.clone().text();
- return finish();
- }
-
- if (
- !res.ok ||
- !res.headers
- .get("content-type")
- ?.startsWith(EventStreamContentType) ||
- res.status !== 200
- ) {
- const responseTexts = [responseText];
- let extraInfo = await res.clone().text();
try {
- const resJson = await res.clone().json();
- extraInfo = prettyObject(resJson);
- } catch {}
+ let data = JSON.parse(ensureProperEnding(partialData));
- if (res.status === 401) {
- responseTexts.push(Locale.Error.Unauthorized);
+ const textArray = data.reduce(
+ (acc: string[], item: { candidates: any[] }) => {
+ const texts = item.candidates.map((candidate) =>
+ candidate.content.parts
+ .map((part: { text: any }) => part.text)
+ .join(""),
+ );
+ return acc.concat(texts);
+ },
+ [],
+ );
+
+ if (textArray.length > existingTexts.length) {
+ const deltaArray = textArray.slice(existingTexts.length);
+ existingTexts = textArray;
+ remainText += deltaArray.join("");
+ }
+ } catch (error) {
+ // console.log("[Response Animation] error: ", error,partialData);
+ // skip error message when parsing json
}
- if (extraInfo) {
- responseTexts.push(extraInfo);
- }
-
- responseText = responseTexts.join("\n\n");
-
- return finish();
- }
- },
- onmessage(msg) {
- if (msg.data === "[DONE]" || finished) {
- return finish();
- }
- const text = msg.data;
- try {
- const json = JSON.parse(text) as {
- choices: Array<{
- delta: {
- content: string;
- };
- }>;
- };
- const delta = json.choices[0]?.delta?.content;
- if (delta) {
- remainText += delta;
- }
- } catch (e) {
- console.error("[Request] parse error", text);
- }
- },
- onclose() {
- finish();
- },
- onerror(e) {
- options.onError?.(e);
- throw e;
- },
- openWhenHidden: true,
- });
+ return reader.read().then(processText);
+ });
+ })
+ .catch((error) => {
+ console.error("Error:", error);
+ });
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
@@ -220,3 +217,10 @@ export class GeminiProApi implements LLMApi {
return "/api/google/" + path;
}
}
+
+function ensureProperEnding(str: string) {
+ if (str.startsWith("[") && !str.endsWith("]")) {
+ return str + "]";
+ }
+ return str;
+}
diff --git a/app/config/server.ts b/app/config/server.ts
index c6251a5c2..c455d0b73 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -89,6 +89,8 @@ export const getServerSideConfig = () => {
googleApiKey: process.env.GOOGLE_API_KEY,
googleUrl: process.env.GOOGLE_URL,
+ gtmId: process.env.GTM_ID,
+
needCode: ACCESS_CODES.size > 0,
code: process.env.CODE,
codes: ACCESS_CODES,
diff --git a/app/constant.ts b/app/constant.ts
index 7668381c1..8f2518140 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -87,8 +87,7 @@ export const Azure = {
};
export const Google = {
- ExampleEndpoint:
- "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
+ ExampleEndpoint: "https://generativelanguage.googleapis.com/",
ChatPath: "v1beta/models/gemini-pro:generateContent",
// /api/openai/v1/chat/completions
@@ -108,7 +107,9 @@ export const SUMMARIZE_MODEL = "gpt-3.5-turbo";
export const KnowledgeCutOffDate: Record = {
default: "2021-09",
+ "gpt-4-turbo-preview": "2023-04",
"gpt-4-1106-preview": "2023-04",
+ "gpt-4-0125-preview": "2023-04",
"gpt-4-vision-preview": "2023-04",
};
@@ -167,6 +168,15 @@ export const DEFAULT_MODELS = [
providerType: "openai",
},
},
+ {
+ name: "gpt-4-turbo-preview",
+ available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
+ },
{
name: "gpt-4-1106-preview",
available: true,
@@ -176,6 +186,15 @@ export const DEFAULT_MODELS = [
providerType: "openai",
},
},
+ {
+ name: "gpt-4-0125-preview",
+ available: true,
+ provider: {
+ id: "openai",
+ providerName: "OpenAI",
+ providerType: "openai",
+ },
+ },
{
name: "gpt-4-vision-preview",
available: true,
diff --git a/app/layout.tsx b/app/layout.tsx
index 3ad84a779..a2c85fd47 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -4,6 +4,10 @@ import "./styles/markdown.scss";
import "./styles/highlight.scss";
import { getClientConfig } from "./config/client";
import { type Metadata } from "next";
+import { SpeedInsights } from "@vercel/speed-insights/next";
+import { getServerSideConfig } from "./config/server";
+import { GoogleTagManager } from "@next/third-parties/google";
+const serverConfig = getServerSideConfig();
export const metadata: Metadata = {
@@ -36,7 +40,19 @@ export default function RootLayout({
- {children}
+
+ {children}
+ {serverConfig?.isVercel && (
+ <>
+
+ >
+ )}
+ {serverConfig?.gtmId && (
+ <>
+
+ >
+ )}
+