diff --git a/.env.template b/.env.template
index 89bab2cb1..166cc4ef4 100644
--- a/.env.template
+++ b/.env.template
@@ -14,8 +14,8 @@ PROXY_URL=http://localhost:7890
GOOGLE_API_KEY=
# (optional)
-# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
-# Googel Gemini Pro API url, set if you want to customize Google Gemini Pro API url.
+# Default: https://generativelanguage.googleapis.com/
+# Google Gemini Pro API URL without the pathname; set this if you want to customize the Google Gemini Pro API URL.
GOOGLE_URL=
# Override openai api request base url. (optional)
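For context on the change above: with the new format, `GOOGLE_URL` holds only the base URL, and the fixed Gemini chat path is appended at request time. A minimal sketch of that joining, assuming a simple trailing-slash trim (the app's actual URL assembly may differ):

```ts
// Hypothetical sketch: combine GOOGLE_URL (a base URL without a pathname)
// with the fixed chat path from app/constant.ts.
const googleUrl = "https://generativelanguage.googleapis.com/";
const chatPath = "v1beta/models/gemini-pro:generateContent";

// Trim any trailing slash on the base, then append the path.
const endpoint = googleUrl.replace(/\/+$/, "") + "/" + chatPath;
// => "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"
```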
diff --git a/README.md b/README.md
index 69b649926..fec3ec108 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-

+
NextChat (ChatGPT Next Web)
@@ -61,10 +61,11 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
## What's New
- 🚀 v2.0 is released, now you can create prompt templates and turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
- 🚀 v2.7 lets you share conversations as images, or share to ShareGPT!
- 🚀 v2.8 now ships a client that runs across all platforms!
+- 🚀 v2.10.1 now supports the Google Gemini Pro model.
- 🚀 v2.9.11 you can now use the Azure endpoint.
+- 🚀 v2.8 now ships a client that runs across all platforms!
+- 🚀 v2.7 lets you share conversations as images, or share to ShareGPT!
+- 🚀 v2.0 is released, now you can create prompt templates and turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
## Main Features
@@ -360,9 +361,11 @@ If you want to add a new translation, read this [document](./docs/translation.md
[@Licoy](https://github.com/Licoy)
[@shangmin2009](https://github.com/shangmin2009)
-### Contributor
+### Contributors
-[Contributors](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
+<a href="https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=Yidadaa/ChatGPT-Next-Web" />
+</a>
## LICENSE
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index c35e93cb3..f0f63659f 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -9,6 +9,7 @@ import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import Locale from "../../locales";
import { getServerSideConfig } from "@/app/config/server";
export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
@@ -20,6 +21,7 @@ export class GeminiProApi implements LLMApi {
);
}
async chat(options: ChatOptions): Promise<void> {
+ const apiClient = this;
const messages = options.messages.map((v) => ({
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: [{ text: v.content }],
@@ -57,12 +59,29 @@ export class GeminiProApi implements LLMApi {
topP: modelConfig.top_p,
// "topK": modelConfig.top_k,
},
+ safetySettings: [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_ONLY_HIGH",
+ },
+ ],
};
console.log("[Request] google payload: ", requestPayload);
- // todo: support stream later
- const shouldStream = false;
+ const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
@@ -82,13 +101,23 @@ export class GeminiProApi implements LLMApi {
if (shouldStream) {
let responseText = "";
let remainText = "";
+ const streamChatPath = chatPath.replace(
+ "generateContent",
+ "streamGenerateContent",
+ );
let finished = false;
+ let existingTexts: string[] = [];
+ const finish = () => {
+ finished = true;
+ options.onFinish(existingTexts.join(""));
+ };
+
// animate response to make it look smooth
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
- console.log("[Response Animation] finished");
+ finish();
return;
}
@@ -105,88 +134,56 @@ export class GeminiProApi implements LLMApi {
// start animation
animateResponseText();
+ fetch(streamChatPath, chatPayload)
+ .then((response) => {
+ const reader = response?.body?.getReader();
+ const decoder = new TextDecoder();
+ let partialData = "";
- const finish = () => {
- if (!finished) {
- finished = true;
- options.onFinish(responseText + remainText);
- }
- };
+ return reader?.read().then(function processText({
+ done,
+ value,
+ }): Promise<any> {
+ if (done) {
+ console.log("Stream complete");
+ // options.onFinish(responseText + remainText);
+ finished = true;
+ return Promise.resolve();
+ }
- controller.signal.onabort = finish;
+ partialData += decoder.decode(value, { stream: true });
- fetchEventSource(chatPath, {
- ...chatPayload,
- async onopen(res) {
- clearTimeout(requestTimeoutId);
- const contentType = res.headers.get("content-type");
- console.log(
- "[OpenAI] request response content type: ",
- contentType,
- );
-
- if (contentType?.startsWith("text/plain")) {
- responseText = await res.clone().text();
- return finish();
- }
-
- if (
- !res.ok ||
- !res.headers
- .get("content-type")
- ?.startsWith(EventStreamContentType) ||
- res.status !== 200
- ) {
- const responseTexts = [responseText];
- let extraInfo = await res.clone().text();
try {
- const resJson = await res.clone().json();
- extraInfo = prettyObject(resJson);
- } catch {}
+ const data = JSON.parse(ensureProperEnding(partialData));
- if (res.status === 401) {
- responseTexts.push(Locale.Error.Unauthorized);
+ const textArray = data.reduce(
+ (acc: string[], item: { candidates: any[] }) => {
+ const texts = item.candidates.map((candidate) =>
+ candidate.content.parts
+ .map((part: { text: any }) => part.text)
+ .join(""),
+ );
+ return acc.concat(texts);
+ },
+ [],
+ );
+
+ if (textArray.length > existingTexts.length) {
+ const deltaArray = textArray.slice(existingTexts.length);
+ existingTexts = textArray;
+ remainText += deltaArray.join("");
+ }
+ } catch (error) {
+ // Parse errors are expected while the streamed JSON array is incomplete;
+ // skip this chunk and wait for more data.
}
- if (extraInfo) {
- responseTexts.push(extraInfo);
- }
-
- responseText = responseTexts.join("\n\n");
-
- return finish();
- }
- },
- onmessage(msg) {
- if (msg.data === "[DONE]" || finished) {
- return finish();
- }
- const text = msg.data;
- try {
- const json = JSON.parse(text) as {
- choices: Array<{
- delta: {
- content: string;
- };
- }>;
- };
- const delta = json.choices[0]?.delta?.content;
- if (delta) {
- remainText += delta;
- }
- } catch (e) {
- console.error("[Request] parse error", text);
- }
- },
- onclose() {
- finish();
- },
- onerror(e) {
- options.onError?.(e);
- throw e;
- },
- openWhenHidden: true,
- });
+ return reader.read().then(processText);
+ });
+ })
+ .catch((error) => {
+ console.error("Error:", error);
+ });
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
@@ -220,3 +217,10 @@ export class GeminiProApi implements LLMApi {
return "/api/google/" + path;
}
}
+
+function ensureProperEnding(str: string) {
+ if (str.startsWith("[") && !str.endsWith("]")) {
+ return str + "]";
+ }
+ return str;
+}
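A note on the streaming approach that replaces the old SSE handler: `streamGenerateContent` returns a single JSON array that grows chunk by chunk, so the code accumulates bytes into `partialData`, force-closes the array with `ensureProperEnding`, and re-parses on every chunk; a parse failure simply means the buffer cut off mid-object and more data is needed. A self-contained sketch of that loop, slightly simplified (candidates are joined per array element, and the sample chunk is illustrative, not a real API response):

```ts
// ensureProperEnding as defined above: close a partial JSON array.
function ensureProperEnding(str: string): string {
  if (str.startsWith("[") && !str.endsWith("]")) {
    return str + "]";
  }
  return str;
}

let partialData = "";
let existingTexts: string[] = [];

// Feed one decoded chunk; return the newly arrived text, if any.
function onChunk(chunk: string): string {
  partialData += chunk;
  try {
    const data = JSON.parse(ensureProperEnding(partialData));
    const textArray = data.map((item: { candidates: any[] }) =>
      item.candidates
        .map((c: any) =>
          c.content.parts.map((p: { text: string }) => p.text).join(""),
        )
        .join(""),
    );
    if (textArray.length > existingTexts.length) {
      const delta = textArray.slice(existingTexts.length).join("");
      existingTexts = textArray;
      return delta;
    }
  } catch {
    // Incomplete JSON: wait for the next chunk.
  }
  return "";
}

// A chunk that completes the first array element yields its text:
onChunk(`[{"candidates":[{"content":{"parts":[{"text":"Hel"}]}}]}`); // "Hel"
```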
diff --git a/app/constant.ts b/app/constant.ts
index b64a6cd58..03d745d11 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -91,8 +91,7 @@ export const Azure = {
};
export const Google = {
- ExampleEndpoint:
- "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
+ ExampleEndpoint: "https://generativelanguage.googleapis.com/",
ChatPath: "v1beta/models/gemini-pro:generateContent",
// /api/openai/v1/chat/completions
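Note that the streaming endpoint is not stored as a second constant; google.ts derives it from `ChatPath` by swapping the method name, as sketched here:

```ts
// Mirrors the replace() call in app/client/platforms/google.ts.
const ChatPath = "v1beta/models/gemini-pro:generateContent";
const streamChatPath = ChatPath.replace(
  "generateContent",
  "streamGenerateContent",
);
// => "v1beta/models/gemini-pro:streamGenerateContent"
```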
diff --git a/app/layout.tsx b/app/layout.tsx
index c846a7172..f46ca2ad1 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -4,6 +4,10 @@ import "./styles/markdown.scss";
import "./styles/highlight.scss";
import { getClientConfig } from "./config/client";
import { type Metadata } from "next";
+import { SpeedInsights } from "@vercel/speed-insights/next";
+import { getServerSideConfig } from "./config/server";
+
+const serverConfig = getServerSideConfig();
import { Providers } from "@/app/providers";
import { Viewport } from "next";
@@ -43,6 +47,11 @@ export default function RootLayout({
{children}
+        {serverConfig?.isVercel && (
+          <>
+            <SpeedInsights />
+          </>
+        )}
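One design note on the layout change: the Speed Insights widget is gated on `serverConfig?.isVercel` rather than rendered unconditionally, presumably because its data collection is only meaningful on Vercel deployments. A minimal sketch of the same gating pattern, with `isVercel` standing in for the flag read from `getServerSideConfig()`:

```tsx
import { SpeedInsights } from "@vercel/speed-insights/next";

// Hypothetical helper: render Vercel-only instrumentation conditionally.
// `isVercel` mirrors serverConfig.isVercel from the layout above.
export function VercelInstrumentation({ isVercel }: { isVercel: boolean }) {
  return isVercel ? <SpeedInsights /> : null;
}
```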