diff --git a/.env.template b/.env.template index 89bab2cb1..166cc4ef4 100644 --- a/.env.template +++ b/.env.template @@ -14,8 +14,8 @@ PROXY_URL=http://localhost:7890 GOOGLE_API_KEY= # (optional) -# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent -# Googel Gemini Pro API url, set if you want to customize Google Gemini Pro API url. +# Default: https://generativelanguage.googleapis.com/ +# Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url. GOOGLE_URL= # Override openai api request base url. (optional) diff --git a/README.md b/README.md index 69b649926..fec3ec108 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@
-icon +icon

NextChat (ChatGPT Next Web)

@@ -61,10 +61,11 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 ## What's New -- πŸš€ v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/). -- πŸš€ v2.7 let's share conversations as image, or share to ShareGPT! -- πŸš€ v2.8 now we have a client that runs across all platforms! +- πŸš€ v2.10.1 support Google Gemini Pro model. - πŸš€ v2.9.11 you can use azure endpoint now. +- πŸš€ v2.8 now we have a client that runs across all platforms! +- πŸš€ v2.7 let's share conversations as image, or share to ShareGPT! +- πŸš€ v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/). ## δΈ»θ¦εŠŸθƒ½ @@ -360,9 +361,11 @@ If you want to add a new translation, read this [document](./docs/translation.md [@Licoy](https://github.com/Licoy) [@shangmin2009](https://github.com/shangmin2009) -### Contributor +### Contributors -[Contributors](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) + + + ## LICENSE diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index c35e93cb3..f0f63659f 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -9,6 +9,7 @@ import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import Locale from "../../locales"; import { getServerSideConfig } from "@/app/config/server"; +import de from "@/app/locales/de"; export class GeminiProApi implements LLMApi { extractMessage(res: any) { console.log("[Response] gemini-pro response: ", res); @@ -20,6 +21,7 @@ export class GeminiProApi implements LLMApi { ); } async chat(options: ChatOptions): 
Promise { + const apiClient = this; const messages = options.messages.map((v) => ({ role: v.role.replace("assistant", "model").replace("system", "user"), parts: [{ text: v.content }], @@ -57,12 +59,29 @@ export class GeminiProApi implements LLMApi { topP: modelConfig.top_p, // "topK": modelConfig.top_k, }, + safetySettings: [ + { + category: "HARM_CATEGORY_HARASSMENT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_HATE_SPEECH", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_DANGEROUS_CONTENT", + threshold: "BLOCK_ONLY_HIGH", + }, + ], }; console.log("[Request] google payload: ", requestPayload); - // todo: support stream later - const shouldStream = false; + const shouldStream = !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { @@ -82,13 +101,23 @@ export class GeminiProApi implements LLMApi { if (shouldStream) { let responseText = ""; let remainText = ""; + let streamChatPath = chatPath.replace( + "generateContent", + "streamGenerateContent", + ); let finished = false; + let existingTexts: string[] = []; + const finish = () => { + finished = true; + options.onFinish(existingTexts.join("")); + }; + // animate response to make it looks smooth function animateResponseText() { if (finished || controller.signal.aborted) { responseText += remainText; - console.log("[Response Animation] finished"); + finish(); return; } @@ -105,88 +134,56 @@ export class GeminiProApi implements LLMApi { // start animaion animateResponseText(); + fetch(streamChatPath, chatPayload) + .then((response) => { + const reader = response?.body?.getReader(); + const decoder = new TextDecoder(); + let partialData = ""; - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; + return reader?.read().then(function processText({ + done, + value, + }): 
Promise { + if (done) { + console.log("Stream complete"); + // options.onFinish(responseText + remainText); + finished = true; + return Promise.resolve(); + } - controller.signal.onabort = finish; + partialData += decoder.decode(value, { stream: true }); - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} + let data = JSON.parse(ensureProperEnding(partialData)); - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); + const textArray = data.reduce( + (acc: string[], item: { candidates: any[] }) => { + const texts = item.candidates.map((candidate) => + candidate.content.parts + .map((part: { text: any }) => part.text) + .join(""), + ); + return acc.concat(texts); + }, + [], + ); + + if (textArray.length > existingTexts.length) { + const deltaArray = textArray.slice(existingTexts.length); + existingTexts = textArray; + remainText += deltaArray.join(""); + } + } catch (error) { + // console.log("[Response Animation] error: ", error,partialData); + // skip error message when parsing json } - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text) as { - choices: Array<{ - delta: { - content: string; - }; - }>; - }; - 
const delta = json.choices[0]?.delta?.content; - if (delta) { - remainText += delta; - } - } catch (e) { - console.error("[Request] parse error", text); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); + return reader.read().then(processText); + }); + }) + .catch((error) => { + console.error("Error:", error); + }); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); @@ -220,3 +217,10 @@ export class GeminiProApi implements LLMApi { return "/api/google/" + path; } } + +function ensureProperEnding(str: string) { + if (str.startsWith("[") && !str.endsWith("]")) { + return str + "]"; + } + return str; +} diff --git a/app/constant.ts b/app/constant.ts index b64a6cd58..03d745d11 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -91,8 +91,7 @@ export const Azure = { }; export const Google = { - ExampleEndpoint: - "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent", + ExampleEndpoint: "https://generativelanguage.googleapis.com/", ChatPath: "v1beta/models/gemini-pro:generateContent", // /api/openai/v1/chat/completions diff --git a/app/layout.tsx b/app/layout.tsx index c846a7172..f46ca2ad1 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -4,6 +4,10 @@ import "./styles/markdown.scss"; import "./styles/highlight.scss"; import { getClientConfig } from "./config/client"; import { type Metadata } from "next"; +import { SpeedInsights } from "@vercel/speed-insights/next"; +import { getServerSideConfig } from "./config/server"; + +const serverConfig = getServerSideConfig(); import { Providers } from "@/app/providers"; import { Viewport } from "next"; @@ -43,6 +47,11 @@ export default function RootLayout({ {children} + {serverConfig?.isVercel && ( + <> + + + )} ); diff --git a/app/locales/cn.ts b/app/locales/cn.ts index 04ffb5b7a..8f41eecd8 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -362,7 +362,7 @@ const cn = { 
Endpoint: { Title: "ζŽ₯ε£εœ°ε€", - SubTitle: "ζ ·δΎ‹οΌš", + SubTitle: "δΈεŒ…ε«θ―·ζ±‚θ·―εΎ„οΌŒζ ·δΎ‹οΌš", }, ApiVerion: { diff --git a/app/page.tsx b/app/page.tsx index 35aa33e7e..ac5999ee7 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -18,7 +18,11 @@ export default async function App() { return ( <> - {serverConfig?.isVercel && } + {serverConfig?.isVercel && ( + <> + + + )} ); } diff --git a/app/store/chat.ts b/app/store/chat.ts index af6b75372..504e3f21f 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -689,7 +689,9 @@ export const useChatStore = createPersistStore( const contextPrompts = session.mask.context.slice(); // system prompts, to get close to OpenAI Web ChatGPT - const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts; + const shouldInjectSystemPrompts = + modelConfig.enableInjectSystemPrompts && + session.mask.modelConfig.model.startsWith("gpt-"); var systemPrompts: ChatMessage[] = []; systemPrompts = shouldInjectSystemPrompts diff --git a/docs/images/head-cover.png b/docs/images/head-cover.png new file mode 100644 index 000000000..859d83b05 Binary files /dev/null and b/docs/images/head-cover.png differ diff --git a/package.json b/package.json index 78b37ce8b..9c71028f2 100644 --- a/package.json +++ b/package.json @@ -24,6 +24,7 @@ "@tremor/react": "^3.12.1", "@vercel/analytics": "^1.1.1", "echarts": "^5.4.3", + "@vercel/speed-insights": "^1.0.2", "emoji-picker-react": "^4.5.15", "fuse.js": "^7.0.0", "html-to-image": "^1.11.11", diff --git a/public/android-chrome-192x192.png b/public/android-chrome-192x192.png index 700c48286..b191a58ac 100644 Binary files a/public/android-chrome-192x192.png and b/public/android-chrome-192x192.png differ diff --git a/public/android-chrome-512x512.png b/public/android-chrome-512x512.png index e701ed2fb..c7e52c394 100644 Binary files a/public/android-chrome-512x512.png and b/public/android-chrome-512x512.png differ diff --git a/public/apple-touch-icon.png b/public/apple-touch-icon.png index 
387303114..b0da95315 100644 Binary files a/public/apple-touch-icon.png and b/public/apple-touch-icon.png differ diff --git a/public/favicon-16x16.png b/public/favicon-16x16.png index 92f53492f..3f8e0a535 100644 Binary files a/public/favicon-16x16.png and b/public/favicon-16x16.png differ diff --git a/public/favicon-32x32.png b/public/favicon-32x32.png index f1f439e85..2fee10dfb 100644 Binary files a/public/favicon-32x32.png and b/public/favicon-32x32.png differ diff --git a/public/favicon.ico b/public/favicon.ico index a3737b350..b5e8234cd 100644 Binary files a/public/favicon.ico and b/public/favicon.ico differ diff --git a/public/macos.png b/public/macos.png index f1bd0e69f..2eb110707 100644 Binary files a/public/macos.png and b/public/macos.png differ