Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-23 17:26:47 +08:00)

Commit: Merge remote-tracking branch 'upstream/main' into klaas20240303
app/client/api.ts
@@ -1,17 +1,30 @@
import { getClientConfig } from "../config/client";
import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
import { ChatMessage, ModelType, useAccessStore } from "../store";
import {
  ACCESS_CODE_PREFIX,
  Azure,
  ModelProvider,
  ServiceProvider,
} from "../constant";
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
import { GeminiProApi } from "./platforms/google";

export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];

export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export type ChatModel = ModelType;

export interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: {
    url: string;
  };
}

export interface RequestMessage {
  role: MessageRole;
  content: string;
  content: string | MultimodalContent[];
}

export interface LLMConfig {
@@ -41,6 +54,13 @@ export interface LLMUsage {
export interface LLMModel {
  name: string;
  available: boolean;
  provider: LLMModelProvider;
}

export interface LLMModelProvider {
  id: string;
  providerName: string;
  providerType: string;
}

export abstract class LLMApi {
@@ -73,7 +93,11 @@ interface ChatProvider {
export class ClientApi {
  public llm: LLMApi;

  constructor() {
  constructor(provider: ModelProvider = ModelProvider.GPT) {
    if (provider === ModelProvider.GeminiPro) {
      this.llm = new GeminiProApi();
      return;
    }
    this.llm = new ChatGPTApi();
  }

@@ -93,7 +117,7 @@ export class ClientApi {
      {
        from: "human",
        value:
          "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web",
          "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
      },
    ]);
    // A notice to developers of derivative projects: for the sake of open-source LLM development, please do not modify the message above; it is used for later data cleaning.
@@ -123,32 +147,38 @@ export class ClientApi {
  }
}

export const api = new ClientApi();

export function getHeaders() {
  const accessStore = useAccessStore.getState();
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    "x-requested-with": "XMLHttpRequest",
    Accept: "application/json",
  };

  const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
  const isGoogle = modelConfig.model.startsWith("gemini");
  const isAzure = accessStore.provider === ServiceProvider.Azure;
  const authHeader = isAzure ? "api-key" : "Authorization";
  const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
  const apiKey = isGoogle
    ? accessStore.googleApiKey
    : isAzure
    ? accessStore.azureApiKey
    : accessStore.openaiApiKey;
  const clientConfig = getClientConfig();
  const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
  const validString = (x: string) => x && x.length > 0;

  // use user's api key first
  if (validString(apiKey)) {
    headers[authHeader] = makeBearer(apiKey);
  } else if (
    accessStore.enabledAccessControl() &&
    validString(accessStore.accessCode)
  ) {
    headers[authHeader] = makeBearer(
      ACCESS_CODE_PREFIX + accessStore.accessCode,
    );
  // when using the Google API in the app, do not set the auth header
  if (!(isGoogle && clientConfig?.isApp)) {
    // use user's api key first
    if (validString(apiKey)) {
      headers[authHeader] = makeBearer(apiKey);
    } else if (
      accessStore.enabledAccessControl() &&
      validString(accessStore.accessCode)
    ) {
      headers[authHeader] = makeBearer(
        ACCESS_CODE_PREFIX + accessStore.accessCode,
      );
    }
  }

  return headers;
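With this change, ClientApi takes a ModelProvider (defaulting to GPT) and getHeaders() picks the right key for Google, Azure, or OpenAI. A minimal sketch of how a call site might choose the Gemini client — the buildClient helper and model-name check below are illustrative only, mirroring the startsWith("gemini") test used in getHeaders() above, not code from this commit:

import { ClientApi } from "./api";
import { ModelProvider } from "../constant";

// Illustrative helper: map a model name to the matching client.
function buildClient(model: string): ClientApi {
  const provider = model.startsWith("gemini")
    ? ModelProvider.GeminiPro
    : ModelProvider.GPT; // same default the constructor uses
  return new ClientApi(provider);
}

const client = buildClient("gemini-pro");
// client.llm is a GeminiProApi here; for any other model it is a ChatGPTApi.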
app/client/platforms/google.ts (new file, 278 lines)
@@ -0,0 +1,278 @@
import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";

export class GeminiProApi implements LLMApi {
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    return (
      res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
      res?.error?.message ||
      ""
    );
  }
  async chat(options: ChatOptions): Promise<void> {
    // const apiClient = this;
    const visionModel = isVisionModel(options.config.model);
    let multimodal = false;
    const messages = options.messages.map((v) => {
      let parts: any[] = [{ text: getMessageTextContent(v) }];
      if (visionModel) {
        const images = getMessageImages(v);
        if (images.length > 0) {
          multimodal = true;
          parts = parts.concat(
            images.map((image) => {
              const imageType = image.split(";")[0].split(":")[1];
              const imageData = image.split(",")[1];
              return {
                inline_data: {
                  mime_type: imageType,
                  data: imageData,
                },
              };
            }),
          );
        }
      }
      return {
        role: v.role.replace("assistant", "model").replace("system", "user"),
        parts: parts,
      };
    });

    // google requires that role in neighboring messages must not be the same
    for (let i = 0; i < messages.length - 1; ) {
      // Check if current and next item both have the role "model"
      if (messages[i].role === messages[i + 1].role) {
        // Concatenate the 'parts' of the current and next item
        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
        // Remove the next item
        messages.splice(i + 1, 1);
      } else {
        // Move to the next item
        i++;
      }
    }
    // if (visionModel && messages.length > 1) {
    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
    // }
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
    const requestPayload = {
      contents: messages,
      generationConfig: {
        // stopSequences: [
        //   "Title"
        // ],
        temperature: modelConfig.temperature,
        maxOutputTokens: modelConfig.max_tokens,
        topP: modelConfig.top_p,
        // "topK": modelConfig.top_k,
      },
      safetySettings: [
        {
          category: "HARM_CATEGORY_HARASSMENT",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
          threshold: "BLOCK_ONLY_HIGH",
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: "BLOCK_ONLY_HIGH",
        },
      ],
    };

    const accessStore = useAccessStore.getState();
    let baseUrl = accessStore.googleUrl;
    const isApp = !!getClientConfig()?.isApp;

    let shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      let googleChatPath = visionModel
        ? Google.VisionChatPath
        : Google.ChatPath;
      let chatPath = this.path(googleChatPath);

      // let baseUrl = accessStore.googleUrl;

      if (!baseUrl) {
        baseUrl = isApp
          ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath
          : chatPath;
      }

      if (isApp) {
        baseUrl += `?key=${accessStore.googleApiKey}`;
      }
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        let existingTexts: string[] = [];
        const finish = () => {
          finished = true;
          options.onFinish(existingTexts.join(""));
        };

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            finish();
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        fetch(
          baseUrl.replace("generateContent", "streamGenerateContent"),
          chatPayload,
        )
          .then((response) => {
            const reader = response?.body?.getReader();
            const decoder = new TextDecoder();
            let partialData = "";

            return reader?.read().then(function processText({
              done,
              value,
            }): Promise<any> {
              if (done) {
                if (response.status !== 200) {
                  try {
                    let data = JSON.parse(ensureProperEnding(partialData));
                    if (data && data[0].error) {
                      options.onError?.(new Error(data[0].error.message));
                    } else {
                      options.onError?.(new Error("Request failed"));
                    }
                  } catch (_) {
                    options.onError?.(new Error("Request failed"));
                  }
                }

                console.log("Stream complete");
                // options.onFinish(responseText + remainText);
                finished = true;
                return Promise.resolve();
              }

              partialData += decoder.decode(value, { stream: true });

              try {
                let data = JSON.parse(ensureProperEnding(partialData));

                const textArray = data.reduce(
                  (acc: string[], item: { candidates: any[] }) => {
                    const texts = item.candidates.map((candidate) =>
                      candidate.content.parts
                        .map((part: { text: any }) => part.text)
                        .join(""),
                    );
                    return acc.concat(texts);
                  },
                  [],
                );

                if (textArray.length > existingTexts.length) {
                  const deltaArray = textArray.slice(existingTexts.length);
                  existingTexts = textArray;
                  remainText += deltaArray.join("");
                }
              } catch (error) {
                // console.log("[Response Animation] error: ", error,partialData);
                // skip error message when parsing json
              }

              return reader.read().then(processText);
            });
          })
          .catch((error) => {
            console.error("Error:", error);
          });
      } else {
        const res = await fetch(baseUrl, chatPayload);
        clearTimeout(requestTimeoutId);
        const resJson = await res.json();
        if (resJson?.promptFeedback?.blockReason) {
          // being blocked
          options.onError?.(
            new Error(
              "Message is being blocked for reason: " +
                resJson.promptFeedback.blockReason,
            ),
          );
        }
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }
  async models(): Promise<LLMModel[]> {
    return [];
  }
  path(path: string): string {
    return "/api/google/" + path;
  }
}

function ensureProperEnding(str: string) {
  if (str.startsWith("[") && !str.endsWith("]")) {
    return str + "]";
  }
  return str;
}
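In chat() above, the OpenAI-style history is reshaped for Gemini: "assistant" becomes "model", "system" is folded into "user", and neighboring turns that end up with the same role are merged, because the Gemini API rejects consecutive messages from one role. A standalone sketch of that normalization step, with an illustrative GeminiMessage type and sample data that are not part of this commit:

type GeminiMessage = { role: string; parts: { text: string }[] };

function normalizeRoles(messages: GeminiMessage[]): GeminiMessage[] {
  // map OpenAI roles onto the two roles Gemini accepts
  const out = messages.map((m) => ({
    ...m,
    role: m.role.replace("assistant", "model").replace("system", "user"),
  }));
  // merge neighbors that share a role, exactly like the for-loop above
  for (let i = 0; i < out.length - 1; ) {
    if (out[i].role === out[i + 1].role) {
      out[i].parts = out[i].parts.concat(out[i + 1].parts);
      out.splice(i + 1, 1);
    } else {
      i++;
    }
  }
  return out;
}

// e.g. [system, user, assistant] collapses to [user (two parts), model (one part)]
normalizeRoles([
  { role: "system", parts: [{ text: "You are helpful." }] },
  { role: "user", parts: [{ text: "Hi" }] },
  { role: "assistant", parts: [{ text: "Hello!" }] },
]);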
app/client/platforms/openai.ts
@@ -1,3 +1,4 @@
"use client";
import {
  ApiPath,
  DEFAULT_API_HOST,
@@ -8,7 +9,14 @@ import {
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
@@ -17,6 +25,11 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { makeAzurePath } from "@/app/azure";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";

//# for AdEx custom usage tracking (calls per model per user per datekey)
// import { NextApiRequest, NextApiResponse } from 'next';
@@ -63,7 +76,9 @@ export class ChatGPTApi implements LLMApi {

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
      baseUrl = isApp
        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
        : ApiPath.OpenAI;
    }

    if (baseUrl.endsWith("/")) {
@@ -77,6 +92,8 @@ export class ChatGPTApi implements LLMApi {
      path = makeAzurePath(path, accessStore.azureApiVersion);
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

@@ -85,9 +102,10 @@ export class ChatGPTApi implements LLMApi {
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: v.content,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
@@ -123,6 +141,16 @@ export class ChatGPTApi implements LLMApi {
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };

    // add max_tokens to vision model
    if (visionModel) {
      Object.defineProperty(requestPayload, "max_tokens", {
        enumerable: true,
        configurable: true,
        writable: true,
        value: modelConfig.max_tokens,
      });
    }

    console.log("[Request] openai payload: ", requestPayload);

    const modelIdentifier = modelConfig.model;
@@ -177,6 +205,9 @@ export class ChatGPTApi implements LLMApi {
      if (finished || controller.signal.aborted) {
        responseText += remainText;
        console.log("[Response Animation] finished");
        if (responseText?.length === 0) {
          options.onError?.(new Error("empty response from server"));
        }
        return;
      }

@@ -377,6 +408,11 @@ export class ChatGPTApi implements LLMApi {
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
      },
    }));
  }
}
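For non-vision models the OpenAI client now flattens multimodal content down to plain text before sending it, while vision models receive the full MultimodalContent[] (text plus image_url parts). getMessageTextContent comes from @/app/utils and is not shown in this diff; a plausible sketch of its behavior, consistent with how it is used here but illustrative rather than quoted from the repo:

import { RequestMessage } from "../api";

// Return the text portion of a message, whether content is a plain string
// or an array mixing text and image_url parts.
function getMessageTextContentSketch(message: RequestMessage): string {
  if (typeof message.content === "string") {
    return message.content;
  }
  for (const part of message.content) {
    if (part.type === "text") {
      return part.text ?? "";
    }
  }
  return "";
}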