mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-14 05:03:43 +08:00)
Merge remote-tracking branch 'upstream/main'
@@ -1,7 +1,7 @@
 import { NextRequest } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import md5 from "spark-md5";
-import { ACCESS_CODE_PREFIX } from "../constant";
+import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant";
 
 function getIP(req: NextRequest) {
   let ip = req.ip ?? req.headers.get("x-real-ip");
@@ -16,15 +16,15 @@ function getIP(req: NextRequest) {
 
 function parseApiKey(bearToken: string) {
   const token = bearToken.trim().replaceAll("Bearer ", "").trim();
-  const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX);
+  const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
 
   return {
-    accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
-    apiKey: isOpenAiKey ? token : "",
+    accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
+    apiKey: isApiKey ? token : "",
   };
 }
 
-export function auth(req: NextRequest) {
+export function auth(req: NextRequest, modelProvider: ModelProvider) {
   const authToken = req.headers.get("Authorization") ?? "";
 
   // check if it is openai api key or user token
@@ -49,22 +49,23 @@ export function auth(req: NextRequest) {
   if (serverConfig.hideUserApiKey && !!apiKey) {
     return {
       error: true,
-      msg: "you are not allowed to access openai with your own api key",
+      msg: "you are not allowed to access with your own api key",
     };
   }
 
   // if user does not provide an api key, inject system api key
   if (!apiKey) {
-    const serverApiKey = serverConfig.isAzure
-      ? serverConfig.azureApiKey
-      : serverConfig.apiKey;
+    const serverConfig = getServerSideConfig();
 
-    if (serverApiKey) {
+    const systemApiKey =
+      modelProvider === ModelProvider.GeminiPro
+        ? serverConfig.googleApiKey
+        : serverConfig.isAzure
+        ? serverConfig.azureApiKey
+        : serverConfig.apiKey;
+    if (systemApiKey) {
       console.log("[Auth] use system api key");
-      req.headers.set(
-        "Authorization",
-        `${serverConfig.isAzure ? "" : "Bearer "}${serverApiKey}`,
-      );
+      req.headers.set("Authorization", `Bearer ${systemApiKey}`);
     } else {
       console.log("[Auth] admin did not provide an api key");
    }
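
Note: the rename from isOpenAiKey to isApiKey reflects that the same Authorization header now carries either an access code or a raw provider key (OpenAI or Google). A minimal standalone sketch of that parsing contract — the "nk-" prefix value here is an assumption for illustration; the real constant lives in the app's constants module:

const ACCESS_CODE_PREFIX = "nk-"; // assumed value, for illustration only

function parseApiKey(bearToken: string) {
  const token = bearToken.trim().replaceAll("Bearer ", "").trim();
  const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
  return {
    accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
    apiKey: isApiKey ? token : "",
  };
}

// "Bearer nk-secret" -> { accessCode: "secret", apiKey: "" }
// "Bearer sk-abc123" -> { accessCode: "", apiKey: "sk-abc123" }
console.log(parseApiKey("Bearer nk-secret"));
console.log(parseApiKey("Bearer sk-abc123"));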
@@ -1,6 +1,6 @@
 import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
-import { DEFAULT_MODELS, GOOGLE_BASE_URL, OPENAI_BASE_URL } from "../constant";
+import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant";
 import { collectModelTable } from "../utils/model";
 import { makeAzurePath } from "../azure";
 
@@ -9,7 +9,16 @@ const serverConfig = getServerSideConfig();
 export async function requestOpenai(req: NextRequest) {
   const controller = new AbortController();
 
-  const authValue = req.headers.get("Authorization") ?? "";
+  if (serverConfig.isAzure) {
+    const authValue =
+      req.headers
+        .get("Authorization")
+        ?.trim()
+        .replaceAll("Bearer ", "")
+        .trim() ?? "";
+  } else {
+    const authValue = req.headers.get("Authorization") ?? "";
+  }
   const authHeaderName = serverConfig.isAzure ? "api-key" : "Authorization";
 
   let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll(
@@ -118,73 +127,3 @@ export async function requestOpenai(req: NextRequest) {
     clearTimeout(timeoutId);
   }
 }
-
-export async function requestGoogleGemini(req: NextRequest) {
-  const controller = new AbortController();
-
-  const authValue =
-    req.headers.get("Authorization")?.replace("Bearer ", "") ?? "";
-  const authHeaderName = "x-goog-api-key";
-
-  console.log(req.nextUrl);
-
-  let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
-
-  let baseUrl = serverConfig.googleBaseUrl || GOOGLE_BASE_URL;
-
-  if (!baseUrl.startsWith("http")) {
-    baseUrl = `https://${baseUrl}`;
-  }
-
-  if (baseUrl.endsWith("/")) {
-    baseUrl = baseUrl.slice(0, -1);
-  }
-
-  console.log("[Proxy] ", path);
-  console.log("[Google Base Url]", baseUrl);
-  // this fix [Org ID] undefined in server side if not using custom point
-  if (serverConfig.openaiOrgId !== undefined) {
-    console.log("[Org ID]", serverConfig.openaiOrgId);
-  }
-
-  const timeoutId = setTimeout(
-    () => {
-      controller.abort();
-    },
-    10 * 60 * 1000,
-  );
-
-  const fetchUrl = `${baseUrl}/${path}?alt=sse`;
-  const fetchOptions: RequestInit = {
-    headers: {
-      "Content-Type": "application/json",
-      "Cache-Control": "no-store",
-      [authHeaderName]: authValue,
-    },
-    method: req.method,
-    body: req.body,
-    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
-    redirect: "manual",
-    // @ts-ignore
-    duplex: "half",
-    signal: controller.signal,
-  };
-
-  try {
-    const res = await fetch(fetchUrl, fetchOptions);
-
-    // to prevent browser prompt for credentials
-    const newHeaders = new Headers(res.headers);
-    newHeaders.delete("www-authenticate");
-    // to disable nginx buffering
-    newHeaders.set("X-Accel-Buffering", "no");
-
-    return new Response(res.body, {
-      status: res.status,
-      statusText: res.statusText,
-      headers: newHeaders,
-    });
-  } finally {
-    clearTimeout(timeoutId);
-  }
-}
@@ -1,46 +1,98 @@
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
-import { auth, googleAuth } from "../../auth";
-import { requestGoogleGemini } from "../../common";
+import { auth } from "../../auth";
+import { getServerSideConfig } from "@/app/config/server";
+import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant";
 
 async function handle(
   req: NextRequest,
   { params }: { params: { path: string[] } },
 ) {
-  console.log("[OpenAI Route] params ", params);
+  console.log("[Google Route] params ", params);
 
   if (req.method === "OPTIONS") {
     return NextResponse.json({ body: "OK" }, { status: 200 });
   }
 
-  const subpath = params.path.join("/");
+  const controller = new AbortController();
 
-  // if (!ALLOWD_PATH.has(subpath)) {
-  //   console.log("[OpenAI Route] forbidden path ", subpath);
-  //   return NextResponse.json(
-  //     {
-  //       error: true,
-  //       msg: "you are not allowed to request " + subpath,
-  //     },
-  //     {
-  //       status: 403,
-  //     },
-  //   );
-  // }
+  const serverConfig = getServerSideConfig();
 
-  const authResult = googleAuth(req);
+  let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const authResult = auth(req, ModelProvider.GeminiPro);
   if (authResult.error) {
     return NextResponse.json(authResult, {
       status: 401,
     });
   }
 
+  const bearToken = req.headers.get("Authorization") ?? "";
+  const token = bearToken.trim().replaceAll("Bearer ", "").trim();
+
+  const key = token ? token : serverConfig.googleApiKey;
+
+  if (!key) {
+    return NextResponse.json(
+      {
+        error: true,
+        message: `missing GOOGLE_API_KEY in server env vars`,
+      },
+      {
+        status: 401,
+      },
+    );
+  }
+
+  const fetchUrl = `${baseUrl}/${path}?key=${key}`;
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      "Cache-Control": "no-store",
+    },
+    method: req.method,
+    body: req.body,
+    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
   try {
-    const response = await requestGoogleGemini(req);
-    return response;
-  } catch (e) {
-    console.error("[OpenAI] ", e);
-    return NextResponse.json(prettyObject(e));
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
   }
 }
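
For reference, a hypothetical client-side call through this new proxy route. This is a sketch only: it assumes the app is served from the same origin, that the server (or the access store) supplies the Google key, and it follows the candidates/parts response shape used by the client platform code later in this diff:

async function askGemini(prompt: string): Promise<string> {
  const res = await fetch(
    "/api/google/v1beta/models/gemini-pro:generateContent",
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        contents: [{ role: "user", parts: [{ text: prompt }] }],
      }),
    },
  );
  const json = await res.json();
  // First candidate, first part — the same path extractMessage() walks.
  return json?.candidates?.at(0)?.content?.parts?.at(0)?.text ?? "";
}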
@@ -1,6 +1,6 @@
 import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
 import { getServerSideConfig } from "@/app/config/server";
-import { OpenaiPath } from "@/app/constant";
+import { ModelProvider, OpenaiPath } from "@/app/constant";
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "../../auth";
@@ -45,7 +45,7 @@ async function handle(
     );
   }
 
-  const authResult = auth(req);
+  const authResult = auth(req, ModelProvider.GPT);
   if (authResult.error) {
     return NextResponse.json(authResult, {
       status: 401,
@@ -75,4 +75,22 @@ export const GET = handle;
 export const POST = handle;
 
 export const runtime = "edge";
-export const preferredRegion = ['arn1', 'bom1', 'cdg1', 'cle1', 'cpt1', 'dub1', 'fra1', 'gru1', 'hnd1', 'iad1', 'icn1', 'kix1', 'lhr1', 'pdx1', 'sfo1', 'sin1', 'syd1'];
+export const preferredRegion = [
+  "arn1",
+  "bom1",
+  "cdg1",
+  "cle1",
+  "cpt1",
+  "dub1",
+  "fra1",
+  "gru1",
+  "hnd1",
+  "iad1",
+  "icn1",
+  "kix1",
+  "lhr1",
+  "pdx1",
+  "sfo1",
+  "sin1",
+  "syd1",
+];
@@ -1,10 +1,14 @@
 import { getClientConfig } from "../config/client";
-import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
-import { ChatMessage, ModelType, useAccessStore } from "../store";
+import {
+  ACCESS_CODE_PREFIX,
+  Azure,
+  ModelProvider,
+  ServiceProvider,
+} from "../constant";
+import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
-import { GeminiApi } from "./platforms/google";
 import { FileApi } from "./platforms/utils";
-
+import { GeminiProApi } from "./platforms/google";
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
 
@@ -61,6 +65,13 @@ export interface LLMUsage {
 export interface LLMModel {
   name: string;
   available: boolean;
+  provider: LLMModelProvider;
+}
+
+export interface LLMModelProvider {
+  id: string;
+  providerName: string;
+  providerType: string;
 }
 
 export abstract class LLMApi {
@@ -101,16 +112,15 @@ export class ClientApi {
   public llm: LLMApi;
   public file: FileApi;
 
-  constructor() {
+  constructor(provider: ModelProvider = ModelProvider.GPT) {
+    if (provider === ModelProvider.GeminiPro) {
+      this.llm = new GeminiProApi();
+      return;
+    }
     this.llm = new ChatGPTApi();
     this.file = new FileApi();
   }
 
-  switch(model: string) {
-    if (model.startsWith("gemini")) this.llm = new GeminiApi();
-    else this.llm = new ChatGPTApi();
-  }
-
   config() {}
 
   prompts() {}
@@ -127,7 +137,7 @@ export class ClientApi {
       {
         from: "human",
         value:
-          "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web",
+          "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web",
       },
     ]);
     // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用
@@ -157,44 +167,21 @@ export class ClientApi {
   }
 }
 
-export const api = new ClientApi();
-
-export function getAuthHeaders() {
-  const accessStore = useAccessStore.getState();
-  const headers: Record<string, string> = {};
-
-  const isAzure = accessStore.provider === ServiceProvider.Azure;
-  const authHeader = isAzure ? "api-key" : "Authorization";
-  const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
-
-  const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
-  const validString = (x: string) => x && x.length > 0;
-
-  // use user's api key first
-  if (validString(apiKey)) {
-    headers[authHeader] = makeBearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    validString(accessStore.accessCode)
-  ) {
-    headers[authHeader] = makeBearer(
-      ACCESS_CODE_PREFIX + accessStore.accessCode,
-    );
-  }
-
-  return headers;
-}
-
 export function getHeaders() {
   const accessStore = useAccessStore.getState();
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     "x-requested-with": "XMLHttpRequest",
   };
+
+  const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
+  const isGoogle = modelConfig.model === "gemini-pro";
   const isAzure = accessStore.provider === ServiceProvider.Azure;
   const authHeader = isAzure ? "api-key" : "Authorization";
-  const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
+  const apiKey = isGoogle
+    ? accessStore.googleApiKey
+    : isAzure
+    ? accessStore.azureApiKey
+    : accessStore.openaiApiKey;
 
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;
@@ -213,31 +200,3 @@ export function getHeaders() {
 
   return headers;
 }
-
-export function getGeminiHeaders() {
-  const accessStore = useAccessStore.getState();
-  const headers: Record<string, string> = {
-    "Content-Type": "application/json",
-    "x-requested-with": "XMLHttpRequest",
-  };
-
-  const authHeader = "Authorization";
-  const apiKey = accessStore.googleApiKey;
-
-  const makeBearer = (s: string) => `${"Bearer "}${s.trim()}`;
-  const validString = (x: string) => x && x.length > 0;
-
-  // use user's api key first
-  if (validString(apiKey)) {
-    headers[authHeader] = makeBearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    validString(accessStore.accessCode)
-  ) {
-    headers[authHeader] = makeBearer(
-      ACCESS_CODE_PREFIX + accessStore.accessCode,
-    );
-  }
-
-  return headers;
-}
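
The constructor now selects the backend once, replacing the removed switch() hot-swap. A usage sketch matching the call sites added elsewhere in this commit (import paths are assumed relative to the app root):

import { ClientApi } from "./client/api";
import { ModelProvider } from "./constant";

// e.g. the model name taken from the current session's modelConfig
function apiFor(model: string): ClientApi {
  return model === "gemini-pro"
    ? new ClientApi(ModelProvider.GeminiPro)
    : new ClientApi(ModelProvider.GPT);
}

// api.llm is a GeminiProApi or a ChatGPTApi behind the same LLMApi interface
const api = apiFor("gemini-pro");
api.llm.models().then((models) => console.log(models));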
@@ -1,120 +1,77 @@
-import {
-  ApiPath,
-  DEFAULT_API_HOST,
-  DEFAULT_MODELS,
-  GooglePath,
-  OpenaiPath,
-  REQUEST_TIMEOUT_MS,
-  ServiceProvider,
-} from "@/app/constant";
+import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-
-import {
-  AgentChatOptions,
-  ChatOptions,
-  getGeminiHeaders,
-  getHeaders,
-  LLMApi,
-  LLMModel,
-  LLMUsage,
-} from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
-
-export interface OpenAIListModelResponse {
-  object: string;
-  data: Array<{
-    id: string;
-    object: string;
-    root: string;
-  }>;
-}
-
-export class GeminiApi implements LLMApi {
-  private disableListModels = true;
-
-  path(path: string): string {
-    const accessStore = useAccessStore.getState();
-    let baseUrl = ApiPath.GoogleAI;
-    // if (baseUrl.length === 0) {
-    //   const isApp = !!getClientConfig()?.isApp;
-    //   baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI;
-    // }
-
-    // if (baseUrl.endsWith("/")) {
-    //   baseUrl = baseUrl.slice(0, baseUrl.length - 1);
-    // }
-    // if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) {
-    //   baseUrl = "https://" + baseUrl;
-    // }
-
-    // if (isAzure) {
-    //   path = makeAzurePath(path, accessStore.azureApiVersion);
-    // }
-
-    return [baseUrl, path].join("/");
-  }
-
+import { getClientConfig } from "@/app/config/client";
+import Locale from "../../locales";
+import { getServerSideConfig } from "@/app/config/server";
+export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
-    return res.choices?.at(0)?.message?.content ?? "";
-  }
+    console.log("[Response] gemini-pro response: ", res);
 
-  async chat(options: ChatOptions) {
-    const messages: any[] = [];
-    console.log(options.messages);
-    let systemPrompt = "";
-    for (const v of options.messages) {
-      if (v.role === "system") {
-        // systemPrompt = v.content;
-        continue;
-      }
-      let content = v.content;
-      if (systemPrompt !== "") {
-        content = `${systemPrompt}\n${content}`;
-        systemPrompt = "";
-      }
-      let message: {
-        role: string;
-        parts: { text: string }[];
-      } = {
-        role: v.role === "assistant" ? "model" : "user",
-        parts: [],
-      };
-      message.parts.push({
-        text: content,
-      });
-      messages.push(message);
+    return (
+      res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+      res?.error?.message ||
+      ""
+    );
+  }
+  async chat(options: ChatOptions): Promise<void> {
+    const messages = options.messages.map((v) => ({
+      role: v.role.replace("assistant", "model").replace("system", "user"),
+      parts: [{ text: v.content }],
+    }));
+
+    // google requires that role in neighboring messages must not be the same
+    for (let i = 0; i < messages.length - 1; ) {
+      // Check if current and next item both have the role "model"
+      if (messages[i].role === messages[i + 1].role) {
+        // Concatenate the 'parts' of the current and next item
+        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
+        // Remove the next item
+        messages.splice(i + 1, 1);
+      } else {
+        // Move to the next item
+        i++;
+      }
     }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
       ...{
         model: options.config.model,
       },
     };
     const requestPayload = {
       contents: messages,
       generationConfig: {
-        temperature: 1.0,
-        maxOutputTokens: 8000,
-        topP: 0.8,
-        topK: 10,
-        // stopSequences: [
-        //   "Title"
-        // ],
+        temperature: modelConfig.temperature,
+        maxOutputTokens: modelConfig.max_tokens,
+        topP: modelConfig.top_p,
+        // "topK": modelConfig.top_k,
       },
     };
 
-    console.log("[Request] gemini payload: ", requestPayload);
+    console.log("[Request] google payload: ", requestPayload);
 
-    const shouldStream = true;
+    // todo: support stream later
+    const shouldStream = false;
     const controller = new AbortController();
     options.onController?.(controller);
 
     try {
-      const chatPath = this.path(
-        GooglePath.ChatPath.replace("{{model}}", options.config.model),
-      );
+      const chatPath = this.path(Google.ChatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
         signal: controller.signal,
-        headers: getGeminiHeaders(),
+        headers: getHeaders(),
      };

      // make a fetch request
@@ -122,7 +79,6 @@ export class GeminiApi implements LLMApi {
         () => controller.abort(),
         REQUEST_TIMEOUT_MS,
       );
-
       if (shouldStream) {
         let responseText = "";
         let remainText = "";
@@ -158,13 +114,14 @@ export class GeminiApi implements LLMApi {
         };
 
         controller.signal.onabort = finish;
+
         fetchEventSource(chatPath, {
           ...chatPayload,
           async onopen(res) {
             clearTimeout(requestTimeoutId);
             const contentType = res.headers.get("content-type");
             console.log(
-              "[Google] request response content type: ",
+              "[OpenAI] request response content type: ",
               contentType,
             );
 
@@ -207,15 +164,13 @@ export class GeminiApi implements LLMApi {
             const text = msg.data;
             try {
               const json = JSON.parse(text) as {
-                candidates: Array<{
-                  content: {
-                    parts: Array<{
-                      text: string;
-                    }>;
+                choices: Array<{
+                  delta: {
+                    content: string;
                   };
                 }>;
               };
-              const delta = json.candidates[0]?.content?.parts[0]?.text;
+              const delta = json.choices[0]?.delta?.content;
               if (delta) {
                 remainText += delta;
               }
@@ -237,6 +192,16 @@ export class GeminiApi implements LLMApi {
         clearTimeout(requestTimeoutId);
 
         const resJson = await res.json();
+
+        if (resJson?.promptFeedback?.blockReason) {
+          // being blocked
+          options.onError?.(
+            new Error(
+              "Message is being blocked for reason: " +
+                resJson.promptFeedback.blockReason,
+            ),
+          );
+        }
         const message = this.extractMessage(resJson);
         options.onFinish(message);
       }
@@ -245,249 +210,13 @@ export class GeminiApi implements LLMApi {
       options.onError?.(e as Error);
     }
   }
-
-  async toolAgentChat(options: AgentChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: v.content,
-    }));
-
-    const modelConfig = {
-      ...useAppConfig.getState().modelConfig,
-      ...useChatStore.getState().currentSession().mask.modelConfig,
-      ...{
-        model: options.config.model,
-      },
-    };
-
-    const requestPayload = {
-      messages,
-      stream: options.config.stream,
-      model: modelConfig.model,
-      temperature: modelConfig.temperature,
-      presence_penalty: modelConfig.presence_penalty,
-      frequency_penalty: modelConfig.frequency_penalty,
-      top_p: modelConfig.top_p,
-      baseUrl: useAccessStore.getState().openaiUrl,
-      maxIterations: options.agentConfig.maxIterations,
-      returnIntermediateSteps: options.agentConfig.returnIntermediateSteps,
-      useTools: options.agentConfig.useTools,
-    };
-
-    console.log("[Request] openai payload: ", requestPayload);
-
-    const shouldStream = true;
-    const controller = new AbortController();
-    options.onController?.(controller);
-
-    try {
-      let path = "/api/langchain/tool/agent/";
-      const enableNodeJSPlugin = !!process.env.NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN;
-      path = enableNodeJSPlugin ? path + "nodejs" : path + "edge";
-      const chatPayload = {
-        method: "POST",
-        body: JSON.stringify(requestPayload),
-        signal: controller.signal,
-        headers: getHeaders(),
-      };
-
-      // make a fetch request
-      const requestTimeoutId = setTimeout(
-        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
-      );
-      console.log("shouldStream", shouldStream);
-
-      if (shouldStream) {
-        let responseText = "";
-        let finished = false;
-
-        const finish = () => {
-          if (!finished) {
-            options.onFinish(responseText);
-            finished = true;
-          }
-        };
-
-        controller.signal.onabort = finish;
-
-        fetchEventSource(path, {
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[OpenAI] request response content type: ",
-              contentType,
-            );
-
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              console.warn(`extraInfo: ${extraInfo}`);
-              // try {
-              //   const resJson = await res.clone().json();
-              //   extraInfo = prettyObject(resJson);
-              // } catch { }
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            let response = JSON.parse(msg.data);
-            if (!response.isSuccess) {
-              console.error("[Request]", msg.data);
-              responseText = msg.data;
-              throw Error(response.message);
-            }
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            try {
-              if (response && !response.isToolMessage) {
-                responseText += response.message;
-                options.onUpdate?.(responseText, response.message);
-              } else {
-                options.onToolUpdate?.(response.toolName!, response.message);
-              }
-            } catch (e) {
-              console.error("[Request] parse error", response, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
-      } else {
-        const res = await fetch(path, chatPayload);
-        clearTimeout(requestTimeoutId);
-
-        const resJson = await res.json();
-        const message = this.extractMessage(resJson);
-        options.onFinish(message);
-      }
-    } catch (e) {
-      console.log("[Request] failed to make a chat reqeust", e);
-      options.onError?.(e as Error);
-    }
+  usage(): Promise<LLMUsage> {
+    throw new Error("Method not implemented.");
   }
-
-  async usage() {
-    const formatDate = (d: Date) =>
-      `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
-        .getDate()
-        .toString()
-        .padStart(2, "0")}`;
-    const ONE_DAY = 1 * 24 * 60 * 60 * 1000;
-    const now = new Date();
-    const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1);
-    const startDate = formatDate(startOfMonth);
-    const endDate = formatDate(new Date(Date.now() + ONE_DAY));
-
-    const [used, subs] = await Promise.all([
-      fetch(
-        this.path(
-          `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`,
-        ),
-        {
-          method: "GET",
-          headers: getHeaders(),
-        },
-      ),
-      fetch(this.path(OpenaiPath.SubsPath), {
-        method: "GET",
-        headers: getHeaders(),
-      }),
-    ]);
-
-    if (used.status === 401) {
-      throw new Error(Locale.Error.Unauthorized);
-    }
-
-    if (!used.ok || !subs.ok) {
-      throw new Error("Failed to query usage from openai");
-    }
-
-    const response = (await used.json()) as {
-      total_usage?: number;
-      error?: {
-        type: string;
-        message: string;
-      };
-    };
-
-    const total = (await subs.json()) as {
-      hard_limit_usd?: number;
-    };
-
-    if (response.error && response.error.type) {
-      throw Error(response.error.message);
-    }
-
-    if (response.total_usage) {
-      response.total_usage = Math.round(response.total_usage) / 100;
-    }
-
-    if (total.hard_limit_usd) {
-      total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100;
-    }
-
-    return {
-      used: response.total_usage,
-      total: total.hard_limit_usd,
-    } as LLMUsage;
-  }
-
   async models(): Promise<LLMModel[]> {
-    if (this.disableListModels) {
-      return DEFAULT_MODELS.slice();
-    }
-
-    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
-      method: "GET",
-      headers: {
-        ...getHeaders(),
-      },
-    });
-
-    const resJson = (await res.json()) as OpenAIListModelResponse;
-    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
-    console.log("[Models]", chatModels);
-
-    if (!chatModels) {
-      return [];
-    }
-
-    return chatModels.map((m) => ({
-      name: m.id,
-      available: true,
-    }));
+    return [];
   }
+  path(path: string): string {
+    return "/api/google/" + path;
+  }
 }
-export { OpenaiPath };
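
The neighbor-merging loop above exists because Gemini rejects two consecutive turns with the same role, and since system prompts are mapped to "user", a user+user head is common. A standalone sketch of just that loop, taken directly from the diff:

type GeminiMessage = { role: string; parts: { text: string }[] };

function mergeSameRoleNeighbors(messages: GeminiMessage[]): GeminiMessage[] {
  for (let i = 0; i < messages.length - 1; ) {
    if (messages[i].role === messages[i + 1].role) {
      // fold the next turn's parts into the current one
      messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
      messages.splice(i + 1, 1);
    } else {
      i++;
    }
  }
  return messages;
}

// ["user", "user", "model"] collapses to ["user" (two parts), "model"]:
console.log(
  mergeSameRoleNeighbors([
    { role: "user", parts: [{ text: "You are helpful." }] },
    { role: "user", parts: [{ text: "Hi!" }] },
    { role: "model", parts: [{ text: "Hello." }] },
  ]),
);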
@@ -512,6 +512,11 @@ export class ChatGPTApi implements LLMApi {
     return chatModels.map((m) => ({
       name: m.id,
       available: true,
+      provider: {
+        id: "openai",
+        providerName: "OpenAI",
+        providerType: "openai",
+      },
     }));
   }
 }
@@ -64,6 +64,17 @@ export function AuthPage() {
           );
         }}
       />
+      <input
+        className={styles["auth-input"]}
+        type="password"
+        placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
+        value={accessStore.googleApiKey}
+        onChange={(e) => {
+          accessStore.update(
+            (access) => (access.googleApiKey = e.currentTarget.value),
+          );
+        }}
+      />
     </>
   ) : null}
@@ -29,10 +29,11 @@ import NextImage from "next/image";
 
 import { toBlob, toPng } from "html-to-image";
 import { DEFAULT_MASK_AVATAR } from "../store/mask";
-import { api } from "../client/api";
 
 import { prettyObject } from "../utils/format";
-import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
+import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
 import { getClientConfig } from "../config/client";
+import { ClientApi } from "../client/api";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -301,10 +302,17 @@ export function PreviewActions(props: {
 }) {
   const [loading, setLoading] = useState(false);
   const [shouldExport, setShouldExport] = useState(false);
+
+  const config = useAppConfig();
   const onRenderMsgs = (msgs: ChatMessage[]) => {
     setShouldExport(false);
 
+    var api: ClientApi;
+    if (config.modelConfig.model === "gemini-pro") {
+      api = new ClientApi(ModelProvider.GeminiPro);
+    } else {
+      api = new ClientApi(ModelProvider.GPT);
+    }
+
     api
       .share(msgs)
       .then((res) => {
@@ -530,7 +538,7 @@ export function ImagePreviewer(props: {
       </div>
 
       <div>
-        <div className={styles["main-title"]}>ChatGPT Next Web</div>
+        <div className={styles["main-title"]}>NextChat</div>
         <div className={styles["sub-title"]}>
           github.com/Yidadaa/ChatGPT-Next-Web
         </div>
@@ -12,7 +12,7 @@ import LoadingIcon from "../icons/three-dots.svg";
 import { getCSSVar, useMobileScreen } from "../utils";
 
 import dynamic from "next/dynamic";
-import { Path, SlotID } from "../constant";
+import { ModelProvider, Path, SlotID } from "../constant";
 import { ErrorBoundary } from "./error";
 
 import { getISOLang, getLang } from "../locales";
@@ -27,7 +27,7 @@ import { SideBar } from "./sidebar";
 import { useAppConfig } from "../store/config";
 import { AuthPage } from "./auth";
 import { getClientConfig } from "../config/client";
-import { api } from "../client/api";
+import { ClientApi } from "../client/api";
 import { useAccessStore } from "../store";
 
 export function Loading(props: { noLogo?: boolean }) {
@@ -132,7 +132,8 @@ function Screen() {
   const isHome = location.pathname === Path.Home;
   const isAuth = location.pathname === Path.Auth;
   const isMobileScreen = useMobileScreen();
-  const shouldTightBorder = getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen);
+  const shouldTightBorder =
+    getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen);
 
   useEffect(() => {
     loadAsyncGoogleFont();
@@ -174,6 +175,12 @@ function Screen() {
 export function useLoadData() {
   const config = useAppConfig();
 
+  var api: ClientApi;
+  if (config.modelConfig.model === "gemini-pro") {
+    api = new ClientApi(ModelProvider.GeminiPro);
+  } else {
+    api = new ClientApi(ModelProvider.GPT);
+  }
   useEffect(() => {
     (async () => {
       const models = await api.llm.models();
@@ -29,7 +29,7 @@ export function ModelConfigList(props: {
         .filter((v) => v.available)
         .map((v, i) => (
           <option value={v.name} key={i}>
-            {v.displayName}
+            {v.displayName}({v.provider?.providerName})
           </option>
         ))}
     </Select>
@@ -91,79 +91,84 @@ export function ModelConfigList(props: {
           }
         ></input>
       </ListItem>
-      <ListItem
-        title={Locale.Settings.PresencePenalty.Title}
-        subTitle={Locale.Settings.PresencePenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.presence_penalty?.toFixed(1)}
-          min="-2"
-          max="2"
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.presence_penalty =
-                  ModalConfigValidator.presence_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-
-      <ListItem
-        title={Locale.Settings.FrequencyPenalty.Title}
-        subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
-      >
-        <InputRange
-          value={props.modelConfig.frequency_penalty?.toFixed(1)}
-          min="-2"
-          max="2"
-          step="0.1"
-          onChange={(e) => {
-            props.updateConfig(
-              (config) =>
-                (config.frequency_penalty =
-                  ModalConfigValidator.frequency_penalty(
-                    e.currentTarget.valueAsNumber,
-                  )),
-            );
-          }}
-        ></InputRange>
-      </ListItem>
-
-      <ListItem
-        title={Locale.Settings.InjectSystemPrompts.Title}
-        subTitle={Locale.Settings.InjectSystemPrompts.SubTitle}
-      >
-        <input
-          type="checkbox"
-          checked={props.modelConfig.enableInjectSystemPrompts}
-          onChange={(e) =>
-            props.updateConfig(
-              (config) =>
-                (config.enableInjectSystemPrompts = e.currentTarget.checked),
-            )
-          }
-        ></input>
-      </ListItem>
-
-      <ListItem
-        title={Locale.Settings.InputTemplate.Title}
-        subTitle={Locale.Settings.InputTemplate.SubTitle}
-      >
-        <input
-          type="text"
-          value={props.modelConfig.template}
-          onChange={(e) =>
-            props.updateConfig(
-              (config) => (config.template = e.currentTarget.value),
-            )
-          }
-        ></input>
-      </ListItem>
+      {props.modelConfig.model === "gemini-pro" ? null : (
+        <>
+          <ListItem
+            title={Locale.Settings.PresencePenalty.Title}
+            subTitle={Locale.Settings.PresencePenalty.SubTitle}
+          >
+            <InputRange
+              value={props.modelConfig.presence_penalty?.toFixed(1)}
+              min="-2"
+              max="2"
+              step="0.1"
+              onChange={(e) => {
+                props.updateConfig(
+                  (config) =>
+                    (config.presence_penalty =
+                      ModalConfigValidator.presence_penalty(
+                        e.currentTarget.valueAsNumber,
+                      )),
+                );
+              }}
+            ></InputRange>
+          </ListItem>
+
+          <ListItem
+            title={Locale.Settings.FrequencyPenalty.Title}
+            subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
+          >
+            <InputRange
+              value={props.modelConfig.frequency_penalty?.toFixed(1)}
+              min="-2"
+              max="2"
+              step="0.1"
+              onChange={(e) => {
+                props.updateConfig(
+                  (config) =>
+                    (config.frequency_penalty =
+                      ModalConfigValidator.frequency_penalty(
+                        e.currentTarget.valueAsNumber,
+                      )),
+                );
+              }}
+            ></InputRange>
+          </ListItem>
+
+          <ListItem
+            title={Locale.Settings.InjectSystemPrompts.Title}
+            subTitle={Locale.Settings.InjectSystemPrompts.SubTitle}
+          >
+            <input
+              type="checkbox"
+              checked={props.modelConfig.enableInjectSystemPrompts}
+              onChange={(e) =>
+                props.updateConfig(
+                  (config) =>
+                    (config.enableInjectSystemPrompts =
+                      e.currentTarget.checked),
+                )
+              }
+            ></input>
+          </ListItem>
+
+          <ListItem
+            title={Locale.Settings.InputTemplate.Title}
+            subTitle={Locale.Settings.InputTemplate.SubTitle}
+          >
+            <input
+              type="text"
+              value={props.modelConfig.template}
+              onChange={(e) =>
+                props.updateConfig(
+                  (config) => (config.template = e.currentTarget.value),
+                )
+              }
+            ></input>
+          </ListItem>
+        </>
+      )}
       <ListItem
         title={Locale.Settings.HistoryCount.Title}
         subTitle={Locale.Settings.HistoryCount.SubTitle}
@@ -52,6 +52,7 @@ import { copyToClipboard } from "../utils";
 import Link from "next/link";
 import {
   Azure,
+  Google,
   OPENAI_BASE_URL,
   Path,
   RELEASE_URL,
@@ -584,6 +585,7 @@ export function Settings() {
   const accessStore = useAccessStore();
   const shouldHideBalanceQuery = useMemo(() => {
     const isOpenAiUrl = accessStore.openaiUrl.includes(OPENAI_BASE_URL);
+
     return (
       accessStore.hideBalanceQuery ||
       isOpenAiUrl ||
@@ -999,7 +1001,7 @@ export function Settings() {
                 />
               </ListItem>
             </>
-          ) : (
+          ) : accessStore.provider === "Azure" ? (
             <>
               <ListItem
                 title={Locale.Settings.Access.Azure.Endpoint.Title}
@@ -1058,7 +1060,66 @@ export function Settings() {
                 ></input>
               </ListItem>
             </>
-          )}
+          ) : accessStore.provider === "Google" ? (
+            <>
+              <ListItem
+                title={Locale.Settings.Access.Google.Endpoint.Title}
+                subTitle={
+                  Locale.Settings.Access.Google.Endpoint.SubTitle +
+                  Google.ExampleEndpoint
+                }
+              >
+                <input
+                  type="text"
+                  value={accessStore.googleUrl}
+                  placeholder={Google.ExampleEndpoint}
+                  onChange={(e) =>
+                    accessStore.update(
+                      (access) =>
+                        (access.googleUrl = e.currentTarget.value),
+                    )
+                  }
+                ></input>
+              </ListItem>
+              <ListItem
+                title={Locale.Settings.Access.Azure.ApiKey.Title}
+                subTitle={Locale.Settings.Access.Azure.ApiKey.SubTitle}
+              >
+                <PasswordInput
+                  value={accessStore.googleApiKey}
+                  type="text"
+                  placeholder={
+                    Locale.Settings.Access.Google.ApiKey.Placeholder
+                  }
+                  onChange={(e) => {
+                    accessStore.update(
+                      (access) =>
+                        (access.googleApiKey = e.currentTarget.value),
+                    );
+                  }}
+                />
+              </ListItem>
+              <ListItem
+                title={Locale.Settings.Access.Google.ApiVerion.Title}
+                subTitle={
+                  Locale.Settings.Access.Google.ApiVerion.SubTitle
+                }
+              >
+                <input
+                  type="text"
+                  value={accessStore.googleApiVersion}
+                  placeholder="2023-08-01-preview"
+                  onChange={(e) =>
+                    accessStore.update(
+                      (access) =>
+                        (access.googleApiVersion =
+                          e.currentTarget.value),
+                    )
+                  }
+                ></input>
+              </ListItem>
+            </>
+          ) : null}
         </>
       )}
     </>
@@ -155,7 +155,7 @@ export function SideBar(props: { className?: string }) {
     >
       <div className={styles["sidebar-header"]} data-tauri-drag-region>
         <div className={styles["sidebar-title"]} data-tauri-drag-region>
-          ChatGPT Next
+          NextChat
        </div>
        <div className={styles["sidebar-sub-title"]}>
          Build your own AI assistant.
@@ -26,6 +26,10 @@ declare global {
       AZURE_URL?: string; // https://{azure-url}/openai/deployments/{deploy-name}
       AZURE_API_KEY?: string;
       AZURE_API_VERSION?: string;
+
+      // google only
+      GOOGLE_API_KEY?: string;
+      GOOGLE_URL?: string;
     }
   }
 }
@@ -61,6 +65,7 @@ export const getServerSideConfig = () => {
   }
 
   const isAzure = !!process.env.AZURE_URL;
+  const isGoogle = !!process.env.GOOGLE_API_KEY;
 
   const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
   const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
@@ -80,8 +85,9 @@ export const getServerSideConfig = () => {
     azureApiKey: process.env.AZURE_API_KEY,
     azureApiVersion: process.env.AZURE_API_VERSION,
 
-    googleApiKey: process.env.GOOGLE_API_KEY ?? "",
-    googleBaseUrl: process.env.GOOGLE_BASE_URL,
+    isGoogle,
+    googleApiKey: process.env.GOOGLE_API_KEY,
+    googleUrl: process.env.GOOGLE_URL,
 
     needCode: ACCESS_CODES.size > 0,
     code: process.env.CODE,
@@ -13,6 +13,8 @@ export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`;
 export const OPENAI_BASE_URL = "https://api.openai.com";
-export const GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com";
+
+export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
 
 export enum Path {
   Home = "/",
   Chat = "/chat",
@@ -71,6 +73,12 @@ export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
 export enum ServiceProvider {
   OpenAI = "OpenAI",
   Azure = "Azure",
+  Google = "Google",
+}
+
+export enum ModelProvider {
+  GPT = "GPT",
+  GeminiPro = "GeminiPro",
 }
 
 export const OpenaiPath = {
@@ -89,6 +97,14 @@ export const Azure = {
   ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}",
 };
 
+export const Google = {
+  ExampleEndpoint:
+    "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
+  ChatPath: "v1beta/models/gemini-pro:generateContent",
+
+  // /api/openai/v1/chat/completions
+};
+
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
 export const DEFAULT_SYSTEM_TEMPLATE = `
 You are ChatGPT, a large language model trained by OpenAI.
@@ -111,46 +127,110 @@ export const DEFAULT_MODELS = [
   {
     name: "gpt-4",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-4-0613",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-4-32k",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-4-32k-0613",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-4-1106-preview",
    available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-4-vision-preview",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-3.5-turbo",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-3.5-turbo-0613",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-3.5-turbo-1106",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-3.5-turbo-16k",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
   {
     name: "gpt-3.5-turbo-16k-0613",
     available: true,
+    provider: {
+      id: "openai",
+      providerName: "OpenAI",
+      providerType: "openai",
+    },
   },
+  {
+    name: "gemini-pro",
+    available: true,
+    provider: {
+      id: "google",
+      providerName: "Google",
+      providerType: "google",
+    },
+  },
 ] as const;
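
How the new constants combine: the Google proxy route earlier in this diff strips any trailing slash from GEMINI_BASE_URL (note it ships with one) and appends Google.ChatPath plus the key. A sketch of that URL assembly, with "<GOOGLE_API_KEY>" as a placeholder:

const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/";
const chatPath = "v1beta/models/gemini-pro:generateContent";

let baseUrl = GEMINI_BASE_URL;
if (baseUrl.endsWith("/")) baseUrl = baseUrl.slice(0, -1);
const fetchUrl = `${baseUrl}/${chatPath}?key=${"<GOOGLE_API_KEY>"}`;
// -> https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=...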
@@ -6,7 +6,7 @@ import { getClientConfig } from "./config/client";
 import { type Metadata } from "next";
 
 export const metadata: Metadata = {
-  title: "ChatGPT Next Web",
+  title: "NextChat",
   description: "Your personal ChatGPT Chat Bot.",
   viewport: {
     width: "device-width",
@@ -18,7 +18,7 @@ export const metadata: Metadata = {
     { media: "(prefers-color-scheme: dark)", color: "#151515" },
   ],
   appleWebApp: {
-    title: "ChatGPT Next Web",
+    title: "NextChat",
     statusBarStyle: "default",
   },
 };
@@ -13,7 +13,7 @@ const cn = {
   Auth: {
     Title: "需要密码",
     Tips: "管理员开启了密码验证,请在下方填入访问码",
-    SubTips: "或者输入你的 OpenAI API 密钥",
+    SubTips: "或者输入你的 OpenAI 或 Google API 密钥",
     Input: "在此处填写访问码",
     Confirm: "确认",
     Later: "稍后再说",
@@ -314,6 +314,23 @@ const cn = {
         SubTitle: "选择指定的部分版本",
       },
     },
+    Google: {
+      ApiKey: {
+        Title: "接口密钥",
+        SubTitle: "使用自定义 Google AI Studio API Key 绕过密码访问限制",
+        Placeholder: "Google AI Studio API Key",
+      },
+
+      Endpoint: {
+        Title: "接口地址",
+        SubTitle: "样例:",
+      },
+
+      ApiVerion: {
+        Title: "接口版本 (gemini-pro api version)",
+        SubTitle: "选择指定的部分版本",
+      },
+    },
     CustomModel: {
       Title: "自定义模型名",
       SubTitle: "增加自定义模型可选项,使用英文逗号隔开",
@@ -363,7 +380,7 @@ const cn = {
   Prompt: {
     History: (content: string) => "这是历史聊天总结作为前情提要:" + content,
     Topic:
-      "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,如果没有主题,请直接返回“闲聊”",
+      "使用四到五个字直接返回这句话的简要主题,不要解释、不要标点、不要语气词、不要多余文本,不要加粗,如果没有主题,请直接返回“闲聊”",
     Summarize:
       "简要总结一下对话内容,用作后续的上下文提示 prompt,控制在 200 字以内",
   },
@@ -15,7 +15,7 @@ const en: LocaleType = {
   Auth: {
     Title: "Need Access Code",
     Tips: "Please enter access code below",
-    SubTips: "Or enter your OpenAI API Key",
+    SubTips: "Or enter your OpenAI or Google API Key",
     Input: "access code",
     Confirm: "Confirm",
     Later: "Later",
@@ -321,6 +321,24 @@ const en: LocaleType = {
       Title: "Custom Models",
       SubTitle: "Custom model options, seperated by comma",
     },
+    Google: {
+      ApiKey: {
+        Title: "API Key",
+        SubTitle:
+          "Bypass password access restrictions using a custom Google AI Studio API Key",
+        Placeholder: "Google AI Studio API Key",
+      },
+
+      Endpoint: {
+        Title: "Endpoint Address",
+        SubTitle: "Example:",
+      },
+
+      ApiVerion: {
+        Title: "API Version (gemini-pro api version)",
+        SubTitle: "Select a specific part version",
+      },
+    },
   },
 
   Model: "Model",
@@ -369,7 +387,7 @@ const en: LocaleType = {
     History: (content: string) =>
       "This is a summary of the chat history as a recap: " + content,
     Topic:
-      "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, or additional text. Remove enclosing quotation marks.",
+      "Please generate a four to five word title summarizing our conversation without any lead-in, punctuation, quotation marks, periods, symbols, bold text, or additional text. Remove enclosing quotation marks.",
     Summarize:
       "Summarize the discussion briefly in 200 words or less to use as a prompt for future context.",
   },
@@ -29,8 +29,10 @@ const DEFAULT_ACCESS_STATE = {
   azureApiKey: "",
   azureApiVersion: "2023-08-01-preview",
 
-  // google
+  // google ai studio
+  googleUrl: "",
   googleApiKey: "",
+  googleApiVersion: "v1",
 
   // server config
   needCode: true,
@@ -59,6 +61,10 @@ export const useAccessStore = createPersistStore(
       return ensure(get(), ["azureUrl", "azureApiKey", "azureApiVersion"]);
     },
 
+    isValidGoogle() {
+      return ensure(get(), ["googleApiKey"]);
+    },
+
     isAuthorized() {
       this.fetch();
 
@@ -66,6 +72,7 @@ export const useAccessStore = createPersistStore(
       return (
         this.isValidOpenAI() ||
         this.isValidAzure() ||
+        this.isValidGoogle() ||
         !this.enabledAccessControl() ||
         (this.enabledAccessControl() && ensure(get(), ["accessCode"]))
       );
@@ -8,10 +8,11 @@ import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_SYSTEM_TEMPLATE,
   KnowledgeCutOffDate,
+  ModelProvider,
   StoreKey,
   SUMMARIZE_MODEL,
 } from "../constant";
-import { api, RequestMessage } from "../client/api";
+import { ClientApi, RequestMessage } from "../client/api";
 import { ChatControllerPool } from "../client/controller";
 import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
@@ -319,6 +320,8 @@ export const useChatStore = createPersistStore(
         session.messages.push(savedUserMessage);
         session.messages.push(botMessage);
       });
+      var api: ClientApi;
+      api = new ClientApi(ModelProvider.GPT);
       if (
         config.pluginConfig.enable &&
         session.mask.usePlugins &&
@@ -392,8 +395,10 @@ export const useChatStore = createPersistStore(
           },
         });
       } else {
+        if (modelConfig.model === "gemini-pro") {
+          api = new ClientApi(ModelProvider.GeminiPro);
+        }
         // make request
-        api.switch(modelConfig.model);
         api.llm.chat({
           messages: sendMessages,
           config: { ...modelConfig, stream: true },
@@ -472,7 +477,9 @@ export const useChatStore = createPersistStore(
 
       // system prompts, to get close to OpenAI Web ChatGPT
       const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
-      const systemPrompts = shouldInjectSystemPrompts
+
+      var systemPrompts: ChatMessage[] = [];
+      systemPrompts = shouldInjectSystemPrompts
         ? [
             createMessage({
               role: "system",
@@ -566,6 +573,14 @@ export const useChatStore = createPersistStore(
     summarizeSession() {
       const config = useAppConfig.getState();
       const session = get().currentSession();
+      const modelConfig = session.mask.modelConfig;
+
+      var api: ClientApi;
+      if (modelConfig.model === "gemini-pro") {
+        api = new ClientApi(ModelProvider.GeminiPro);
+      } else {
+        api = new ClientApi(ModelProvider.GPT);
+      }
 
       // remove error messages if any
       const messages = session.messages;
@@ -583,7 +598,6 @@ export const useChatStore = createPersistStore(
             content: Locale.Store.Prompt.Topic,
           }),
         );
-        api.switch(session.mask.modelConfig.model);
         api.llm.chat({
           messages: topicMessages,
           config: {
@@ -598,8 +612,6 @@ export const useChatStore = createPersistStore(
           },
         });
       }
-
-      const modelConfig = session.mask.modelConfig;
       const summarizeIndex = Math.max(
         session.lastSummarizeIndex,
         session.clearContextIndex ?? 0,
@@ -633,7 +645,6 @@ export const useChatStore = createPersistStore(
         historyMsgLength > modelConfig.compressMessageLengthThreshold &&
         modelConfig.sendMemory
       ) {
-        api.switch(modelConfig.model);
         api.llm.chat({
           messages: toBeSummarizedMsgs.concat(
             createMessage({
@@ -1,9 +1,16 @@
-import { FETCH_COMMIT_URL, FETCH_TAG_URL, StoreKey } from "../constant";
-import { api } from "../client/api";
+import {
+  FETCH_COMMIT_URL,
+  FETCH_TAG_URL,
+  ModelProvider,
+  StoreKey,
+} from "../constant";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import ChatGptIcon from "../icons/chatgpt.png";
 import Locale from "../locales";
 import { use } from "react";
 import { useAppConfig } from ".";
+import { ClientApi } from "../client/api";
 
 const ONE_MINUTE = 60 * 1000;
 const isApp = !!getClientConfig()?.isApp;
@@ -85,35 +92,40 @@ export const useUpdateStore = createPersistStore(
       }));
       if (window.__TAURI__?.notification && isApp) {
         // Check if notification permission is granted
-        await window.__TAURI__?.notification.isPermissionGranted().then((granted) => {
-          if (!granted) {
-            return;
-          } else {
-            // Request permission to show notifications
-            window.__TAURI__?.notification.requestPermission().then((permission) => {
-              if (permission === 'granted') {
-                if (version === remoteId) {
-                  // Show a notification using Tauri
-                  window.__TAURI__?.notification.sendNotification({
-                    title: "ChatGPT Next Web",
-                    body: `${Locale.Settings.Update.IsLatest}`,
-                    icon: `${ChatGptIcon.src}`,
-                    sound: "Default"
-                  });
-                } else {
-                  const updateMessage = Locale.Settings.Update.FoundUpdate(`${remoteId}`);
-                  // Show a notification for the new version using Tauri
-                  window.__TAURI__?.notification.sendNotification({
-                    title: "ChatGPT Next Web",
-                    body: updateMessage,
-                    icon: `${ChatGptIcon.src}`,
-                    sound: "Default"
-                  });
-                }
-              }
-            });
-          }
-        });
+        await window.__TAURI__?.notification
+          .isPermissionGranted()
+          .then((granted) => {
+            if (!granted) {
+              return;
+            } else {
+              // Request permission to show notifications
+              window.__TAURI__?.notification
+                .requestPermission()
+                .then((permission) => {
+                  if (permission === "granted") {
+                    if (version === remoteId) {
+                      // Show a notification using Tauri
+                      window.__TAURI__?.notification.sendNotification({
+                        title: "NextChat",
+                        body: `${Locale.Settings.Update.IsLatest}`,
+                        icon: `${ChatGptIcon.src}`,
+                        sound: "Default",
+                      });
+                    } else {
+                      const updateMessage =
+                        Locale.Settings.Update.FoundUpdate(`${remoteId}`);
+                      // Show a notification for the new version using Tauri
+                      window.__TAURI__?.notification.sendNotification({
+                        title: "NextChat",
+                        body: updateMessage,
+                        icon: `${ChatGptIcon.src}`,
+                        sound: "Default",
+                      });
+                    }
+                  }
+                });
+            }
+          });
       }
       console.log("[Got Upstream] ", remoteId);
     } catch (error) {
@@ -122,6 +134,7 @@ export const useUpdateStore = createPersistStore(
     },
 
     async updateUsage(force = false) {
+      // only support openai for now
       const overOneMinute = Date.now() - get().lastUpdateUsage >= ONE_MINUTE;
       if (!overOneMinute && !force) return;
 
@@ -130,6 +143,7 @@ export const useUpdateStore = createPersistStore(
       }));
 
       try {
+        const api = new ClientApi(ModelProvider.GPT);
         const usage = await api.llm.usage();
 
         if (usage) {
@@ -6,23 +6,27 @@ export function collectModelTable(
 ) {
   const modelTable: Record<
     string,
-    { available: boolean; name: string; displayName: string }
+    {
+      available: boolean;
+      name: string;
+      displayName: string;
+      provider?: LLMModel["provider"]; // Marked as optional
+    }
   > = {};
 
   // default models
-  models.forEach(
-    (m) =>
-      (modelTable[m.name] = {
-        ...m,
-        displayName: m.name,
-      }),
-  );
+  models.forEach((m) => {
+    modelTable[m.name] = {
+      ...m,
+      displayName: m.name, // 'provider' is copied over if it exists
+    };
+  });
 
   // server custom models
   customModels
     .split(",")
     .filter((v) => !!v && v.length > 0)
-    .map((m) => {
+    .forEach((m) => {
       const available = !m.startsWith("-");
       const nameConfig =
         m.startsWith("+") || m.startsWith("-") ? m.slice(1) : m;
@@ -30,14 +34,17 @@ export function collectModelTable(
 
       // enable or disable all models
       if (name === "all") {
-        Object.values(modelTable).forEach((m) => (m.available = available));
+        Object.values(modelTable).forEach(
+          (model) => (model.available = available),
+        );
+      } else {
+        modelTable[name] = {
+          name,
+          displayName: displayName || name,
+          available,
+          provider: modelTable[name]?.provider, // Use optional chaining
+        };
       }
-
-      modelTable[name] = {
-        name,
-        displayName: displayName || name,
-        available,
-      };
     });
   return modelTable;
 }
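
A compact sketch of the custom-model syntax this function parses. The "name=displayName" split is an assumption about the elided line between the two hunks above; the "+"/"-" prefixes come straight from the diff:

function parseCustomModel(m: string) {
  const available = !m.startsWith("-");
  const nameConfig = m.startsWith("+") || m.startsWith("-") ? m.slice(1) : m;
  const [name, displayName] = nameConfig.split("=");
  return { name, displayName: displayName || name, available };
}

console.log(parseCustomModel("+gpt-4-32k"));        // enable a model
console.log(parseCustomModel("-gpt-3.5-turbo"));    // disable a model
console.log(parseCustomModel("gemini-pro=Gemini")); // add with a display name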