Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2026-02-22 22:34:25 +08:00)

Compare commits: 88f8ca822f...feature/gl (16 commits)
| SHA1 |
|---|
| 8a22c9d6db |
| 5f96804f3b |
| 13430ea3e2 |
| 9df24e568b |
| bc322be448 |
| a867adaf04 |
| 0cb186846a |
| e467ce028d |
| cdfe907fb5 |
| d91af7f983 |
| 87b5e3bf62 |
| 93c5320bf2 |
| cc5e16b045 |
| 54f6feb2d7 |
| e1ac0538b8 |
| 1a678cb4d8 |
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();

@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.Alibaba as string,
@@ -9,7 +9,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "./auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

 const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);

@@ -122,7 +122,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.Anthropic as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 import { getAccessToken } from "@/app/utils/baidu";

 const serverConfig = getServerSideConfig();

@@ -104,7 +104,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.Baidu as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();

@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.ByteDance as string,
@@ -2,7 +2,7 @@ import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
 import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
-import { getModelProvider, isModelAvailableInServer } from "../utils/model";
+import { getModelProvider, isModelNotavailableInServer } from "../utils/model";

 const serverConfig = getServerSideConfig();

@@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
-      ServiceProvider.OpenAI as string,
-    ) ||
-    isModelAvailableInServer(
-      serverConfig.customModels,
-      jsonBody?.model as string,
-      ServiceProvider.Azure as string,
+      [
+        ServiceProvider.OpenAI,
+        ServiceProvider.Azure,
+        jsonBody?.model as string, // support provider-unspecified model
+      ],
     )
   ) {
     return NextResponse.json(
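Note: this hunk inverts the availability check. The old `isModelAvailableInServer` confusingly returned `true` when a model was explicitly *disabled* (see its body in the `app/utils/model.ts` hunk further down, which returns `available === false`); the renamed `isModelNotavailableInServer` makes the intent explicit and accepts a provider list, so the old pair of OpenAI/Azure calls collapses into one. A minimal standalone sketch of the new semantics — the plain lookup table here is a stand-in for the real `collectModelTable`, an assumption made purely for illustration:

```ts
// Hypothetical stand-in for the model table built from CUSTOM_MODELS.
type ModelTable = Record<string, { available: boolean }>;

function isModelNotavailableInServer(
  modelTable: ModelTable,
  modelName: string,
  providerNames: string | string[],
): boolean {
  const providers = Array.isArray(providerNames)
    ? providerNames
    : [providerNames];
  // Available on any listed provider means the model is NOT unavailable.
  for (const provider of providers) {
    if (modelTable[`${modelName}@${provider.toLowerCase()}`]?.available) {
      return false;
    }
  }
  return true;
}

const table: ModelTable = { "gpt-4@openai": { available: true } };
console.log(isModelNotavailableInServer(table, "gpt-4", ["OpenAI", "Azure"])); // false
console.log(isModelNotavailableInServer(table, "gpt-4", "Azure")); // true
```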
@@ -13,7 +13,6 @@ const DANGER_CONFIG = {
   hideBalanceQuery: serverConfig.hideBalanceQuery,
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
-  visionModels: serverConfig.visionModels,
   defaultModel: serverConfig.defaultModel,
 };
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();

@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.ChatGLM as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 // iflytek

 const serverConfig = getServerSideConfig();

@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.Iflytek as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();

@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.Moonshot as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();

@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
   // not undefined and is false
   if (
-    isModelAvailableInServer(
+    isModelNotavailableInServer(
       serverConfig.customModels,
       jsonBody?.model as string,
       ServiceProvider.XAI as string,
@@ -84,12 +84,9 @@ export class ClaudeApi implements LLMApi {
     return res?.content?.[0]?.text;
   }

   async chat(options: ChatOptions): Promise<void> {
-    const accessStore = useAccessStore.getState();
+    const visionModel = isVisionModel(options.config.model);

-    const visionModel = isVisionModel(
-      options.config.model,
-      accessStore.visionModels,
-    );
+    const accessStore = useAccessStore.getState();

     const shouldStream = !!options.config.stream;
@@ -21,16 +21,108 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, isVisionModel } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
+import { preProcessImageContent } from "@/app/utils/chat";
+
+interface BasePayload {
+  model: string;
+}
+
+interface ChatPayload extends BasePayload {
+  messages: ChatOptions["messages"];
+  stream?: boolean;
+  temperature?: number;
+  presence_penalty?: number;
+  frequency_penalty?: number;
+  top_p?: number;
+}
+
+interface ImageGenerationPayload extends BasePayload {
+  prompt: string;
+  size?: string;
+  user_id?: string;
+}
+
+interface VideoGenerationPayload extends BasePayload {
+  prompt: string;
+  duration?: number;
+  resolution?: string;
+  user_id?: string;
+}
+
+type ModelType = "chat" | "image" | "video";

 export class ChatGLMApi implements LLMApi {
   private disableListModels = true;

+  private getModelType(model: string): ModelType {
+    if (model.startsWith("cogview-")) return "image";
+    if (model.startsWith("cogvideo-")) return "video";
+    return "chat";
+  }
+
+  private getModelPath(type: ModelType): string {
+    switch (type) {
+      case "image":
+        return ChatGLM.ImagePath;
+      case "video":
+        return ChatGLM.VideoPath;
+      default:
+        return ChatGLM.ChatPath;
+    }
+  }
+
+  private createPayload(
+    messages: ChatOptions["messages"],
+    modelConfig: any,
+    options: ChatOptions,
+  ): BasePayload {
+    const modelType = this.getModelType(modelConfig.model);
+    const lastMessage = messages[messages.length - 1];
+    const prompt =
+      typeof lastMessage.content === "string"
+        ? lastMessage.content
+        : lastMessage.content.map((c) => c.text).join("\n");
+
+    switch (modelType) {
+      case "image":
+        return {
+          model: modelConfig.model,
+          prompt,
+          size: options.config.size,
+        } as ImageGenerationPayload;
+      default:
+        return {
+          messages,
+          stream: options.config.stream,
+          model: modelConfig.model,
+          temperature: modelConfig.temperature,
+          presence_penalty: modelConfig.presence_penalty,
+          frequency_penalty: modelConfig.frequency_penalty,
+          top_p: modelConfig.top_p,
+        } as ChatPayload;
+    }
+  }
+
+  private parseResponse(modelType: ModelType, json: any): string {
+    switch (modelType) {
+      case "image": {
+        const imageUrl = json.data?.[0]?.url;
+        return imageUrl ? `![Generated Image](${imageUrl})` : "";
+      }
+      case "video": {
+        const videoUrl = json.data?.[0]?.url;
+        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
+      }
+      default:
+        return this.extractMessage(json);
+    }
+  }
+
   path(path: string): string {
     const accessStore = useAccessStore.getState();

     let baseUrl = "";

     if (accessStore.useCustomConfig) {

@@ -51,7 +143,6 @@ export class ChatGLMApi implements LLMApi {
     }

-    console.log("[Proxy Endpoint] ", baseUrl, path);

     return [baseUrl, path].join("/");
   }

@@ -64,9 +155,12 @@ export class ChatGLMApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = visionModel
+        ? await preProcessImageContent(v.content)
+        : getMessageTextContent(v);
       messages.push({ role: v.role, content });
     }

@@ -78,25 +172,16 @@ export class ChatGLMApi implements LLMApi {
         providerName: options.config.providerName,
       },
     };
+    const modelType = this.getModelType(modelConfig.model);
+    const requestPayload = this.createPayload(messages, modelConfig, options);
+    const path = this.path(this.getModelPath(modelType));

-    const requestPayload: RequestPayload = {
-      messages,
-      stream: options.config.stream,
-      model: modelConfig.model,
-      temperature: modelConfig.temperature,
-      presence_penalty: modelConfig.presence_penalty,
-      frequency_penalty: modelConfig.frequency_penalty,
-      top_p: modelConfig.top_p,
-    };
-
-    console.log("[Request] glm payload: ", requestPayload);
+    console.log(`[Request] glm ${modelType} payload: `, requestPayload);

-    const shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);

     try {
-      const chatPath = this.path(ChatGLM.ChatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),

@@ -104,12 +189,23 @@ export class ChatGLMApi implements LLMApi {
         headers: getHeaders(),
       };

       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
         REQUEST_TIMEOUT_MS,
       );

+      if (modelType === "image" || modelType === "video") {
+        const res = await fetch(path, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        console.log(`[Response] glm ${modelType}:`, resJson);
+        const message = this.parseResponse(modelType, resJson);
+        options.onFinish(message, res);
+        return;
+      }
+
+      const shouldStream = !!options.config.stream;
       if (shouldStream) {
         const [tools, funcs] = usePluginStore
           .getState()

@@ -117,7 +213,7 @@ export class ChatGLMApi implements LLMApi {
           useChatStore.getState().currentSession().mask?.plugin || [],
         );
         return stream(
-          chatPath,
+          path,
           requestPayload,
           getHeaders(),
           tools as any,

@@ -125,7 +221,6 @@ export class ChatGLMApi implements LLMApi {
           controller,
           // parseSSE
           (text: string, runTools: ChatMessageTool[]) => {
-            // console.log("parseSSE", text, runTools);
             const json = JSON.parse(text);
             const choices = json.choices as Array<{
               delta: {

@@ -154,7 +249,7 @@ export class ChatGLMApi implements LLMApi {
             }
             return choices[0]?.delta?.content;
           },
-          // processToolMessage, include tool_calls message and tool call results
+          // processToolMessage
           (
             requestPayload: RequestPayload,
             toolCallMessage: any,

@@ -172,7 +267,7 @@ export class ChatGLMApi implements LLMApi {
             options,
           );
       } else {
-        const res = await fetch(chatPath, chatPayload);
+        const res = await fetch(path, chatPayload);
         clearTimeout(requestTimeoutId);

         const resJson = await res.json();

@@ -184,6 +279,7 @@ export class ChatGLMApi implements LLMApi {
       options.onError?.(e as Error);
     }
   }
+
   async usage() {
     return {
       used: 0,
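Note: the ChatGLM client now dispatches on the model-name prefix — `cogview-*` models go to the image endpoint, `cogvideo-*` to the video endpoint, everything else to chat completions — and image/video requests are fetched once instead of streamed, then rendered as a markdown image or an HTML `<video>` tag. A self-contained sketch of the dispatch, reusing the endpoint paths from the `constant.ts` hunk further down:

```ts
type ModelType = "chat" | "image" | "video";

// Endpoint paths as added to the ChatGLM constant in this compare.
const GLM_PATHS: Record<ModelType, string> = {
  chat: "api/paas/v4/chat/completions",
  image: "api/paas/v4/images/generations",
  video: "api/paas/v4/videos/generations",
};

function getModelType(model: string): ModelType {
  if (model.startsWith("cogview-")) return "image";
  if (model.startsWith("cogvideo-")) return "video";
  return "chat";
}

console.log(GLM_PATHS[getModelType("cogview-3-flash")]); // api/paas/v4/images/generations
console.log(GLM_PATHS[getModelType("glm-4v-plus")]); // api/paas/v4/chat/completions
```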
@@ -60,9 +60,18 @@ export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);

+    const getTextFromParts = (parts: any[]) => {
+      if (!Array.isArray(parts)) return "";
+
+      return parts
+        .map((part) => part?.text || "")
+        .filter((text) => text.trim() !== "")
+        .join("\n\n");
+    };
+
     return (
-      res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
-      res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+      getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
+      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
       res?.error?.message ||
       ""
     );

@@ -83,7 +92,7 @@ export class GeminiProApi implements LLMApi {
     }
     const messages = _messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
-      if (isVisionModel(options.config.model, accessStore.visionModels)) {
+      if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
         if (images.length > 0) {
           multimodal = true;

@@ -223,7 +232,10 @@ export class GeminiProApi implements LLMApi {
             },
           });
         }
-        return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
+        return chunkJson?.candidates
+          ?.at(0)
+          ?.content.parts?.map((part: { text: string }) => part.text)
+          .join("\n\n");
       },
       // processToolMessage, include tool_calls message and tool call results
       (
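Note: the Gemini fix stops reading only `parts.at(0)?.text` — a response can carry several parts, and the old code silently dropped everything after the first. A quick standalone illustration of the new helper's behavior:

```ts
// Re-statement of the getTextFromParts helper introduced in the hunk above.
const getTextFromParts = (parts: any[]) => {
  if (!Array.isArray(parts)) return "";
  return parts
    .map((part) => part?.text || "")
    .filter((text) => text.trim() !== "")
    .join("\n\n");
};

// With the old code only "First" survived; empty parts are also skipped now.
console.log(getTextFromParts([{ text: "First" }, {}, { text: "Second" }]));
// => "First\n\nSecond"
```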
@@ -24,7 +24,7 @@ import {
   stream,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
-import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing";
+import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";

 import {
   ChatOptions,

@@ -73,7 +73,7 @@ export interface DalleRequestPayload {
   prompt: string;
   response_format: "url" | "b64_json";
   n: number;
-  size: DalleSize;
+  size: ModelSize;
   quality: DalleQuality;
   style: DalleStyle;
 }

@@ -194,8 +194,6 @@ export class ChatGPTApi implements LLMApi {

     let requestPayload: RequestPayload | DalleRequestPayload;

-    const accessStore = useAccessStore.getState();
-
     const isDalle3 = _isDalle3(options.config.model);
     const isO1 = options.config.model.startsWith("o1");
     if (isDalle3) {

@@ -213,10 +211,7 @@ export class ChatGPTApi implements LLMApi {
         style: options.config?.style ?? "vivid",
       };
     } else {
-      const visionModel = isVisionModel(
-        options.config.model,
-        accessStore.visionModels,
-      );
+      const visionModel = isVisionModel(options.config.model);
       const messages: ChatOptions["messages"] = [];
       for (const v of options.messages) {
         const content = visionModel
@@ -94,11 +94,7 @@ export class HunyuanApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
-    const accessStore = useAccessStore.getState();
-    const visionModel = isVisionModel(
-      options.config.model,
-      accessStore.visionModels,
-    );
+    const visionModel = isVisionModel(options.config.model);
     const messages = options.messages.map((v, index) => ({
       // "Messages 中 system 角色必须位于列表的最开始"
       role: index !== 0 && v.role === "system" ? "user" : v.role,
@@ -72,6 +72,8 @@ import {
   isDalle3,
   showPlugins,
   safeLocalStorage,
+  getModelSizes,
+  supportsCustomSize,
 } from "../utils";

 import { uploadImage as uploadImageRemote } from "@/app/utils/chat";

@@ -79,7 +81,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
 import dynamic from "next/dynamic";

 import { ChatControllerPool } from "../client/controller";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
 import { Prompt, usePromptStore } from "../store/prompt";
 import Locale from "../locales";

@@ -107,7 +109,6 @@ import {
 } from "../constant";
 import { Avatar } from "./emoji";
 import { ContextPrompts, MaskAvatar, MaskConfig } from "./mask";
-import { useSyncStore } from "../store/sync";
 import { useMaskStore } from "../store/mask";
 import { ChatCommandPrefix, useChatCommand, useCommand } from "../command";
 import { prettyObject } from "../utils/format";

@@ -491,7 +492,6 @@ export function ChatActions(props: {
   const currentProviderName =
     session.mask.modelConfig?.providerName || ServiceProvider.OpenAI;
   const allModels = useAllModels();
-  const customVisionModels = useAccessStore().visionModels;
   const models = useMemo(() => {
     const filteredModels = allModels.filter((m) => m.available);
     const defaultModel = filteredModels.find((m) => m.isDefault);

@@ -521,17 +521,18 @@ export function ChatActions(props: {
   const [showSizeSelector, setShowSizeSelector] = useState(false);
   const [showQualitySelector, setShowQualitySelector] = useState(false);
   const [showStyleSelector, setShowStyleSelector] = useState(false);
-  const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
+  const modelSizes = getModelSizes(currentModel);
   const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
   const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
-  const currentSize = session.mask.modelConfig?.size ?? "1024x1024";
+  const currentSize =
+    session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize);
   const currentQuality = session.mask.modelConfig?.quality ?? "standard";
   const currentStyle = session.mask.modelConfig?.style ?? "vivid";

   const isMobileScreen = useMobileScreen();

   useEffect(() => {
-    const show = isVisionModel(currentModel, customVisionModels);
+    const show = isVisionModel(currentModel);
     setShowUploadImage(show);
     if (!show) {
       props.setAttachImages([]);

@@ -675,7 +676,7 @@ export function ChatActions(props: {
         />
       )}

-      {isDalle3(currentModel) && (
+      {supportsCustomSize(currentModel) && (
         <ChatAction
           onClick={() => setShowSizeSelector(true)}
           text={currentSize}

@@ -686,7 +687,7 @@ export function ChatActions(props: {
       {showSizeSelector && (
         <Selector
           defaultSelectedValue={currentSize}
-          items={dalle3Sizes.map((m) => ({
+          items={modelSizes.map((m) => ({
             title: m,
             value: m,
           }))}

@@ -949,8 +950,6 @@ function _Chat() {
   const fontSize = config.fontSize;
   const fontFamily = config.fontFamily;

-  const syncStore = useSyncStore();
-
   const [showExport, setShowExport] = useState(false);

   const inputRef = useRef<HTMLTextAreaElement>(null);

@@ -1398,51 +1397,42 @@ function _Chat() {
     submit: (text) => {
       doSubmit(text);
     },
-    // code: (text) => {
-    //   if (accessStore.disableFastLink) return;
-    //   console.log("[Command] got code from url: ", text);
-    //   showConfirm(Locale.URLCommand.Code + `code = ${text}`).then((res) => {
-    //     if (res) {
-    //       accessStore.update((access) => (access.accessCode = text));
-    //     }
-    //   });
-    // },
+    code: (text) => {
+      if (accessStore.disableFastLink) return;
+      console.log("[Command] got code from url: ", text);
+      showConfirm(Locale.URLCommand.Code + `code = ${text}`).then((res) => {
+        if (res) {
+          accessStore.update((access) => (access.accessCode = text));
+        }
+      });
+    },
     settings: (text) => {
       if (accessStore.disableFastLink) return;

       try {
         const payload = JSON.parse(text) as {
-          code?: string;
-          username?: string;
-          password?: string;
+          key?: string;
+          url?: string;
         };

         console.log("[Command] got settings from url: ", payload);

-        if (payload.code) {
-          accessStore.update((access) => (access.accessCode = payload.code!));
-          if (accessStore.isAuthorized()) {
-            context.pop();
-            const copiedHello = Object.assign({}, BOT_HELLO);
-            context.push(copiedHello);
-            setUserInput(" ");
-          }
-        }
-
-        if (payload.username) {
-          syncStore.update(
-            (config) => (config.webdav.username = payload.username!),
-          );
-        }
-
-        if (payload.password) {
-          syncStore.update(
-            (config) => (config.webdav.password = payload.password!),
-          );
-        }
-
-        if (payload.username && payload.password) {
-          syncStore.sync();
+        if (payload.key || payload.url) {
+          showConfirm(
+            Locale.URLCommand.Settings +
+              `\n${JSON.stringify(payload, null, 4)}`,
+          ).then((res) => {
+            if (!res) return;
+            if (payload.key) {
+              accessStore.update(
+                (access) => (access.openaiApiKey = payload.key!),
+              );
+            }
+            if (payload.url) {
+              accessStore.update((access) => (access.openaiUrl = payload.url!));
+            }
+            accessStore.update((access) => (access.useCustomConfig = true));
+          });
         }
       } catch {
         console.error("[Command] failed to get settings from url: ", text);

@@ -1470,12 +1460,10 @@ function _Chat() {
   // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);

-  const customVisionModels = useAccessStore().visionModels;
-
   const handlePaste = useCallback(
     async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
       const currentModel = chatStore.currentSession().mask.modelConfig.model;
-      if (!isVisionModel(currentModel, customVisionModels)) {
+      if (!isVisionModel(currentModel)) {
         return;
       }
       const items = (event.clipboardData || window.clipboardData).items;

@@ -1512,7 +1500,7 @@ function _Chat() {
         }
       }
     },
-    [attachImages, chatStore, customVisionModels],
+    [attachImages, chatStore],
   );

   async function uploadImage() {

@@ -1560,7 +1548,7 @@ function _Chat() {
     setAttachImages(images);
   }

-  // 捷键 shortcut keys
+  // 快捷键 shortcut keys
   const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);

   useEffect(() => {
@@ -528,21 +528,6 @@ function SyncItems() {
           setShowSyncConfigModal(true);
         }}
       />
-      {couldSync && (
-        <IconButton
-          icon={<UploadIcon />}
-          text={Locale.UI.Overwrite}
-          onClick={async () => {
-            try {
-              await syncStore.overwrite();
-              showToast(Locale.Settings.Sync.Success);
-            } catch (e) {
-              showToast(Locale.Settings.Sync.Fail);
-              console.error("[Sync]", e);
-            }
-          }}
-        />
-      )}
       {couldSync && (
         <IconButton
           icon={<ResetIcon />}
@@ -22,7 +22,6 @@ import {
   MIN_SIDEBAR_WIDTH,
   NARROW_SIDEBAR_WIDTH,
   Path,
-  PLUGINS,
   REPO_URL,
 } from "../constant";

@@ -32,6 +31,12 @@ import dynamic from "next/dynamic";
 import { showConfirm, Selector } from "./ui-lib";
 import clsx from "clsx";

+const DISCOVERY = [
+  { name: Locale.Plugin.Name, path: Path.Plugins },
+  { name: "Stable Diffusion", path: Path.Sd },
+  { name: Locale.SearchChat.Page.Title, path: Path.SearchChat },
+];
+
 const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
   loading: () => null,
 });

@@ -219,7 +224,7 @@ export function SideBarTail(props: {
 export function SideBar(props: { className?: string }) {
   useHotKey();
   const { onDragStart, shouldNarrow } = useDragSideBar();
-  const [showPluginSelector, setShowPluginSelector] = useState(false);
+  const [showDiscoverySelector, setshowDiscoverySelector] = useState(false);
   const navigate = useNavigate();
   const config = useAppConfig();
   const chatStore = useChatStore();

@@ -254,21 +259,21 @@ export function SideBar(props: { className?: string }) {
             icon={<DiscoveryIcon />}
             text={shouldNarrow ? undefined : Locale.Discovery.Name}
             className={styles["sidebar-bar-button"]}
-            onClick={() => setShowPluginSelector(true)}
+            onClick={() => setshowDiscoverySelector(true)}
             shadow
           />
         </div>
-        {showPluginSelector && (
+        {showDiscoverySelector && (
           <Selector
             items={[
-              ...PLUGINS.map((item) => {
+              ...DISCOVERY.map((item) => {
                 return {
                   title: item.name,
                   value: item.path,
                 };
               }),
            ]}
-            onClose={() => setShowPluginSelector(false)}
+            onClose={() => setshowDiscoverySelector(false)}
            onSelection={(s) => {
              navigate(s[0], { state: { fromHome: true } });
            }}
@@ -1,5 +1,6 @@
 import md5 from "spark-md5";
 import { DEFAULT_MODELS, DEFAULT_GA_ID } from "../constant";
+import { isGPT4Model } from "../utils/model";

 declare global {
   namespace NodeJS {

@@ -21,7 +22,6 @@ declare global {
       ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
       DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
      CUSTOM_MODELS?: string; // to control custom models
-      VISION_MODELS?: string; // to control vision models
       DEFAULT_MODEL?: string; // to control default model in every new chat window

       // stability only

@@ -124,27 +124,16 @@ export const getServerSideConfig = () => {

   const disableGPT4 = !!process.env.DISABLE_GPT4;
   let customModels = process.env.CUSTOM_MODELS ?? "";
-  let visionModels = process.env.VISION_MODELS ?? "";
   let defaultModel = process.env.DEFAULT_MODEL ?? "";

   if (disableGPT4) {
     if (customModels) customModels += ",";
-    customModels += DEFAULT_MODELS.filter(
-      (m) =>
-        (m.name.startsWith("gpt-4") ||
-          m.name.startsWith("chatgpt-4o") ||
-          m.name.startsWith("o1")) &&
-        !m.name.startsWith("gpt-4o-mini"),
-    )
+    customModels += DEFAULT_MODELS.filter((m) => isGPT4Model(m.name))
       .map((m) => "-" + m.name)
       .join(",");
-    if (
-      (defaultModel.startsWith("gpt-4") ||
-        defaultModel.startsWith("chatgpt-4o") ||
-        defaultModel.startsWith("o1")) &&
-      !defaultModel.startsWith("gpt-4o-mini")
-    )
+    if (defaultModel && isGPT4Model(defaultModel)) {
       defaultModel = "";
+    }
   }

   const isStability = !!process.env.STABILITY_API_KEY;

@@ -251,7 +240,6 @@ export const getServerSideConfig = () => {
     hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
-    visionModels,
     defaultModel,
     allowedWebDavEndpoints,
   };
@@ -233,6 +233,8 @@ export const XAI = {
 export const ChatGLM = {
   ExampleEndpoint: CHATGLM_BASE_URL,
   ChatPath: "api/paas/v4/chat/completions",
+  ImagePath: "api/paas/v4/images/generations",
+  VideoPath: "api/paas/v4/videos/generations",
 };

 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang

@@ -303,6 +305,9 @@ export const VISION_MODEL_REGEXES = [
   /qwen2-vl/,
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
+  /glm-4v-plus/,
+  /glm-4v/,
+  /glm-4v-flash/,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

@@ -431,6 +436,15 @@ const chatglmModels = [
   "glm-4-long",
   "glm-4-flashx",
   "glm-4-flash",
+  "glm-4v-plus",
+  "glm-4v",
+  "glm-4v-flash", // free
+  "cogview-3-plus",
+  "cogview-3",
+  "cogview-3-flash", // free
+  // 目前无法适配轮询任务
+  // "cogvideox",
+  // "cogvideox-flash", // free
 ];

 let seq = 1000; // 内置的模型序号生成器从1000开始

@@ -586,11 +600,6 @@ export const internalAllowedWebDavEndpoints = [
 ];

 export const DEFAULT_GA_ID = "G-89WN60ZK2E";
-export const PLUGINS = [
-  { name: "Plugins", path: Path.Plugins },
-  { name: "Stable Diffusion", path: Path.Sd },
-  { name: "Search Chat", path: Path.SearchChat },
-];

 export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
 export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
@@ -176,7 +176,7 @@ const cn = {
     },
   },
   Lang: {
-    Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
+    Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
     All: "所有语言",
   },
   Avatar: "头像",

@@ -630,7 +630,7 @@ const cn = {
     Sysmessage: "你是一个助手",
   },
   SearchChat: {
-    Name: "搜索",
+    Name: "搜索聊天记录",
     Page: {
       Title: "搜索聊天记录",
       Search: "输入搜索关键词",

@@ -757,7 +757,6 @@ const cn = {
     Export: "导出",
     Import: "导入",
     Sync: "同步",
-    Overwrite: "覆盖",
     Config: "配置",
   },
   Exporter: {
@@ -762,7 +762,6 @@ const en: LocaleType = {
     Edit: "Edit",
     Export: "Export",
     Import: "Import",
-    Overwrite: "Overwrite",
     Sync: "Sync",
     Config: "Config",
   },
@@ -589,7 +589,6 @@ const fr: PartialLocaleType = {
     Edit: "Modifier",
     Export: "Exporter",
     Import: "Importer",
-    Overwrite: "Remplacer",
     Sync: "Synchroniser",
     Config: "Configurer",
   },
@@ -590,7 +590,6 @@ const it: PartialLocaleType = {
     Edit: "Modifica",
     Export: "Esporta",
     Import: "Importa",
-    Overwrite: "Sostituisci",
     Sync: "Sincronizza",
     Config: "Configura",
   },
@@ -505,7 +505,6 @@ const pt: PartialLocaleType = {
     Edit: "Editar",
     Export: "Exportar",
     Import: "Importar",
-    Overwrite: "Substituir",
     Sync: "Sincronizar",
     Config: "Configurar",
   },
@@ -485,7 +485,7 @@ const tw = {
     },
   },
   SearchChat: {
-    Name: "搜尋",
+    Name: "搜尋聊天記錄",
     Page: {
       Title: "搜尋聊天記錄",
       Search: "輸入搜尋關鍵詞",
@@ -123,7 +123,6 @@ const DEFAULT_ACCESS_STATE = {
   disableGPT4: false,
   disableFastLink: false,
   customModels: "",
-  visionModels: "",
   defaultModel: "",

   // tts config
@@ -1,9 +1,10 @@
 import { LLMModel } from "../client/api";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
 import { getClientConfig } from "../config/client";
 import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_MODELS,
+  DEFAULT_SIDEBAR_WIDTH,
   DEFAULT_TTS_ENGINE,
   DEFAULT_TTS_ENGINES,
   DEFAULT_TTS_MODEL,

@@ -45,20 +46,18 @@ export const DEFAULT_CONFIG = {
   fontSize: 14,
   fontFamily: "",
   theme: Theme.Auto as Theme,
-  // tightBorder: !!config?.isApp,
-  tightBorder: true,
-  sendPreviewBubble: false,
+  tightBorder: !!config?.isApp,
+  sendPreviewBubble: true,
   enableAutoGenerateTitle: true,
-  // sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
-  sidebarWidth: 100,
+  sidebarWidth: DEFAULT_SIDEBAR_WIDTH,

   enableArtifacts: true, // show artifacts config

   enableCodeFold: true, // code fold config

-  disablePromptHint: true,
+  disablePromptHint: false,

-  dontShowMaskSplashScreen: true, // dont show splash screen when create chat
+  dontShowMaskSplashScreen: false, // dont show splash screen when create chat
   hideBuiltinMasks: false, // dont add builtin masks

   customModels: "",

@@ -69,17 +68,17 @@ export const DEFAULT_CONFIG = {
     providerName: "OpenAI" as ServiceProvider,
     temperature: 0.5,
     top_p: 1,
-    max_tokens: 8000,
+    max_tokens: 4000,
     presence_penalty: 0,
     frequency_penalty: 0,
     sendMemory: true,
-    historyMessageCount: 16,
-    compressMessageLengthThreshold: 1000000,
+    historyMessageCount: 4,
+    compressMessageLengthThreshold: 1000,
     compressModel: "",
     compressProviderName: "",
     enableInjectSystemPrompts: true,
     template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
-    size: "1024x1024" as DalleSize,
+    size: "1024x1024" as ModelSize,
     quality: "standard" as DalleQuality,
     style: "vivid" as DalleStyle,
   },
@@ -28,7 +28,7 @@ const DEFAULT_SYNC_STATE = {
   proxyUrl: ApiPath.Cors as string,

   webdav: {
-    endpoint: "https://dav.jyj.cx",
+    endpoint: "",
     username: "",
     password: "",
   },

@@ -88,7 +88,7 @@ export const useSyncStore = createPersistStore(
       return client;
     },

-    async sync(overwrite = false) {
+    async sync() {
       const localState = getLocalAppState();
       const provider = get().provider;
       const config = get()[provider];

@@ -103,13 +103,11 @@ export const useSyncStore = createPersistStore(
           );
           return;
         } else {
-          if (!overwrite) {
-            const parsedRemoteState = JSON.parse(
-              await client.get(config.username),
-            ) as AppState;
-            mergeAppState(localState, parsedRemoteState);
-            setLocalAppState(localState);
-          }
+          const parsedRemoteState = JSON.parse(
+            await client.get(config.username),
+          ) as AppState;
+          mergeAppState(localState, parsedRemoteState);
+          setLocalAppState(localState);
         }
       } catch (e) {
         console.log("[Sync] failed to get remote state", e);

@@ -121,10 +119,6 @@ export const useSyncStore = createPersistStore(
       this.markSyncTime();
     },

-    async overwrite() {
-      await this.sync(true);
-    },
-
     async check() {
       const client = this.getClient();
       return await client.check();
@@ -11,3 +11,14 @@ export interface RequestMessage {
 export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";
 export type DalleQuality = "standard" | "hd";
 export type DalleStyle = "vivid" | "natural";
+
+export type ModelSize =
+  | "1024x1024"
+  | "1792x1024"
+  | "1024x1792"
+  | "768x1344"
+  | "864x1152"
+  | "1344x768"
+  | "1152x864"
+  | "1440x720"
+  | "720x1440";
app/utils.ts

@@ -7,7 +7,7 @@ import { ServiceProvider } from "./constant";
 import { fetch as tauriStreamFetch } from "./utils/stream";
 import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
 import { getClientConfig } from "./config/client";
-import { getModelProvider } from "./utils/model";
+import { ModelSize } from "./typing";

 export function trimTopic(topic: string) {
   // Fix an issue where double quotes still show in the Indonesian language

@@ -254,15 +254,12 @@ export function getMessageImages(message: RequestMessage): string[] {
   return urls;
 }

-export function isVisionModel(model: string, customVisionModels: string) {
+export function isVisionModel(model: string) {
   const clientConfig = getClientConfig();
-  const allVisionModelsList = [customVisionModels, clientConfig?.visionModels]
-    ?.join(",")
-    .split(",")
-    .map((m) => m.trim())
-    .filter(Boolean)
-    .map((m) => getModelProvider(m)[0]);
-  if (allVisionModelsList?.includes(model)) {
+  const envVisionModels = clientConfig?.visionModels
+    ?.split(",")
+    .map((m) => m.trim());
+  if (envVisionModels?.includes(model)) {
     return true;
   }
   return (

@@ -275,6 +272,28 @@ export function isDalle3(model: string) {
   return "dall-e-3" === model;
 }

+export function getModelSizes(model: string): ModelSize[] {
+  if (isDalle3(model)) {
+    return ["1024x1024", "1792x1024", "1024x1792"];
+  }
+  if (model.toLowerCase().includes("cogview")) {
+    return [
+      "1024x1024",
+      "768x1344",
+      "864x1152",
+      "1344x768",
+      "1152x864",
+      "1440x720",
+      "720x1440",
+    ];
+  }
+  return [];
+}
+
+export function supportsCustomSize(model: string): boolean {
+  return getModelSizes(model).length > 0;
+}
+
 export function showPlugins(provider: ServiceProvider, model: string) {
   if (
     provider == ServiceProvider.OpenAI ||
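Note: `getModelSizes` centralizes the per-model size lists that `chat.tsx` previously hard-coded for DALL·E 3, and `supportsCustomSize` replaces the `isDalle3` gate on the size selector, which is what lets CogView models expose their seven sizes. Expected behavior of the new helpers — the import path below is assumed, matching how the test files in this compare reach `app/utils`:

```ts
// Import path assumed; adjust to the caller's location in the repo.
import { getModelSizes, supportsCustomSize } from "../app/utils";

getModelSizes("dall-e-3"); // ["1024x1024", "1792x1024", "1024x1792"]
getModelSizes("cogview-3-flash"); // the seven CogView sizes, e.g. "768x1344"
getModelSizes("gpt-4o"); // [] — so the size selector stays hidden
supportsCustomSize("cogview-3"); // true
```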
@@ -202,3 +202,52 @@ export function isModelAvailableInServer(
   const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
   return modelTable[fullName]?.available === false;
 }
+
+/**
+ * Check if the model name is a GPT-4 related model
+ *
+ * @param modelName The name of the model to check
+ * @returns True if the model is a GPT-4 related model (excluding gpt-4o-mini)
+ */
+export function isGPT4Model(modelName: string): boolean {
+  return (
+    (modelName.startsWith("gpt-4") ||
+      modelName.startsWith("chatgpt-4o") ||
+      modelName.startsWith("o1")) &&
+    !modelName.startsWith("gpt-4o-mini")
+  );
+}
+
+/**
+ * Checks if a model is not available on any of the specified providers in the server.
+ *
+ * @param {string} customModels - A string of custom models, comma-separated.
+ * @param {string} modelName - The name of the model to check.
+ * @param {string|string[]} providerNames - A string or array of provider names to check against.
+ *
+ * @returns {boolean} True if the model is not available on any of the specified providers, false otherwise.
+ */
+export function isModelNotavailableInServer(
+  customModels: string,
+  modelName: string,
+  providerNames: string | string[],
+): boolean {
+  // Check DISABLE_GPT4 environment variable
+  if (
+    process.env.DISABLE_GPT4 === "1" &&
+    isGPT4Model(modelName.toLowerCase())
+  ) {
+    return true;
+  }
+
+  const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
+
+  const providerNamesArray = Array.isArray(providerNames)
+    ? providerNames
+    : [providerNames];
+  for (const providerName of providerNamesArray) {
+    const fullName = `${modelName}@${providerName.toLowerCase()}`;
+    if (modelTable?.[fullName]?.available === true) return false;
+  }
+  return true;
+}
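Note: `isGPT4Model` now holds the GPT-4-family predicate in one place (it previously lived as duplicated `startsWith` chains in `config/server.ts`), and `isModelNotavailableInServer` consults it before the custom-model table, so `DISABLE_GPT4=1` wins regardless of `CUSTOM_MODELS`. Expected behavior, mirroring the test file added below:

```ts
// Import path assumed, as in the test file below.
import { isGPT4Model, isModelNotavailableInServer } from "../app/utils/model";

isGPT4Model("gpt-4-turbo"); // true
isGPT4Model("chatgpt-4o-latest"); // true
isGPT4Model("gpt-4o-mini"); // false — explicitly carved out

// DISABLE_GPT4 short-circuits the availability check:
process.env.DISABLE_GPT4 = "1";
isModelNotavailableInServer("", "gpt-4", "OpenAI"); // true
```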
@@ -1,11 +1,11 @@
 import {
   ChatSession,
-  // useAccessStore,
-  // useAppConfig,
+  useAccessStore,
+  useAppConfig,
   useChatStore,
 } from "../store";
-// import { useMaskStore } from "../store/mask";
-// import { usePromptStore } from "../store/prompt";
+import { useMaskStore } from "../store/mask";
+import { usePromptStore } from "../store/prompt";
 import { StoreKey } from "../constant";
 import { merge } from "./merge";

@@ -32,18 +32,18 @@ export type GetStoreState<T> = T extends { getState: () => infer U }

 const LocalStateSetters = {
   [StoreKey.Chat]: useChatStore.setState,
-  // [StoreKey.Access]: useAccessStore.setState,
-  // [StoreKey.Config]: useAppConfig.setState,
-  // [StoreKey.Mask]: useMaskStore.setState,
-  // [StoreKey.Prompt]: usePromptStore.setState,
+  [StoreKey.Access]: useAccessStore.setState,
+  [StoreKey.Config]: useAppConfig.setState,
+  [StoreKey.Mask]: useMaskStore.setState,
+  [StoreKey.Prompt]: usePromptStore.setState,
 } as const;

 const LocalStateGetters = {
   [StoreKey.Chat]: () => getNonFunctionFileds(useChatStore.getState()),
-  // [StoreKey.Access]: () => getNonFunctionFileds(useAccessStore.getState()),
-  // [StoreKey.Config]: () => getNonFunctionFileds(useAppConfig.getState()),
-  // [StoreKey.Mask]: () => getNonFunctionFileds(useMaskStore.getState()),
-  // [StoreKey.Prompt]: () => getNonFunctionFileds(usePromptStore.getState()),
+  [StoreKey.Access]: () => getNonFunctionFileds(useAccessStore.getState()),
+  [StoreKey.Config]: () => getNonFunctionFileds(useAppConfig.getState()),
+  [StoreKey.Mask]: () => getNonFunctionFileds(useMaskStore.getState()),
+  [StoreKey.Prompt]: () => getNonFunctionFileds(usePromptStore.getState()),
 } as const;

 export type AppState = {

@@ -100,22 +100,22 @@ const MergeStates: StateMerger = {

     return localState;
   },
-  // [StoreKey.Prompt]: (localState, remoteState) => {
-  //   localState.prompts = {
-  //     ...remoteState.prompts,
-  //     ...localState.prompts,
-  //   };
-  //   return localState;
-  // },
-  // [StoreKey.Mask]: (localState, remoteState) => {
-  //   localState.masks = {
-  //     ...remoteState.masks,
-  //     ...localState.masks,
-  //   };
-  //   return localState;
-  // },
-  // [StoreKey.Config]: mergeWithUpdate<AppState[StoreKey.Config]>,
-  // [StoreKey.Access]: mergeWithUpdate<AppState[StoreKey.Access]>,
+  [StoreKey.Prompt]: (localState, remoteState) => {
+    localState.prompts = {
+      ...remoteState.prompts,
+      ...localState.prompts,
+    };
+    return localState;
+  },
+  [StoreKey.Mask]: (localState, remoteState) => {
+    localState.masks = {
+      ...remoteState.masks,
+      ...localState.masks,
+    };
+    return localState;
+  },
+  [StoreKey.Config]: mergeWithUpdate<AppState[StoreKey.Config]>,
+  [StoreKey.Access]: mergeWithUpdate<AppState[StoreKey.Access]>,
 };

 export function getLocalAppState() {
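Note: with the per-store merge functions re-enabled, remote prompts/masks are spread first and local entries second, so local edits win on key conflicts while remote-only entries are still picked up. A tiny illustration of that spread-order rule:

```ts
const remote = { a: { title: "remote" }, b: { title: "only-remote" } };
const local = { a: { title: "local" } };

// Same pattern as the Prompt/Mask mergers above: remote first, local overrides.
const merged = { ...remote, ...local };
console.log(merged); // { a: { title: "local" }, b: { title: "only-remote" } }
```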
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"name": "nextchat",
|
||||
"cwd": "/www/nextchat",
|
||||
"script": "server.js",
|
||||
"env": {
|
||||
"PORT": 8032,
|
||||
"CODE": "scut",
|
||||
"BASE_URL": "https://oneapi.jyj.cx",
|
||||
"OPENAI_API_KEY": "sk-jiangyj",
|
||||
"HIDE_USER_API_KEY": true,
|
||||
"CUSTOM_MODELS": "-all,gemini-2.0-pro-exp-02-05@openai,gemini-2.0-flash-thinking-exp-01-21@openai,gemini-2.0-flash-exp@openai,gemini-2.0-flash@openai,gemini-2.0-flash-lite@openai,gpt-4o-2024-11-20@openai,o3-mini@openai,deepseek-ai/deepseek-v3@openai,deepseek-ai/deepseek-r1@openai,deepseek-chat@openai,deepseek-reasoner@openai,ep-20250124104315-zsg4p@openai",
|
||||
"DEFAULT_MODEL": "gemini-2.0-pro-exp-02-05@openai",
|
||||
"WHITE_WEBDAV_ENDPOINTS": "https://dav.jyj.cx",
|
||||
"VISION_MODELS": "gemini-2.0-flash-thinking-exp-01-21@openai,gemini-2.0-pro-exp-02-05@openai,gemini-2.0-flash-exp@openai,gemini-2.0-flash@openai,gemini-2.0-flash-lite@openai,gpt-4o-2024-11-20@openai,o3-mini@openai,deepseek-ai/DeepSeek-V3@openai,deepseek-ai/DeepSeek-R1@openai,deepseek-chat@openai,deepseek-reasoner@openai,ep-20250124104315-zsg4p@openai"
|
||||
}
|
||||
}
|
||||
test/model-available.test.ts (new file)

@@ -0,0 +1,80 @@
+import { isModelNotavailableInServer } from "../app/utils/model";
+
+describe("isModelNotavailableInServer", () => {
+  test("test model will return false, which means the model is available", () => {
+    const customModels = "";
+    const modelName = "gpt-4";
+    const providerNames = "OpenAI";
+    const result = isModelNotavailableInServer(
+      customModels,
+      modelName,
+      providerNames,
+    );
+    expect(result).toBe(false);
+  });
+
+  test("test model will return true when model is not available in custom models", () => {
+    const customModels = "-all,gpt-4o-mini";
+    const modelName = "gpt-4";
+    const providerNames = "OpenAI";
+    const result = isModelNotavailableInServer(
+      customModels,
+      modelName,
+      providerNames,
+    );
+    expect(result).toBe(true);
+  });
+
+  test("should respect DISABLE_GPT4 setting", () => {
+    process.env.DISABLE_GPT4 = "1";
+    const result = isModelNotavailableInServer("", "gpt-4", "OpenAI");
+    expect(result).toBe(true);
+  });
+
+  test("should handle empty provider names", () => {
+    const result = isModelNotavailableInServer("-all,gpt-4", "gpt-4", "");
+    expect(result).toBe(true);
+  });
+
+  test("should be case insensitive for model names", () => {
+    const result = isModelNotavailableInServer("-all,GPT-4", "gpt-4", "OpenAI");
+    expect(result).toBe(true);
+  });
+
+  test("support passing multiple providers, model unavailable on one of the providers will return true", () => {
+    const customModels = "-all,gpt-4@google";
+    const modelName = "gpt-4";
+    const providerNames = ["OpenAI", "Azure"];
+    const result = isModelNotavailableInServer(
+      customModels,
+      modelName,
+      providerNames,
+    );
+    expect(result).toBe(true);
+  });
+
+  // FIXME: 这个测试用例有问题,需要修复
+  // test("support passing multiple providers, model available on one of the providers will return false", () => {
+  //   const customModels = "-all,gpt-4@google";
+  //   const modelName = "gpt-4";
+  //   const providerNames = ["OpenAI", "Google"];
+  //   const result = isModelNotavailableInServer(
+  //     customModels,
+  //     modelName,
+  //     providerNames,
+  //   );
+  //   expect(result).toBe(false);
+  // });
+
+  test("test custom model without setting provider", () => {
+    const customModels = "-all,mistral-large";
+    const modelName = "mistral-large";
+    const providerNames = modelName;
+    const result = isModelNotavailableInServer(
+      customModels,
+      modelName,
+      providerNames,
+    );
+    expect(result).toBe(false);
+  });
+});
@@ -2,7 +2,6 @@ import { isVisionModel } from "../app/utils";

 describe("isVisionModel", () => {
   const originalEnv = process.env;
-  const customVisionModels = "custom-vlm,another-vlm";

   beforeEach(() => {
     jest.resetModules();

@@ -28,12 +27,12 @@ describe("isVisionModel", () => {
     ];

     visionModels.forEach((model) => {
-      expect(isVisionModel(model, customVisionModels)).toBe(true);
+      expect(isVisionModel(model)).toBe(true);
     });
   });

   test("should exclude specific models", () => {
-    expect(isVisionModel("claude-3-5-haiku-20241022", customVisionModels)).toBe(false);
+    expect(isVisionModel("claude-3-5-haiku-20241022")).toBe(false);
   });

   test("should not identify non-vision models", () => {

@@ -45,26 +44,24 @@ describe("isVisionModel", () => {
     ];

     nonVisionModels.forEach((model) => {
-      expect(isVisionModel(model, customVisionModels)).toBe(false);
+      expect(isVisionModel(model)).toBe(false);
     });
   });

   test("should identify models from VISION_MODELS env var", () => {
     process.env.VISION_MODELS = "custom-vision-model,another-vision-model";

-    expect(isVisionModel("custom-vision-model", customVisionModels)).toBe(true);
-    expect(isVisionModel("another-vision-model", customVisionModels)).toBe(true);
-    expect(isVisionModel("custom-vlm", customVisionModels)).toBe(true);
-    expect(isVisionModel("another-vlm", customVisionModels)).toBe(true);
-    expect(isVisionModel("unrelated-model", customVisionModels)).toBe(false);
+    expect(isVisionModel("custom-vision-model")).toBe(true);
+    expect(isVisionModel("another-vision-model")).toBe(true);
+    expect(isVisionModel("unrelated-model")).toBe(false);
   });

   test("should handle empty or missing VISION_MODELS", () => {
     process.env.VISION_MODELS = "";
-    expect(isVisionModel("unrelated-model", customVisionModels)).toBe(false);
+    expect(isVisionModel("unrelated-model")).toBe(false);

     delete process.env.VISION_MODELS;
-    expect(isVisionModel("unrelated-model", customVisionModels)).toBe(false);
-    expect(isVisionModel("gpt-4-vision", customVisionModels)).toBe(true);
+    expect(isVisionModel("unrelated-model")).toBe(false);
+    expect(isVisionModel("gpt-4-vision")).toBe(true);
   });
 });