Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git

Commit: Merge branch 'main' into main
@@ -13,6 +13,7 @@ const DANGER_CONFIG = {
   hideBalanceQuery: serverConfig.hideBalanceQuery,
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
+  defaultModel: serverConfig.defaultModel,
 };

 declare global {
@@ -1,12 +1,12 @@
 import { NextRequest, NextResponse } from "next/server";
-import { STORAGE_KEY, internalWhiteWebDavEndpoints } from "../../../constant";
+import { STORAGE_KEY, internalAllowedWebDavEndpoints } from "../../../constant";
 import { getServerSideConfig } from "@/app/config/server";

 const config = getServerSideConfig();

-const mergedWhiteWebDavEndpoints = [
-  ...internalWhiteWebDavEndpoints,
-  ...config.whiteWebDevEndpoints,
+const mergedAllowedWebDavEndpoints = [
+  ...internalAllowedWebDavEndpoints,
+  ...config.allowedWebDevEndpoints,
 ].filter((domain) => Boolean(domain.trim()));

 async function handle(
@@ -24,7 +24,9 @@ async function handle(

   // Validate the endpoint to prevent potential SSRF attacks
   if (
-    !mergedWhiteWebDavEndpoints.some((white) => endpoint?.startsWith(white))
+    !mergedAllowedWebDavEndpoints.some(
+      (allowedEndpoint) => endpoint?.startsWith(allowedEndpoint),
+    )
   ) {
     return NextResponse.json(
       {
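Note: the renamed check still validates by URL-prefix matching against a merged allowlist. A standalone sketch of the same idea (the list contents and names here are illustrative, not the project's exact API):

const allowedEndpoints = [
  "https://dav.jianguoyun.com/dav/",
  "https://dav.box.com/dav",
];

// Anything that does not start with a trusted base URL is rejected,
// which is what blocks SSRF-style requests to arbitrary hosts.
function isAllowedEndpoint(endpoint: string): boolean {
  return allowedEndpoints.some((allowed) => endpoint.startsWith(allowed));
}

isAllowedEndpoint("https://dav.box.com/dav/backup"); // true
isAllowedEndpoint("http://169.254.169.254/latest/meta-data"); // false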
@@ -161,6 +161,13 @@ export class ClaudeApi implements LLMApi {
       };
     });

+    if (prompt[0]?.role === "assistant") {
+      prompt.unshift({
+        role: "user",
+        content: ";",
+      });
+    }
+
     const requestBody: AnthropicChatRequest = {
       messages: prompt,
       stream: shouldStream,
@@ -348,7 +355,11 @@ export class ClaudeApi implements LLMApi {
   path(path: string): string {
     const accessStore = useAccessStore.getState();

-    let baseUrl: string = accessStore.anthropicUrl;
+    let baseUrl: string = "";
+
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.anthropicUrl;
+    }

     // if endpoint is empty, use default endpoint
     if (baseUrl.trim().length === 0) {
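Note: the unshift guard above exists because Anthropic's messages API expects the first turn to come from the user, so a conversation that starts with an assistant turn is padded with a minimal user message (";", as in the diff). The same normalization in isolation (types simplified, not the project's exact shapes):

type Role = "user" | "assistant";
interface Message { role: Role; content: string; }

function normalizeForAnthropic(messages: Message[]): Message[] {
  // Prepend a placeholder user turn when history starts with the model.
  if (messages[0]?.role === "assistant") {
    return [{ role: "user", content: ";" }, ...messages];
  }
  return messages;
}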
@@ -21,11 +21,10 @@ export class GeminiProApi implements LLMApi {
   }
   async chat(options: ChatOptions): Promise<void> {
     // const apiClient = this;
-    const visionModel = isVisionModel(options.config.model);
     let multimodal = false;
     const messages = options.messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
-      if (visionModel) {
+      if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
         if (images.length > 0) {
           multimodal = true;
@@ -104,24 +103,25 @@ export class GeminiProApi implements LLMApi {
     };

     const accessStore = useAccessStore.getState();
-    let baseUrl = accessStore.googleUrl;
+
+    let baseUrl = "";
+
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.googleUrl;
+    }

     const isApp = !!getClientConfig()?.isApp;

     let shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      let googleChatPath = visionModel
-        ? Google.VisionChatPath
-        : Google.ChatPath;
-      let chatPath = this.path(googleChatPath);
-
       // let baseUrl = accessStore.googleUrl;

       if (!baseUrl) {
         baseUrl = isApp
-          ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath
-          : chatPath;
+          ? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model)
+          : this.path(Google.ChatPath(modelConfig.model));
       }

       if (isApp) {
@@ -139,6 +139,7 @@ export class GeminiProApi implements LLMApi {
         () => controller.abort(),
         REQUEST_TIMEOUT_MS,
       );
+
       if (shouldStream) {
         let responseText = "";
         let remainText = "";
@@ -60,16 +60,24 @@ export class ChatGPTApi implements LLMApi {
   path(path: string): string {
     const accessStore = useAccessStore.getState();

-    const isAzure = accessStore.provider === ServiceProvider.Azure;
+    let baseUrl = "";

-    if (isAzure && !accessStore.isValidAzure()) {
-      throw Error(
-        "incomplete azure config, please check it in your settings page",
-      );
+    if (accessStore.useCustomConfig) {
+      const isAzure = accessStore.provider === ServiceProvider.Azure;
+
+      if (isAzure && !accessStore.isValidAzure()) {
+        throw Error(
+          "incomplete azure config, please check it in your settings page",
+        );
+      }
+
+      if (isAzure) {
+        path = makeAzurePath(path, accessStore.azureApiVersion);
+      }
+
+      baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
     }

-    let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
-
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
       baseUrl = isApp
@@ -84,10 +92,6 @@ export class ChatGPTApi implements LLMApi {
       baseUrl = "https://" + baseUrl;
     }

-    if (isAzure) {
-      path = makeAzurePath(path, accessStore.azureApiVersion);
-    }
-
     console.log("[Proxy Endpoint] ", baseUrl, path);

     return [baseUrl, path].join("/");
@@ -125,7 +129,7 @@ export class ChatGPTApi implements LLMApi {
     };

     // add max_tokens to vision model
-    if (visionModel) {
+    if (visionModel && modelConfig.model.includes("preview")) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
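Note: the net effect of this path() refactor is a clear precedence: custom endpoints (including Azure) are consulted only when useCustomConfig is enabled, and the built-in proxy is the fallback. A reduced sketch of that precedence (the store shape and fallback host below are simplified stand-ins, not the project's exact values):

interface AccessState {
  useCustomConfig: boolean;
  openaiUrl: string;
}

const DEFAULT_API_HOST = "https://example-proxy.invalid"; // stand-in value

function resolveBaseUrl(access: AccessState, isApp: boolean): string {
  // A custom endpoint wins only when custom config is enabled.
  let baseUrl = access.useCustomConfig ? access.openaiUrl : "";
  if (baseUrl.length === 0) {
    baseUrl = isApp ? DEFAULT_API_HOST : "/api/openai";
  }
  return baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
}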
@@ -59,9 +59,10 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
-  compressImage,
 } from "../utils";

+import { compressImage } from "@/app/utils/chat";
+
 import dynamic from "next/dynamic";

 import { ChatControllerPool } from "../client/controller";
@@ -448,10 +449,20 @@ export function ChatActions(props: {
   // switch model
   const currentModel = chatStore.currentSession().mask.modelConfig.model;
   const allModels = useAllModels();
-  const models = useMemo(
-    () => allModels.filter((m) => m.available),
-    [allModels],
-  );
+  const models = useMemo(() => {
+    const filteredModels = allModels.filter((m) => m.available);
+    const defaultModel = filteredModels.find((m) => m.isDefault);
+
+    if (defaultModel) {
+      const arr = [
+        defaultModel,
+        ...filteredModels.filter((m) => m !== defaultModel),
+      ];
+      return arr;
+    } else {
+      return filteredModels;
+    }
+  }, [allModels]);
   const [showModelSelector, setShowModelSelector] = useState(false);
   const [showUploadImage, setShowUploadImage] = useState(false);
@@ -467,7 +478,10 @@ export function ChatActions(props: {
     // switch to first available model
     const isUnavaliableModel = !models.some((m) => m.name === currentModel);
     if (isUnavaliableModel && models.length > 0) {
-      const nextModel = models[0].name as ModelType;
+      // show next model to default model if exist
+      let nextModel: ModelType = (
+        models.find((model) => model.isDefault) || models[0]
+      ).name;
       chatStore.updateCurrentSession(
         (session) => (session.mask.modelConfig.model = nextModel),
       );
@@ -1075,6 +1089,7 @@ function _Chat() {
           if (payload.url) {
             accessStore.update((access) => (access.openaiUrl = payload.url!));
           }
+          accessStore.update((access) => (access.useCustomConfig = true));
         });
       }
     } catch {
@@ -1102,11 +1117,13 @@ function _Chat() {
   };
   // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);

   const handlePaste = useCallback(
     async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
       const currentModel = chatStore.currentSession().mask.modelConfig.model;
-      if(!isVisionModel(currentModel)){return;}
+      if (!isVisionModel(currentModel)) {
+        return;
+      }
       const items = (event.clipboardData || window.clipboardData).items;
       for (const item of items) {
         if (item.kind === "file" && item.type.startsWith("image/")) {
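Note: the paste handler only engages for vision-capable models; extracting image files from a paste event looks like this on its own (browser DOM types only, simplified from the code above):

function getPastedImages(event: ClipboardEvent): File[] {
  const files: File[] = [];
  // Non-image clipboard items (plain text, HTML) are skipped.
  for (const item of event.clipboardData?.items ?? []) {
    if (item.kind === "file" && item.type.startsWith("image/")) {
      const file = item.getAsFile();
      if (file) files.push(file);
    }
  }
  return files;
}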
@@ -1,4 +1,5 @@
 import tauriConfig from "../../src-tauri/tauri.conf.json";
+import { DEFAULT_INPUT_TEMPLATE } from "../constant";

 export const getBuildConfig = () => {
   if (typeof process === "undefined") {
@@ -38,6 +39,7 @@ export const getBuildConfig = () => {
     ...commitInfo,
     buildMode,
     isApp,
+    template: process.env.DEFAULT_INPUT_TEMPLATE ?? DEFAULT_INPUT_TEMPLATE,
   };
 };
@@ -21,6 +21,7 @@ declare global {
       ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
       DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
       CUSTOM_MODELS?: string; // to control custom models
+      DEFAULT_MODEL?: string; // to control default model in every new chat window

       // azure only
       AZURE_URL?: string; // https://{azure-url}/openai/deployments/{deploy-name}
@@ -33,6 +34,9 @@ declare global {

       // google tag manager
       GTM_ID?: string;
+
+      // custom template for preprocessing user input
+      DEFAULT_INPUT_TEMPLATE?: string;
     }
   }
 }
@@ -50,6 +54,22 @@ const ACCESS_CODES = (function getAccessCodes(): Set<string> {
   }
 })();

+function getApiKey(keys?: string) {
+  const apiKeyEnvVar = keys ?? "";
+  const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
+  const randomIndex = Math.floor(Math.random() * apiKeys.length);
+  const apiKey = apiKeys[randomIndex];
+  if (apiKey) {
+    console.log(
+      `[Server Config] using ${randomIndex + 1} of ${
+        apiKeys.length
+      } api key - ${apiKey}`,
+    );
+  }
+
+  return apiKey;
+}
+
 export const getServerSideConfig = () => {
   if (typeof process === "undefined") {
     throw Error(
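Note: getApiKey centralizes what was previously inlined below: split a comma-separated env value and pick one key at random per config read, spreading load across keys. Usage, as applied later in this diff (the key values are placeholders):

// e.g. OPENAI_API_KEY="sk-aaa,sk-bbb,sk-ccc"
const apiKey = getApiKey(process.env.OPENAI_API_KEY);
// -> one of "sk-aaa" | "sk-bbb" | "sk-ccc", trimmed; undefined if unset

One trade-off worth noting: the log line prints the selected key itself, so server logs now contain key material.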
@@ -59,46 +79,48 @@ export const getServerSideConfig = () => {

   const disableGPT4 = !!process.env.DISABLE_GPT4;
   let customModels = process.env.CUSTOM_MODELS ?? "";
+  let defaultModel = process.env.DEFAULT_MODEL ?? "";

   if (disableGPT4) {
     if (customModels) customModels += ",";
     customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
       .map((m) => "-" + m.name)
       .join(",");
+    if (defaultModel.startsWith("gpt-4")) defaultModel = "";
   }

   const isAzure = !!process.env.AZURE_URL;
   const isGoogle = !!process.env.GOOGLE_API_KEY;
   const isAnthropic = !!process.env.ANTHROPIC_API_KEY;

-  const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
-  const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
-  const randomIndex = Math.floor(Math.random() * apiKeys.length);
-  const apiKey = apiKeys[randomIndex];
-  console.log(
-    `[Server Config] using ${randomIndex + 1} of ${apiKeys.length} api key`,
-  );
+  // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
+  // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
+  // const randomIndex = Math.floor(Math.random() * apiKeys.length);
+  // const apiKey = apiKeys[randomIndex];
+  // console.log(
+  //   `[Server Config] using ${randomIndex + 1} of ${apiKeys.length} api key`,
+  // );

-  const whiteWebDevEndpoints = (process.env.WHITE_WEBDEV_ENDPOINTS ?? "").split(
-    ",",
-  );
+  const allowedWebDevEndpoints = (
+    process.env.WHITE_WEBDEV_ENDPOINTS ?? ""
+  ).split(",");

   return {
     baseUrl: process.env.BASE_URL,
-    apiKey,
+    apiKey: getApiKey(process.env.OPENAI_API_KEY),
     openaiOrgId: process.env.OPENAI_ORG_ID,

     isAzure,
     azureUrl: process.env.AZURE_URL,
-    azureApiKey: process.env.AZURE_API_KEY,
+    azureApiKey: getApiKey(process.env.AZURE_API_KEY),
     azureApiVersion: process.env.AZURE_API_VERSION,

     isGoogle,
-    googleApiKey: process.env.GOOGLE_API_KEY,
+    googleApiKey: getApiKey(process.env.GOOGLE_API_KEY),
     googleUrl: process.env.GOOGLE_URL,

     isAnthropic,
-    anthropicApiKey: process.env.ANTHROPIC_API_KEY,
+    anthropicApiKey: getApiKey(process.env.ANTHROPIC_API_KEY),
     anthropicApiVersion: process.env.ANTHROPIC_API_VERSION,
     anthropicUrl: process.env.ANTHROPIC_URL,

@@ -116,6 +138,7 @@ export const getServerSideConfig = () => {
     hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
-    whiteWebDevEndpoints,
+    defaultModel,
+    allowedWebDevEndpoints,
   };
 };
app/constant.ts
@@ -98,10 +98,8 @@ export const Azure = {

 export const Google = {
   ExampleEndpoint: "https://generativelanguage.googleapis.com/",
-  ChatPath: "v1beta/models/gemini-1.5-pro-latest:generateContent",
-  VisionChatPath: "v1beta/models/gemini-1.5-pro-latest:generateContent",
+  ChatPath: (modelName: string) => `v1beta/models/${modelName}:generateContent`,

   // /api/openai/v1/chat/completions
 };

 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
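Note: with ChatPath now a function of the model name, the request path follows whatever model is selected instead of being pinned to a single Gemini release:

Google.ChatPath("gemini-pro");
// -> "v1beta/models/gemini-pro:generateContent"
Google.ChatPath("gemini-1.5-flash-latest");
// -> "v1beta/models/gemini-1.5-flash-latest:generateContent"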
@@ -130,265 +128,82 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-turbo": "2023-12",
   "gpt-4-turbo-2024-04-09": "2023-12",
   "gpt-4-turbo-preview": "2023-12",
-  "gpt-4-1106-preview": "2023-04",
-  "gpt-4-0125-preview": "2023-12",
+  "gpt-4o": "2023-10",
+  "gpt-4o-2024-05-13": "2023-10",
   "gpt-4-vision-preview": "2023-04",
+  // After improvements,
+  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
   "gemini-pro": "2023-12",
+  "gemini-pro-vision": "2023-12",
 };

+const openaiModels = [
+  "gpt-3.5-turbo",
+  "gpt-3.5-turbo-1106",
+  "gpt-3.5-turbo-0125",
+  "gpt-4",
+  "gpt-4-0613",
+  "gpt-4-32k",
+  "gpt-4-32k-0613",
+  "gpt-4-turbo",
+  "gpt-4-turbo-preview",
+  "gpt-4o",
+  "gpt-4o-2024-05-13",
+  "gpt-4-vision-preview",
+  "gpt-4-turbo-2024-04-09",
+];
+
+const googleModels = [
+  "gemini-1.0-pro",
+  "gemini-1.5-pro-latest",
+  "gemini-1.5-flash-latest",
+  "gemini-pro-vision",
+];
+
+const anthropicModels = [
+  "claude-instant-1.2",
+  "claude-2.0",
+  "claude-2.1",
+  "claude-3-sonnet-20240229",
+  "claude-3-opus-20240229",
+  "claude-3-haiku-20240307",
+];
+
 export const DEFAULT_MODELS = [
-  {
-    name: "gpt-4",
+  ...openaiModels.map((name) => ({
+    name,
     available: true,
     provider: {
       id: "openai",
       providerName: "OpenAI",
       providerType: "openai",
     },
-  },
-  {
-    name: "gpt-4-0314",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-0613",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-32k",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-32k-0314",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-32k-0613",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-turbo",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-turbo-2024-04-09",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-turbo-preview",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-1106-preview",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-0125-preview",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-4-vision-preview",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-0125",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-0301",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-0613",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-1106",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-16k",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gpt-3.5-turbo-16k-0613",
-    available: true,
-    provider: {
-      id: "openai",
-      providerName: "OpenAI",
-      providerType: "openai",
-    },
-  },
-  {
-    name: "gemini-pro",
+  })),
+  ...googleModels.map((name) => ({
+    name,
     available: true,
     provider: {
       id: "google",
       providerName: "Google",
       providerType: "google",
     },
-  },
-  {
-    name: "gemini-pro-vision",
-    available: true,
-    provider: {
-      id: "google",
-      providerName: "Google",
-      providerType: "google",
-    },
-  },
-  {
-    name: "claude-instant-1.2",
+  })),
+  ...anthropicModels.map((name) => ({
+    name,
     available: true,
     provider: {
       id: "anthropic",
       providerName: "Anthropic",
       providerType: "anthropic",
     },
-  },
-  {
-    name: "claude-2.0",
-    available: true,
-    provider: {
-      id: "anthropic",
-      providerName: "Anthropic",
-      providerType: "anthropic",
-    },
-  },
-  {
-    name: "claude-2.1",
-    available: true,
-    provider: {
-      id: "anthropic",
-      providerName: "Anthropic",
-      providerType: "anthropic",
-    },
-  },
-  {
-    name: "claude-3-opus-20240229",
-    available: true,
-    provider: {
-      id: "anthropic",
-      providerName: "Anthropic",
-      providerType: "anthropic",
-    },
-  },
-  {
-    name: "claude-3-sonnet-20240229",
-    available: true,
-    provider: {
-      id: "anthropic",
-      providerName: "Anthropic",
-      providerType: "anthropic",
-    },
-  },
-  {
-    name: "claude-3-haiku-20240307",
-    available: true,
-    provider: {
-      id: "anthropic",
-      providerName: "Anthropic",
-      providerType: "anthropic",
-    },
-  },
+  })),
 ] as const;

 export const CHAT_PAGE_SIZE = 15;
 export const MAX_RENDER_MSG_COUNT = 45;

 // some famous webdav endpoints
-export const internalWhiteWebDavEndpoints = [
+export const internalAllowedWebDavEndpoints = [
   "https://dav.jianguoyun.com/dav/",
   "https://dav.dropdav.com/",
   "https://dav.box.com/dav",
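Note: the refactor above replaces roughly 250 lines of copy-pasted model objects with one name list and one map per provider. A condensed illustration of the pattern (the helper name and model lists here are illustrative):

const toEntries = (names: string[], id: string, providerName: string) =>
  names.map((name) => ({
    name,
    available: true,
    provider: { id, providerName, providerType: id },
  }));

const DEFAULT_MODELS = [
  ...toEntries(["gpt-4", "gpt-4o"], "openai", "OpenAI"),
  ...toEntries(["gemini-1.5-pro-latest"], "google", "Google"),
] as const;

Adding a model is now a one-line change to the relevant name array.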
@@ -296,7 +296,7 @@ const en: LocaleType = {

     Endpoint: {
       Title: "OpenAI Endpoint",
-      SubTitle: "Must starts with http(s):// or use /api/openai as default",
+      SubTitle: "Must start with http(s):// or use /api/openai as default",
     },
   },
   Azure: {
@@ -8,6 +8,7 @@ import { getHeaders } from "../client/api";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import { ensure } from "../utils/clone";
+import { DEFAULT_CONFIG } from "./config";

 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done

@@ -48,6 +49,7 @@ const DEFAULT_ACCESS_STATE = {
   disableGPT4: false,
   disableFastLink: false,
   customModels: "",
+  defaultModel: "",
 };

 export const useAccessStore = createPersistStore(
@@ -100,6 +102,13 @@ export const useAccessStore = createPersistStore(
       },
     })
       .then((res) => res.json())
+      .then((res) => {
+        // Set default model from env request
+        let defaultModel = res.defaultModel ?? "";
+        DEFAULT_CONFIG.modelConfig.model =
+          defaultModel !== "" ? defaultModel : "gpt-3.5-turbo";
+        return res;
+      })
       .then((res: DangerConfig) => {
         console.log("[Config] got config from server", res);
         set(() => ({ ...res }));
@@ -21,6 +21,8 @@ import { estimateTokenLength } from "../utils/token";
 import { nanoid } from "nanoid";
 import { createPersistStore } from "../utils/store";
 import { identifyDefaultClaudeModel } from "../utils/checkers";
+import { collectModelsWithDefaultModel } from "../utils/model";
+import { useAccessStore } from "./access";

 export type ChatMessage = RequestMessage & {
   date: string;
@@ -87,9 +89,19 @@ function createEmptySession(): ChatSession {
 function getSummarizeModel(currentModel: string) {
   // if it is using gpt-* models, force to use 3.5 to summarize
   if (currentModel.startsWith("gpt")) {
-    return SUMMARIZE_MODEL;
+    const configStore = useAppConfig.getState();
+    const accessStore = useAccessStore.getState();
+    const allModel = collectModelsWithDefaultModel(
+      configStore.models,
+      [configStore.customModels, accessStore.customModels].join(","),
+      accessStore.defaultModel,
+    );
+    const summarizeModel = allModel.find(
+      (m) => m.name === SUMMARIZE_MODEL && m.available,
+    );
+    return summarizeModel?.name ?? currentModel;
   }
-  if (currentModel.startsWith("gemini-pro")) {
+  if (currentModel.startsWith("gemini")) {
     return GEMINI_SUMMARIZE_MODEL;
   }
   return currentModel;
@@ -416,14 +428,13 @@ export const useChatStore = createPersistStore(
       getMemoryPrompt() {
         const session = get().currentSession();

-        return {
-          role: "system",
-          content:
-            session.memoryPrompt.length > 0
-              ? Locale.Store.Prompt.History(session.memoryPrompt)
-              : "",
-          date: "",
-        } as ChatMessage;
+        if (session.memoryPrompt.length) {
+          return {
+            role: "system",
+            content: Locale.Store.Prompt.History(session.memoryPrompt),
+            date: "",
+          } as ChatMessage;
+        }
       },

       getMessagesWithMemory() {
@@ -459,16 +470,15 @@ export const useChatStore = createPersistStore(
             systemPrompts.at(0)?.content ?? "empty",
           );
         }

+        const memoryPrompt = get().getMemoryPrompt();
         // long term memory
         const shouldSendLongTermMemory =
           modelConfig.sendMemory &&
           session.memoryPrompt &&
           session.memoryPrompt.length > 0 &&
           session.lastSummarizeIndex > clearContextIndex;
-        const longTermMemoryPrompts = shouldSendLongTermMemory
-          ? [get().getMemoryPrompt()]
-          : [];
+        const longTermMemoryPrompts =
+          shouldSendLongTermMemory && memoryPrompt ? [memoryPrompt] : [];
         const longTermMemoryStartIndex = session.lastSummarizeIndex;

         // short term memory
@@ -593,9 +603,11 @@ export const useChatStore = createPersistStore(
             Math.max(0, n - modelConfig.historyMessageCount),
           );
         }
-
-        // add memory prompt
-        toBeSummarizedMsgs.unshift(get().getMemoryPrompt());
+        const memoryPrompt = get().getMemoryPrompt();
+        if (memoryPrompt) {
+          // add memory prompt
+          toBeSummarizedMsgs.unshift(memoryPrompt);
+        }

         const lastSummarizeIndex = session.messages.length;
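Note: getMemoryPrompt now returns undefined when no summary exists, so both call sites above guard before use instead of receiving an empty-content stub. The contract, reduced to its essentials (message type simplified):

interface ChatMessage { role: string; content: string; date: string; }

function getMemoryPrompt(memoryPrompt: string): ChatMessage | undefined {
  // Only produce a system message when a summary actually exists.
  if (memoryPrompt.length) {
    return { role: "system", content: memoryPrompt, date: "" };
  }
}

const maybePrompt = getMemoryPrompt("");
const prompts = maybePrompt ? [maybePrompt] : []; // guard, as in both call sites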
@@ -1,5 +1,4 @@
 import { LLMModel } from "../client/api";
-import { isMacOS } from "../utils";
 import { getClientConfig } from "../config/client";
 import {
   DEFAULT_INPUT_TEMPLATE,
@@ -25,6 +24,8 @@ export enum Theme {
   Light = "light",
 }

+const config = getClientConfig();
+
 export const DEFAULT_CONFIG = {
   lastUpdate: Date.now(), // timestamp, to merge state
@@ -32,7 +33,7 @@ export const DEFAULT_CONFIG = {
   avatar: "1f603",
   fontSize: 14,
   theme: Theme.Auto as Theme,
-  tightBorder: !!getClientConfig()?.isApp,
+  tightBorder: !!config?.isApp,
   sendPreviewBubble: true,
   enableAutoGenerateTitle: true,
   sidebarWidth: DEFAULT_SIDEBAR_WIDTH,
@@ -56,7 +57,7 @@ export const DEFAULT_CONFIG = {
     historyMessageCount: 4,
     compressMessageLengthThreshold: 1000,
     enableInjectSystemPrompts: true,
-    template: DEFAULT_INPUT_TEMPLATE,
+    template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
   },
 };
@@ -132,7 +133,7 @@ export const useAppConfig = createPersistStore(
   }),
   {
     name: StoreKey.Config,
-    version: 3.8,
+    version: 3.9,
     migrate(persistedState, version) {
       const state = persistedState as ChatConfig;

@@ -163,6 +164,13 @@ export const useAppConfig = createPersistStore(
         state.lastUpdate = Date.now();
       }

+      if (version < 3.9) {
+        state.modelConfig.template =
+          state.modelConfig.template !== DEFAULT_INPUT_TEMPLATE
+            ? state.modelConfig.template
+            : config?.template ?? DEFAULT_INPUT_TEMPLATE;
+      }
+
       return state as any;
     },
   },
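Note: the version bump to 3.9 pairs with a migration that re-resolves the template from build config only when the stored value is still the stock default, so user-edited templates survive. The pattern in isolation (state simplified; buildTemplate is a stand-in for config?.template):

const DEFAULT_INPUT_TEMPLATE = "{{input}}";

function migrateTemplate(
  stored: string,
  version: number,
  buildTemplate?: string,
): string {
  if (version < 3.9 && stored === DEFAULT_INPUT_TEMPLATE) {
    return buildTemplate ?? DEFAULT_INPUT_TEMPLATE;
  }
  return stored; // user-customized values are preserved verbatim
}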
@@ -97,11 +97,18 @@ export const useSyncStore = createPersistStore(
       const client = this.getClient();

       try {
-        const remoteState = JSON.parse(
-          await client.get(config.username),
-        ) as AppState;
-        mergeAppState(localState, remoteState);
-        setLocalAppState(localState);
+        const remoteState = await client.get(config.username);
+        if (!remoteState || remoteState === "") {
+          await client.set(config.username, JSON.stringify(localState));
+          console.log("[Sync] Remote state is empty, using local state instead.");
+          return;
+        } else {
+          const parsedRemoteState = JSON.parse(
+            await client.get(config.username),
+          ) as AppState;
+          mergeAppState(localState, parsedRemoteState);
+          setLocalAppState(localState);
+        }
       } catch (e) {
         console.log("[Sync] failed to get remote state", e);
         throw e;
@@ -86,6 +86,7 @@
     @include dark;
   }
 }
+
 html {
   height: var(--full-height);

@@ -110,6 +111,10 @@ body {
   @media only screen and (max-width: 600px) {
     background-color: var(--second);
   }
+
+  *:focus-visible {
+    outline: none;
+  }
 }

 ::-webkit-scrollbar {
app/utils.ts
@@ -83,48 +83,6 @@ export async function downloadAs(text: string, filename: string) {
   }
 }

-export function compressImage(file: File, maxSize: number): Promise<string> {
-  return new Promise((resolve, reject) => {
-    const reader = new FileReader();
-    reader.onload = (readerEvent: any) => {
-      const image = new Image();
-      image.onload = () => {
-        let canvas = document.createElement("canvas");
-        let ctx = canvas.getContext("2d");
-        let width = image.width;
-        let height = image.height;
-        let quality = 0.9;
-        let dataUrl;
-
-        do {
-          canvas.width = width;
-          canvas.height = height;
-          ctx?.clearRect(0, 0, canvas.width, canvas.height);
-          ctx?.drawImage(image, 0, 0, width, height);
-          dataUrl = canvas.toDataURL("image/jpeg", quality);
-
-          if (dataUrl.length < maxSize) break;
-
-          if (quality > 0.5) {
-            // Prioritize quality reduction
-            quality -= 0.1;
-          } else {
-            // Then reduce the size
-            width *= 0.9;
-            height *= 0.9;
-          }
-        } while (dataUrl.length > maxSize);
-
-        resolve(dataUrl);
-      };
-      image.onerror = reject;
-      image.src = readerEvent.target.result;
-    };
-    reader.onerror = reject;
-    reader.readAsDataURL(file);
-  });
-}
-
 export function readFromFile() {
   return new Promise<string>((res, rej) => {
     const fileInput = document.createElement("input");
@@ -290,8 +248,19 @@ export function getMessageImages(message: RequestMessage): string[] {
 }

 export function isVisionModel(model: string) {
-  const visionKeywords = ["vision", "claude-3"];
-  const isGpt4Turbo = model.includes("gpt-4-turbo") && !model.includes("preview");
-  // Note: This is a better way using the TypeScript feature instead of `&&` or `||` (ts v5.5.0-dev.20240314 I've been using)
-
-  return visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo;
+  const visionKeywords = [
+    "vision",
+    "claude-3",
+    "gemini-1.5-pro",
+    "gemini-1.5-flash",
+    "gpt-4o",
+  ];
+  const isGpt4Turbo =
+    model.includes("gpt-4-turbo") && !model.includes("preview");
+
+  return (
+    visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo
+  );
 }
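Note: a quick behavior check of the expanded keyword list (plain calls to the function above):

isVisionModel("gpt-4-vision-preview");    // true  ("vision")
isVisionModel("gpt-4o-2024-05-13");       // true  ("gpt-4o")
isVisionModel("gemini-1.5-flash-latest"); // true  ("gemini-1.5-flash")
isVisionModel("gpt-4-turbo");             // true  (gpt-4-turbo, non-preview)
isVisionModel("gpt-4-turbo-preview");     // false (preview excluded)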
app/utils/chat.ts (new file)
@@ -0,0 +1,54 @@
+import heic2any from "heic2any";
+
+export function compressImage(file: File, maxSize: number): Promise<string> {
+  return new Promise((resolve, reject) => {
+    const reader = new FileReader();
+    reader.onload = (readerEvent: any) => {
+      const image = new Image();
+      image.onload = () => {
+        let canvas = document.createElement("canvas");
+        let ctx = canvas.getContext("2d");
+        let width = image.width;
+        let height = image.height;
+        let quality = 0.9;
+        let dataUrl;
+
+        do {
+          canvas.width = width;
+          canvas.height = height;
+          ctx?.clearRect(0, 0, canvas.width, canvas.height);
+          ctx?.drawImage(image, 0, 0, width, height);
+          dataUrl = canvas.toDataURL("image/jpeg", quality);
+
+          if (dataUrl.length < maxSize) break;
+
+          if (quality > 0.5) {
+            // Prioritize quality reduction
+            quality -= 0.1;
+          } else {
+            // Then reduce the size
+            width *= 0.9;
+            height *= 0.9;
+          }
+        } while (dataUrl.length > maxSize);
+
+        resolve(dataUrl);
+      };
+      image.onerror = reject;
+      image.src = readerEvent.target.result;
+    };
+    reader.onerror = reject;
+
+    if (file.type.includes("heic")) {
+      heic2any({ blob: file, toType: "image/jpeg" })
+        .then((blob) => {
+          reader.readAsDataURL(blob as Blob);
+        })
+        .catch((e) => {
+          reject(e);
+        });
+    }
+
+    reader.readAsDataURL(file);
+  });
+}
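Note: callers now import compressImage from app/utils/chat instead of app/utils; typical use is bounding a pasted image before attaching it (the size budget below is an illustrative value):

// Compress a user-supplied image to roughly 256 KB of encoded data URL.
async function attachImage(file: File): Promise<string> {
  const dataUrl = await compressImage(file, 256 * 1024);
  return dataUrl; // "data:image/jpeg;base64,..."
}

Also worth noting: the HEIC branch schedules a conversion but execution still falls through to the unconditional readAsDataURL(file) on the last line, so HEIC inputs can trigger two reads.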
@@ -93,14 +93,17 @@ export function createUpstashClient(store: SyncStore) {
     }

     let url;
-    if (proxyUrl.length > 0 || proxyUrl === "/") {
-      let u = new URL(proxyUrl + "/api/upstash/" + path);
+    const pathPrefix = "/api/upstash/";
+
+    try {
+      let u = new URL(proxyUrl + pathPrefix + path);
       // add query params
       u.searchParams.append("endpoint", config.endpoint);
       url = u.toString();
-    } else {
-      url = "/api/upstash/" + path + "?endpoint=" + config.endpoint;
+    } catch (e) {
+      url = pathPrefix + path + "?endpoint=" + config.endpoint;
     }

     return url;
   },
 };
@@ -63,26 +63,26 @@ export function createWebDavClient(store: SyncStore) {
       };
     },
     path(path: string, proxyUrl: string = "") {
-      // if (!path.endsWith("/")) {
-      //   path += "/";
-      // }
       if (path.startsWith("/")) {
         path = path.slice(1);
       }

-      if (proxyUrl.length > 0 && !proxyUrl.endsWith("/")) {
-        proxyUrl += "/";
+      if (proxyUrl.endsWith("/")) {
+        proxyUrl = proxyUrl.slice(0, -1);
       }

       let url;
-      if (proxyUrl.length > 0 || proxyUrl === "/") {
-        let u = new URL(proxyUrl + "api/webdav/" + path);
+      const pathPrefix = "/api/webdav/";
+
+      try {
+        let u = new URL(proxyUrl + pathPrefix + path);
         // add query params
         u.searchParams.append("endpoint", config.endpoint);
         url = u.toString();
-      } else {
-        url = "/api/upstash/" + path + "?endpoint=" + config.endpoint;
+      } catch (e) {
+        url = pathPrefix + path + "?endpoint=" + config.endpoint;
       }

       return url;
     },
   };
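Note: both sync clients now share the same trick: attempt new URL(...) and treat a constructor throw (empty or relative proxy URL) as the signal to fall back to a same-origin path. In isolation:

function buildSyncUrl(proxyUrl: string, path: string, endpoint: string): string {
  const pathPrefix = "/api/webdav/";
  try {
    // Succeeds only when proxyUrl is an absolute URL.
    const u = new URL(proxyUrl + pathPrefix + path);
    u.searchParams.append("endpoint", endpoint);
    return u.toString();
  } catch {
    // Relative or empty proxyUrl: fall back to the same-origin API route.
    return pathPrefix + path + "?endpoint=" + endpoint;
  }
}

buildSyncUrl("https://proxy.example.com", "backup", "https://dav.example.com/dav/");
// -> "https://proxy.example.com/api/webdav/backup?endpoint=..."
buildSyncUrl("", "backup", "https://dav.example.com/dav/");
// -> "/api/webdav/backup?endpoint=..."

The rewrite also corrects the old WebDAV fallback, which mistakenly built "/api/upstash/" URLs.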
@@ -1,14 +1,15 @@
 import { useMemo } from "react";
 import { useAccessStore, useAppConfig } from "../store";
-import { collectModels } from "./model";
+import { collectModels, collectModelsWithDefaultModel } from "./model";

 export function useAllModels() {
   const accessStore = useAccessStore();
   const configStore = useAppConfig();
   const models = useMemo(() => {
-    return collectModels(
+    return collectModelsWithDefaultModel(
       configStore.models,
       [configStore.customModels, accessStore.customModels].join(","),
+      accessStore.defaultModel,
     );
   }, [accessStore.customModels, configStore.customModels, configStore.models]);
@@ -1,5 +1,11 @@
 import { LLMModel } from "../client/api";

+const customProvider = (modelName: string) => ({
+  id: modelName,
+  providerName: "",
+  providerType: "custom",
+});
+
 export function collectModelTable(
   models: readonly LLMModel[],
   customModels: string,
@@ -11,6 +17,7 @@ export function collectModelTable(
     name: string;
     displayName: string;
     provider?: LLMModel["provider"]; // Marked as optional
+    isDefault?: boolean;
   }
 > = {};

@@ -22,12 +29,6 @@ export function collectModelTable(
     };
   });

-  const customProvider = (modelName: string) => ({
-    id: modelName,
-    providerName: "",
-    providerType: "custom",
-  });
-
   // server custom models
   customModels
     .split(",")
@@ -52,6 +53,24 @@ export function collectModelTable(
       };
     }
   });

   return modelTable;
 }
+
+export function collectModelTableWithDefaultModel(
+  models: readonly LLMModel[],
+  customModels: string,
+  defaultModel: string,
+) {
+  let modelTable = collectModelTable(models, customModels);
+  if (defaultModel && defaultModel !== "") {
+    modelTable[defaultModel] = {
+      ...modelTable[defaultModel],
+      name: defaultModel,
+      available: true,
+      isDefault: true,
+    };
+  }
+  return modelTable;
+}
@@ -67,3 +86,17 @@ export function collectModels(

   return allModels;
 }
+
+export function collectModelsWithDefaultModel(
+  models: readonly LLMModel[],
+  customModels: string,
+  defaultModel: string,
+) {
+  const modelTable = collectModelTableWithDefaultModel(
+    models,
+    customModels,
+    defaultModel,
+  );
+  const allModels = Object.values(modelTable);
+  return allModels;
+}
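Note: this is where the DEFAULT_MODEL plumbing ends: the env value travels through getServerSideConfig into the access store, and these helpers mark the matching entry with isDefault so UI code (chat.tsx above) can float it to the front. A reduced sketch of the marking step (model shape simplified):

interface ModelEntry { name: string; available: boolean; isDefault?: boolean; }

function markDefault(table: Record<string, ModelEntry>, defaultModel: string) {
  if (defaultModel && defaultModel !== "") {
    table[defaultModel] = {
      ...table[defaultModel],
      name: defaultModel,
      available: true,
      isDefault: true,
    };
  }
  return table;
}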