Merge remote-tracking branch 'upstream/main' into klaas20240303

Klaas Reineke
2024-03-25 15:56:20 +01:00
134 changed files with 4418 additions and 920 deletions

View File: app/store/access.ts

@@ -12,7 +12,9 @@ import { ensure } from "../utils/clone";
 let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
 const DEFAULT_OPENAI_URL =
-  getClientConfig()?.buildMode === "export" ? DEFAULT_API_HOST : ApiPath.OpenAI;
+  getClientConfig()?.buildMode === "export"
+    ? DEFAULT_API_HOST + "/api/proxy/openai"
+    : ApiPath.OpenAI;
 const DEFAULT_ACCESS_STATE = {
   accessCode: "",
@@ -29,6 +31,11 @@ const DEFAULT_ACCESS_STATE = {
   azureApiKey: "",
   azureApiVersion: "2023-08-01-preview",
+  // google ai studio
+  googleUrl: "",
+  googleApiKey: "",
+  googleApiVersion: "v1",
   // server config
   needCode: true,
   hideUserApiKey: false,
@@ -56,6 +63,10 @@ export const useAccessStore = createPersistStore(
       return ensure(get(), ["azureUrl", "azureApiKey", "azureApiVersion"]);
     },
+    isValidGoogle() {
+      return ensure(get(), ["googleApiKey"]);
+    },
     isAuthorized() {
       this.fetch();
@@ -63,6 +74,7 @@ export const useAccessStore = createPersistStore(
       return (
         this.isValidOpenAI() ||
         this.isValidAzure() ||
+        this.isValidGoogle() ||
         !this.enabledAccessControl() ||
         (this.enabledAccessControl() && ensure(get(), ["accessCode"]))
       );
@@ -99,6 +111,7 @@ export const useAccessStore = createPersistStore(
         token: string;
         openaiApiKey: string;
         azureApiVersion: string;
+        googleApiKey: string;
       };
       state.openaiApiKey = state.token;
       state.azureApiVersion = "2023-08-01-preview";
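
An aside on what the first hunk changes: in static `export` builds (the packaged desktop app) there is no Next.js server to handle relative API paths, so the client must call an absolute proxy URL instead. A minimal sketch (not part of this commit's code) of the URL resolution and the `ensure()` guard behind `isValidGoogle()`; `DEFAULT_API_HOST`, `ApiPath`, and `ensure` below are simplified stand-ins for the real constants and helper:

```ts
// Stand-in values; the real constants live in app/constant.
const DEFAULT_API_HOST = "https://example-host.dev"; // assumed placeholder host

enum ApiPath {
  OpenAI = "/api/openai",
}

function resolveOpenAIUrl(buildMode: string): string {
  // Export builds have no server to rewrite relative paths, so they
  // target an absolute proxy URL; web builds keep the relative route.
  return buildMode === "export"
    ? DEFAULT_API_HOST + "/api/proxy/openai"
    : ApiPath.OpenAI;
}

// ensure(): true only when every listed key holds a non-empty value.
function ensure<T extends Record<string, unknown>>(
  obj: T,
  keys: (keyof T)[],
): boolean {
  return keys.every(
    (k) => obj[k] !== undefined && obj[k] !== null && obj[k] !== "",
  );
}

const access = { googleUrl: "", googleApiKey: "AIza-example", googleApiVersion: "v1" };
console.log(resolveOpenAIUrl("export")); // https://example-host.dev/api/proxy/openai
console.log(ensure(access, ["googleApiKey"])); // true
```

Note that `isValidGoogle()` requires only `googleApiKey`; `googleUrl` and `googleApiVersion` keep their defaults.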

View File: app/store/chat.ts

@@ -1,4 +1,4 @@
-import { trimTopic } from "../utils";
+import { trimTopic, getMessageTextContent } from "../utils";

 import Locale, { getLang } from "../locales";
 import { showToast } from "../components/ui-lib";
@@ -6,12 +6,15 @@ import { ModelConfig, ModelType, useAppConfig } from "./config";
 import { createEmptyMask, Mask } from "./mask";
 import {
   DEFAULT_INPUT_TEMPLATE,
+  DEFAULT_MODELS,
   DEFAULT_SYSTEM_TEMPLATE,
   KnowledgeCutOffDate,
+  ModelProvider,
   StoreKey,
   SUMMARIZE_MODEL,
+  GEMINI_SUMMARIZE_MODEL,
 } from "../constant";
-import { api, RequestMessage } from "../client/api";
+import { ClientApi, RequestMessage, MultimodalContent } from "../client/api";
 import { ChatControllerPool } from "../client/controller";
 import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
@@ -82,18 +85,38 @@ function createEmptySession(): ChatSession {
 function getSummarizeModel(currentModel: string) {
   // if it is using gpt-* models, force to use 3.5 to summarize
-  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
+  if (currentModel.startsWith("gpt")) {
+    return SUMMARIZE_MODEL;
+  }
+  if (currentModel.startsWith("gemini-pro")) {
+    return GEMINI_SUMMARIZE_MODEL;
+  }
+  return currentModel;
 }

 function countMessages(msgs: ChatMessage[]) {
-  return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
+  return msgs.reduce(
+    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
+    0,
+  );
 }

 function fillTemplateWith(input: string, modelConfig: ModelConfig) {
-  let cutoff =
+  const cutoff =
     KnowledgeCutOffDate[modelConfig.model] ?? KnowledgeCutOffDate.default;
+  // Find the model in the DEFAULT_MODELS array that matches the modelConfig.model
+  const modelInfo = DEFAULT_MODELS.find((m) => m.name === modelConfig.model);
+
+  var serviceProvider = "OpenAI";
+  if (modelInfo) {
+    // TODO: auto detect the providerName from the modelConfig.model
+    // Directly use the providerName from the modelInfo
+    serviceProvider = modelInfo.provider.providerName;
+  }
+
   const vars = {
+    ServiceProvider: serviceProvider,
     cutoff,
     model: modelConfig.model,
     time: new Date().toLocaleString(),
@@ -110,7 +133,8 @@ function fillTemplateWith(input: string, modelConfig: ModelConfig) {
   }

   Object.entries(vars).forEach(([name, value]) => {
-    output = output.replaceAll(`{{${name}}}`, value);
+    const regex = new RegExp(`{{${name}}}`, "g");
+    output = output.replace(regex, value.toString()); // Ensure value is a string
   });

   return output;
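
The substitution hunk swaps `String.replaceAll` (an ES2021 method) for a global-regex `replace` and coerces each value via `toString()`, so non-string template variables no longer misbehave. A standalone sketch of the resulting behavior; the sample template and variable values are invented for illustration:

```ts
// Invented sample data; the real vars object is built in fillTemplateWith.
const vars: Record<string, string | number> = {
  ServiceProvider: "OpenAI",
  model: "gpt-4",
  cutoff: "2023-10",
};

let output = "You are {{model}} by {{ServiceProvider}}. Knowledge cutoff: {{cutoff}}.";
Object.entries(vars).forEach(([name, value]) => {
  // A global regex replaces every occurrence of the placeholder.
  const regex = new RegExp(`{{${name}}}`, "g");
  output = output.replace(regex, value.toString()); // coerce numbers etc. to string
});
console.log(output); // "You are gpt-4 by OpenAI. Knowledge cutoff: 2023-10."
```
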
@@ -266,16 +290,36 @@ export const useChatStore = createPersistStore(
       get().summarizeSession();
     },

-    async onUserInput(content: string) {
+    async onUserInput(content: string, attachImages?: string[]) {
       const session = get().currentSession();
       const modelConfig = session.mask.modelConfig;

       const userContent = fillTemplateWith(content, modelConfig);
       console.log("[User Input] after template: ", userContent);

-      const userMessage: ChatMessage = createMessage({
+      let mContent: string | MultimodalContent[] = userContent;
+
+      if (attachImages && attachImages.length > 0) {
+        mContent = [
+          {
+            type: "text",
+            text: userContent,
+          },
+        ];
+        mContent = mContent.concat(
+          attachImages.map((url) => {
+            return {
+              type: "image_url",
+              image_url: {
+                url: url,
+              },
+            };
+          }),
+        );
+      }
+
+      let userMessage: ChatMessage = createMessage({
         role: "user",
-        content: userContent,
+        content: mContent,
       });

       const botMessage: ChatMessage = createMessage({
@@ -293,7 +337,7 @@ export const useChatStore = createPersistStore(
       get().updateCurrentSession((session) => {
         const savedUserMessage = {
           ...userMessage,
-          content,
+          content: mContent,
         };
         session.messages = session.messages.concat([
           savedUserMessage,
@@ -301,6 +345,13 @@ export const useChatStore = createPersistStore(
         ]);
       });

+      var api: ClientApi;
+      if (modelConfig.model.startsWith("gemini")) {
+        api = new ClientApi(ModelProvider.GeminiPro);
+      } else {
+        api = new ClientApi(ModelProvider.GPT);
+      }
+
       // make request
       api.llm.chat({
         messages: sendMessages,
@@ -378,8 +429,12 @@ export const useChatStore = createPersistStore(
       const contextPrompts = session.mask.context.slice();

       // system prompts, to get close to OpenAI Web ChatGPT
-      const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
-      const systemPrompts = shouldInjectSystemPrompts
+      const shouldInjectSystemPrompts =
+        modelConfig.enableInjectSystemPrompts &&
+        session.mask.modelConfig.model.startsWith("gpt-");
+
+      var systemPrompts: ChatMessage[] = [];
+      systemPrompts = shouldInjectSystemPrompts
         ? [
             createMessage({
               role: "system",
@@ -436,7 +491,7 @@ export const useChatStore = createPersistStore(
       ) {
         const msg = messages[i];
         if (!msg || msg.isError) continue;
-        tokenCount += estimateTokenLength(msg.content);
+        tokenCount += estimateTokenLength(getMessageTextContent(msg));
         reversedRecentMessages.push(msg);
       }
@@ -473,6 +528,14 @@ export const useChatStore = createPersistStore(
     summarizeSession() {
       const config = useAppConfig.getState();
       const session = get().currentSession();
+      const modelConfig = session.mask.modelConfig;
+
+      var api: ClientApi;
+      if (modelConfig.model.startsWith("gemini")) {
+        api = new ClientApi(ModelProvider.GeminiPro);
+      } else {
+        api = new ClientApi(ModelProvider.GPT);
+      }

       // remove error messages if any
       const messages = session.messages;
@@ -504,8 +567,6 @@ export const useChatStore = createPersistStore(
          },
        });
      }

-      const modelConfig = session.mask.modelConfig;
       const summarizeIndex = Math.max(
         session.lastSummarizeIndex,
         session.clearContextIndex ?? 0,
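
The `onUserInput` change is the heart of the vision support: when images are attached, the message `content` switches from a plain string to a `MultimodalContent[]` holding one text part followed by one `image_url` part per image. A reduced sketch of that shape; the `MultimodalContent` type below is a simplified stand-in for the one exported from `app/client/api`:

```ts
// Simplified stand-in for the MultimodalContent type in app/client/api.
type MultimodalContent =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

function buildUserContent(
  userContent: string,
  attachImages?: string[],
): string | MultimodalContent[] {
  // No attachments: keep the plain-string content, exactly as before.
  if (!attachImages || attachImages.length === 0) return userContent;

  // Attachments: one text part followed by one image_url part per image.
  const parts: MultimodalContent[] = [{ type: "text", text: userContent }];
  const imageParts: MultimodalContent[] = attachImages.map((url) => ({
    type: "image_url",
    image_url: { url },
  }));
  return parts.concat(imageParts);
}

console.log(buildUserContent("what is in this picture?", ["https://example.com/cat.png"]));
// [ { type: "text", text: "what is in this picture?" },
//   { type: "image_url", image_url: { url: "https://example.com/cat.png" } } ]
```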

View File: app/store/config.ts

@@ -28,7 +28,7 @@ export enum Theme {
 export const DEFAULT_CONFIG = {
   lastUpdate: Date.now(), // timestamp, to merge state

-  submitKey: isMacOS() ? SubmitKey.MetaEnter : SubmitKey.CtrlEnter,
+  submitKey: SubmitKey.Enter,
   avatar: "1f603",
   fontSize: 14,
   theme: Theme.Auto as Theme,
@@ -91,7 +91,7 @@ export const ModalConfigValidator = {
     return limitNumber(x, -2, 2, 0);
   },
   temperature(x: number) {
-    return limitNumber(x, 0, 1, 1);
+    return limitNumber(x, 0, 2, 1);
   },
   top_p(x: number) {
     return limitNumber(x, 0, 1, 1);
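
Raising the `temperature` cap from 1 to 2 matters because the validator clamps silently: values above the old cap were clipped to 1, even though the OpenAI API accepts temperatures up to 2. A sketch assuming `limitNumber(x, min, max, fallback)` clamps into `[min, max]` and returns `fallback` for non-numeric input; the real helper lives elsewhere in the repo and may differ in detail:

```ts
// Assumed semantics of limitNumber: clamp into [min, max], fallback on NaN.
function limitNumber(x: number, min: number, max: number, fallback: number): number {
  if (typeof x !== "number" || isNaN(x)) return fallback;
  return Math.min(max, Math.max(min, x));
}

console.log(limitNumber(1.7, 0, 2, 1)); // 1.7 -- accepted after raising the cap
console.log(limitNumber(1.7, 0, 1, 1)); // 1   -- the old cap silently clipped it
console.log(limitNumber(NaN, 0, 2, 1)); // 1   -- fallback for invalid input
```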

View File: app/store/sync.ts

@@ -48,7 +48,7 @@ const DEFAULT_SYNC_STATE = {
 export const useSyncStore = createPersistStore(
   DEFAULT_SYNC_STATE,
   (set, get) => ({
-    coundSync() {
+    cloudSync() {
       const config = get()[get().provider];
       return Object.values(config).every((c) => c.toString().length > 0);
     },
@@ -60,8 +60,10 @@
     export() {
       const state = getLocalAppState();
       const datePart = isApp
-        ? `${new Date().toLocaleDateString().replace(/\//g, '_')} ${new Date().toLocaleTimeString().replace(/:/g, '_')}`
-        : new Date().toLocaleString();
+        ? `${new Date().toLocaleDateString().replace(/\//g, "_")} ${new Date()
+            .toLocaleTimeString()
+            .replace(/:/g, "_")}`
+        : new Date().toLocaleString();

       const fileName = `Backup-${datePart}.json`;
       downloadAs(JSON.stringify(state), fileName);
@@ -116,7 +118,7 @@
     }),
     {
       name: StoreKey.Sync,
-      version: 1.1,
+      version: 1.2,

       migrate(persistedState, version) {
         const newState = persistedState as typeof DEFAULT_SYNC_STATE;
@@ -125,6 +127,15 @@
           newState.upstash.username = STORAGE_KEY;
         }

+        if (version < 1.2) {
+          if (
+            (persistedState as typeof DEFAULT_SYNC_STATE).proxyUrl ===
+            "/api/cors/"
+          ) {
+            newState.proxyUrl = "";
+          }
+        }
+
         return newState as any;
       },
     },
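
The `version: 1.2` bump pairs with the new migration branch: persisted state written before 1.2 that still points `proxyUrl` at the removed `/api/cors/` endpoint is reset to empty. A self-contained sketch of just that step, with `DEFAULT_SYNC_STATE` trimmed to the one field involved:

```ts
// Trimmed stand-in for the real DEFAULT_SYNC_STATE object.
const DEFAULT_SYNC_STATE = { proxyUrl: "" };
type SyncState = typeof DEFAULT_SYNC_STATE;

function migrate(persistedState: unknown, version: number): SyncState {
  const newState = persistedState as SyncState;
  // State saved before 1.2 may still point at the removed /api/cors/ endpoint.
  if (version < 1.2 && newState.proxyUrl === "/api/cors/") {
    newState.proxyUrl = ""; // reset so the client stops calling a dead route
  }
  return newState;
}

console.log(migrate({ proxyUrl: "/api/cors/" }, 1.1)); // { proxyUrl: "" }
console.log(migrate({ proxyUrl: "https://my.proxy.example" }, 1.1)); // unchanged
```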

View File: app/store/update.ts

@@ -1,9 +1,16 @@
-import { FETCH_COMMIT_URL, FETCH_TAG_URL, StoreKey } from "../constant";
-import { api } from "../client/api";
+import {
+  FETCH_COMMIT_URL,
+  FETCH_TAG_URL,
+  ModelProvider,
+  StoreKey,
+} from "../constant";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import ChatGptIcon from "../icons/chatgpt.png";
 import Locale from "../locales";
+import { use } from "react";
+import { useAppConfig } from ".";
+import { ClientApi } from "../client/api";

 const ONE_MINUTE = 60 * 1000;
 const isApp = !!getClientConfig()?.isApp;
@@ -85,35 +92,40 @@ export const useUpdateStore = createPersistStore(
       }));

       if (window.__TAURI__?.notification && isApp) {
         // Check if notification permission is granted
-        await window.__TAURI__?.notification.isPermissionGranted().then((granted) => {
-          if (!granted) {
-            return;
-          } else {
-            // Request permission to show notifications
-            window.__TAURI__?.notification.requestPermission().then((permission) => {
-              if (permission === 'granted') {
-                if (version === remoteId) {
-                  // Show a notification using Tauri
-                  window.__TAURI__?.notification.sendNotification({
-                    title: "ChatGPT Next Web",
-                    body: `${Locale.Settings.Update.IsLatest}`,
-                    icon: `${ChatGptIcon.src}`,
-                    sound: "Default"
-                  });
-                } else {
-                  const updateMessage = Locale.Settings.Update.FoundUpdate(`${remoteId}`);
-                  // Show a notification for the new version using Tauri
-                  window.__TAURI__?.notification.sendNotification({
-                    title: "ChatGPT Next Web",
-                    body: updateMessage,
-                    icon: `${ChatGptIcon.src}`,
-                    sound: "Default"
-                  });
-                }
-              }
-            });
-          }
-        });
+        await window.__TAURI__?.notification
+          .isPermissionGranted()
+          .then((granted) => {
+            if (!granted) {
+              return;
+            } else {
+              // Request permission to show notifications
+              window.__TAURI__?.notification
+                .requestPermission()
+                .then((permission) => {
+                  if (permission === "granted") {
+                    if (version === remoteId) {
+                      // Show a notification using Tauri
+                      window.__TAURI__?.notification.sendNotification({
+                        title: "NextChat",
+                        body: `${Locale.Settings.Update.IsLatest}`,
+                        icon: `${ChatGptIcon.src}`,
+                        sound: "Default",
+                      });
+                    } else {
+                      const updateMessage =
+                        Locale.Settings.Update.FoundUpdate(`${remoteId}`);
+                      // Show a notification for the new version using Tauri
+                      window.__TAURI__?.notification.sendNotification({
+                        title: "NextChat",
+                        body: updateMessage,
+                        icon: `${ChatGptIcon.src}`,
+                        sound: "Default",
+                      });
+                    }
+                  }
+                });
+            }
+          });
       }

       console.log("[Got Upstream] ", remoteId);
     } catch (error) {
@@ -122,6 +134,7 @@ export const useUpdateStore = createPersistStore(
     },

     async updateUsage(force = false) {
+      // only support openai for now
       const overOneMinute = Date.now() - get().lastUpdateUsage >= ONE_MINUTE;

       if (!overOneMinute && !force) return;
@@ -130,6 +143,7 @@ export const useUpdateStore = createPersistStore(
       }));

       try {
+        const api = new ClientApi(ModelProvider.GPT);
         const usage = await api.llm.usage();

         if (usage) {
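
This commit threads the same provider dispatch through the chat and update stores: construct a `ClientApi` for Gemini when the model name starts with `gemini`, otherwise for GPT, while usage polling stays pinned to GPT (hence the added "only support openai for now" comment). A reduced sketch of the pattern; the `ModelProvider` and `ClientApi` declarations below are simplified stand-ins for the real ones in `app/client/api`:

```ts
// Stand-in declarations; the real ones live in app/client/api.
enum ModelProvider {
  GPT = "GPT",
  GeminiPro = "GeminiPro",
}

class ClientApi {
  constructor(readonly provider: ModelProvider) {}
}

// Pick the API flavor from the model name, defaulting to GPT.
function apiForModel(model: string): ClientApi {
  return model.startsWith("gemini")
    ? new ClientApi(ModelProvider.GeminiPro)
    : new ClientApi(ModelProvider.GPT);
}

console.log(apiForModel("gemini-pro").provider); // GeminiPro
console.log(apiForModel("gpt-4").provider); // GPT, also used by updateUsage
```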