Merge remote-tracking branch 'up/main'

# Conflicts:
#	app/store/chat.ts
织梦人
2024-09-26 20:47:29 +08:00
93 changed files with 2232 additions and 324 deletions

app/store/access.ts

@@ -120,6 +120,9 @@ const DEFAULT_ACCESS_STATE = {
   disableFastLink: false,
   customModels: "",
   defaultModel: "",
+
+  // tts config
+  edgeTTSVoiceName: "zh-CN-YunxiNeural",
 };

 export const useAccessStore = createPersistStore(
@@ -132,6 +135,12 @@ export const useAccessStore = createPersistStore(
       return get().needCode;
     },
+
+    edgeVoiceName() {
+      this.fetch();
+      return get().edgeTTSVoiceName;
+    },
+
     isValidOpenAI() {
       return ensure(get(), ["openaiApiKey"]);
     },
@@ -204,8 +213,8 @@ export const useAccessStore = createPersistStore(
         .then((res) => {
           // Set default model from env request
           let defaultModel = res.defaultModel ?? "";
-          DEFAULT_CONFIG.modelConfig.model =
-            defaultModel !== "" ? defaultModel : "gpt-3.5-turbo";
+          if (defaultModel !== "")
+            DEFAULT_CONFIG.modelConfig.model = defaultModel;
           return res;
         })
         .then((res: DangerConfig) => {

app/store/chat.ts

@@ -1,38 +1,34 @@
 import {
-  trimTopic,
   getMessageTextContent,
+  trimTopic,
   removeOutdatedEntries,
 } from "../utils";
-import Locale, { getLang } from "../locales";
+import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
+import { nanoid } from "nanoid";
+import type {
+  ClientApi,
+  MultimodalContent,
+  RequestMessage,
+} from "../client/api";
+import { getClientApi } from "../client/api";
+import { ChatControllerPool } from "../client/controller";
 import { showToast } from "../components/ui-lib";
-import { ModelConfig, ModelType, useAppConfig } from "./config";
-import { createEmptyMask, Mask } from "./mask";
 import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_MODELS,
   DEFAULT_SYSTEM_TEMPLATE,
   KnowledgeCutOffDate,
   StoreKey,
-  SUMMARIZE_MODEL,
-  GEMINI_SUMMARIZE_MODEL,
 } from "../constant";
-import { getClientApi } from "../client/api";
-import type {
-  ClientApi,
-  RequestMessage,
-  MultimodalContent,
-} from "../client/api";
-import { ChatControllerPool } from "../client/controller";
-import { prettyObject } from "../utils/format";
-import { estimateTokenLength } from "../utils/token";
-import { nanoid } from "nanoid";
-import { createPersistStore } from "../utils/store";
-import { collectModelsWithDefaultModel } from "../utils/model";
-import { useAccessStore } from "./access";
+import Locale, { getLang } from "../locales";
 import { isDalle3, safeLocalStorage } from "../utils";
+import { prettyObject } from "../utils/format";
+import { createPersistStore } from "../utils/store";
+import { estimateTokenLength } from "../utils/token";
+import { ModelConfig, ModelType, useAppConfig } from "./config";
+import { createEmptyMask, Mask } from "./mask";
 import { useSyncStore } from "./sync";
-import { indexedDBStorage } from "@/app/utils/indexedDB-storage";

 const localStorage = safeLocalStorage();
@@ -113,27 +109,6 @@ function createEmptySession(): ChatSession {
   };
 }

-function getSummarizeModel(currentModel: string) {
-  // if it is using gpt-* models, force to use 4o-mini to summarize
-  if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
-    const configStore = useAppConfig.getState();
-    const accessStore = useAccessStore.getState();
-    const allModel = collectModelsWithDefaultModel(
-      configStore.models,
-      [configStore.customModels, accessStore.customModels].join(","),
-      accessStore.defaultModel,
-    );
-    const summarizeModel = allModel.find(
-      (m) => m.name === SUMMARIZE_MODEL && m.available,
-    );
-    return summarizeModel?.name ?? currentModel;
-  }
-  if (currentModel.startsWith("gemini")) {
-    return GEMINI_SUMMARIZE_MODEL;
-  }
-  return currentModel;
-}
-
 function countMessages(msgs: ChatMessage[]) {
   return msgs.reduce(
     (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
@@ -212,6 +187,28 @@ export const useChatStore = createPersistStore(
     }

     const methods = {
+      forkSession() {
+        // get the current session
+        const currentSession = get().currentSession();
+        if (!currentSession) return;
+
+        const newSession = createEmptySession();
+
+        newSession.topic = currentSession.topic;
+        newSession.messages = [...currentSession.messages];
+        newSession.mask = {
+          ...currentSession.mask,
+          modelConfig: {
+            ...currentSession.mask.modelConfig,
+          },
+        };
+
+        set((state) => ({
+          currentSessionIndex: 0,
+          sessions: [newSession, ...state.sessions],
+        }));
+      },
+
       clearSessions() {
         set(() => ({
           sessions: [createEmptySession()],
@@ -647,7 +644,7 @@ export const useChatStore = createPersistStore(
         });
       },

-      summarizeSession() {
+      summarizeSession(refreshTitle: boolean = false) {
         const config = useAppConfig.getState();
         const session = get().currentSession();
         const modelConfig = session.mask.modelConfig;
@@ -656,7 +653,7 @@ export const useChatStore = createPersistStore(
           return;
         }

-        const providerName = modelConfig.providerName;
+        const providerName = modelConfig.compressProviderName;
         const api: ClientApi = getClientApi(providerName);

         // remove error messages if any
@@ -665,24 +662,35 @@ export const useChatStore = createPersistStore(
         // should summarize topic after chatting more than 50 words
         const SUMMARIZE_MIN_LEN = 50;
         if (
-          config.enableAutoGenerateTitle &&
-          session.topic === DEFAULT_TOPIC &&
-          countMessages(messages) >= SUMMARIZE_MIN_LEN
+          (config.enableAutoGenerateTitle &&
+            session.topic === DEFAULT_TOPIC &&
+            countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
+          refreshTitle
         ) {
-          const topicMessages = messages.concat(
-            createMessage({
-              role: "user",
-              content: Locale.Store.Prompt.Topic,
-            }),
-          );
+          const startIndex = Math.max(
+            0,
+            messages.length - modelConfig.historyMessageCount,
+          );
+          const topicMessages = messages
+            .slice(
+              startIndex < messages.length ? startIndex : messages.length - 1,
+              messages.length,
+            )
+            .concat(
+              createMessage({
+                role: "user",
+                content: Locale.Store.Prompt.Topic,
+              }),
+            );
           api.llm.chat({
             messages: topicMessages,
             config: {
-              model: getSummarizeModel(session.mask.modelConfig.model),
+              model: modelConfig.compressModel,
               stream: false,
+              providerName,
             },
             onFinish(message) {
+              if (!isValidMessage(message)) return;
               get().updateCurrentSession(
                 (session) =>
                   (session.topic =
@@ -741,7 +749,7 @@ export const useChatStore = createPersistStore(
             config: {
               ...modelcfg,
               stream: true,
-              model: getSummarizeModel(session.mask.modelConfig.model),
+              model: modelConfig.compressModel,
             },
             onUpdate(message) {
               session.memoryPrompt = message;
@@ -758,6 +766,10 @@ export const useChatStore = createPersistStore(
             },
           });
         }
+
+        function isValidMessage(message: any): boolean {
+          return typeof message === "string" && !message.startsWith("```json");
+        }
       },

       updateStat(message: ChatMessage) {
@@ -790,7 +802,7 @@ export const useChatStore = createPersistStore(
   },
   {
     name: StoreKey.Chat,
-    version: 3.1,
+    version: 3.2,
     migrate(persistedState, version) {
       const state = persistedState as any;
       const newState = JSON.parse(
@@ -837,6 +849,16 @@ export const useChatStore = createPersistStore(
         });
       }

+      // add default summarize model for every session
+      if (version < 3.2) {
+        newState.sessions.forEach((s) => {
+          const config = useAppConfig.getState();
+          s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
+          s.mask.modelConfig.compressProviderName =
+            config.modelConfig.compressProviderName;
+        });
+      }
+
       return newState as any;
     },
   },

app/store/config.ts

@@ -5,12 +5,21 @@ import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_MODELS,
   DEFAULT_SIDEBAR_WIDTH,
+  DEFAULT_TTS_ENGINE,
+  DEFAULT_TTS_ENGINES,
+  DEFAULT_TTS_MODEL,
+  DEFAULT_TTS_MODELS,
+  DEFAULT_TTS_VOICE,
+  DEFAULT_TTS_VOICES,
   StoreKey,
   ServiceProvider,
 } from "../constant";
 import { createPersistStore } from "../utils/store";

 export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
+export type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];
+export type TTSVoiceType = (typeof DEFAULT_TTS_VOICES)[number];
+export type TTSEngineType = (typeof DEFAULT_TTS_ENGINES)[number];

 export enum SubmitKey {
   Enter = "Enter",
@@ -41,6 +50,8 @@ export const DEFAULT_CONFIG = {
   enableAutoGenerateTitle: true,
   sidebarWidth: DEFAULT_SIDEBAR_WIDTH,

+  enableArtifacts: true, // show artifacts config
+
   disablePromptHint: false,

   dontShowMaskSplashScreen: false, // don't show splash screen when creating a chat
@@ -50,7 +61,7 @@ export const DEFAULT_CONFIG = {
   models: DEFAULT_MODELS as any as LLMModel[],

   modelConfig: {
-    model: "gpt-3.5-turbo" as ModelType,
+    model: "gpt-4o-mini" as ModelType,
     providerName: "OpenAI" as ServiceProvider,
     temperature: 0.5,
     top_p: 1,
@@ -60,17 +71,29 @@ export const DEFAULT_CONFIG = {
     sendMemory: true,
     historyMessageCount: 4,
     compressMessageLengthThreshold: 1000,
+    compressModel: "gpt-4o-mini" as ModelType,
+    compressProviderName: "OpenAI" as ServiceProvider,
     enableInjectSystemPrompts: true,
     template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
     size: "1024x1024" as DalleSize,
     quality: "standard" as DalleQuality,
     style: "vivid" as DalleStyle,
   },
+
+  ttsConfig: {
+    enable: false,
+    autoplay: false,
+    engine: DEFAULT_TTS_ENGINE,
+    model: DEFAULT_TTS_MODEL,
+    voice: DEFAULT_TTS_VOICE,
+    speed: 1.0,
+  },
 };

 export type ChatConfig = typeof DEFAULT_CONFIG;

 export type ModelConfig = ChatConfig["modelConfig"];
+export type TTSConfig = ChatConfig["ttsConfig"];

 export function limitNumber(
   x: number,
@@ -85,6 +108,21 @@ export function limitNumber(
   return Math.min(max, Math.max(min, x));
 }

+export const TTSConfigValidator = {
+  engine(x: string) {
+    return x as TTSEngineType;
+  },
+  model(x: string) {
+    return x as TTSModelType;
+  },
+  voice(x: string) {
+    return x as TTSVoiceType;
+  },
+  speed(x: number) {
+    return limitNumber(x, 0.25, 4.0, 1.0);
+  },
+};
+
 export const ModalConfigValidator = {
   model(x: string) {
     return x as ModelType;
@@ -140,7 +178,22 @@ export const useAppConfig = createPersistStore(
   }),
   {
     name: StoreKey.Config,
-    version: 3.9,
+    version: 4,
+
+    merge(persistedState, currentState) {
+      const state = persistedState as ChatConfig | undefined;
+      if (!state) return { ...currentState };
+      const models = currentState.models.slice();
+      state.models.forEach((pModel) => {
+        const idx = models.findIndex(
+          (v) => v.name === pModel.name && v.provider === pModel.provider,
+        );
+        if (idx !== -1) models[idx] = pModel;
+        else models.push(pModel);
+      });
+      return { ...currentState, ...state, models: models };
+    },

     migrate(persistedState, version) {
       const state = persistedState as ChatConfig;
@@ -178,6 +231,13 @@ export const useAppConfig = createPersistStore(
           : config?.template ?? DEFAULT_INPUT_TEMPLATE;
       }

+      if (version < 4) {
+        state.modelConfig.compressModel =
+          DEFAULT_CONFIG.modelConfig.compressModel;
+        state.modelConfig.compressProviderName =
+          DEFAULT_CONFIG.modelConfig.compressProviderName;
+      }
+
       return state as any;
     },
   },

app/store/plugin.ts

@@ -1,10 +1,13 @@
 import OpenAPIClientAxios from "openapi-client-axios";
-import { getLang, Lang } from "../locales";
 import { StoreKey } from "../constant";
 import { nanoid } from "nanoid";
 import { createPersistStore } from "../utils/store";
+import { getClientConfig } from "../config/client";
 import yaml from "js-yaml";
 import { adapter } from "../utils";
+import { useAccessStore } from "./access";

+const isApp = getClientConfig()?.isApp;
+
 export type Plugin = {
   id: string;
@@ -17,7 +20,6 @@ export type Plugin = {
   authLocation?: string;
   authHeader?: string;
   authToken?: string;
-  usingProxy?: boolean;
 };

 export type FunctionToolItem = {
@@ -47,18 +49,25 @@ export const FunctionToolService = {
plugin?.authType == "basic"
? `Basic ${plugin?.authToken}`
: plugin?.authType == "bearer"
? ` Bearer ${plugin?.authToken}`
? `Bearer ${plugin?.authToken}`
: plugin?.authToken;
const authLocation = plugin?.authLocation || "header";
const definition = yaml.load(plugin.content) as any;
const serverURL = definition?.servers?.[0]?.url;
const baseURL = !!plugin?.usingProxy ? "/api/proxy" : serverURL;
const baseURL = !isApp ? "/api/proxy" : serverURL;
const headers: Record<string, string | undefined> = {
"X-Base-URL": !!plugin?.usingProxy ? serverURL : undefined,
"X-Base-URL": !isApp ? serverURL : undefined,
};
if (authLocation == "header") {
headers[headerName] = tokenValue;
}
// try using openaiApiKey for Dalle3 Plugin.
if (!tokenValue && plugin.id === "dalle3") {
const openaiApiKey = useAccessStore.getState().openaiApiKey;
if (openaiApiKey) {
headers[headerName] = `Bearer ${openaiApiKey}`;
}
}
const api = new OpenAPIClientAxios({
definition: yaml.load(plugin.content) as any,
axiosConfigDefaults: {
@@ -166,7 +175,7 @@ export const usePluginStore = createPersistStore(
   (set, get) => ({
     create(plugin?: Partial<Plugin>) {
       const plugins = get().plugins;
-      const id = nanoid();
+      const id = plugin?.id || nanoid();
       plugins[id] = {
         ...createEmptyPlugin(),
         ...plugin,
@@ -221,5 +230,42 @@ export const usePluginStore = createPersistStore(
   {
     name: StoreKey.Plugin,
     version: 1,
+    onRehydrateStorage(state) {
+      // Skip store rehydration on server side
+      if (typeof window === "undefined") {
+        return;
+      }
+      fetch("./plugins.json")
+        .then((res) => res.json())
+        .then((res) => {
+          Promise.all(
+            res.map((item: any) =>
+              // skip get schema
+              state.get(item.id)
+                ? item
+                : fetch(item.schema)
+                    .then((res) => res.text())
+                    .then((content) => ({
+                      ...item,
+                      content,
+                    }))
+                    .catch((e) => item),
+            ),
+          ).then((builtinPlugins: any) => {
+            builtinPlugins
+              .filter((item: any) => item?.content)
+              .forEach((item: any) => {
+                const plugin = state.create(item);
+                state.updatePlugin(plugin.id, (plugin) => {
+                  const tool = FunctionToolService.add(plugin, true);
+                  plugin.title = tool.api.definition.info.title;
+                  plugin.version = tool.api.definition.info.version;
+                  plugin.builtin = true;
+                });
+              });
+          });
+        });
+    },
   },
 );

app/store/prompt.ts

@@ -1,7 +1,7 @@
 import Fuse from "fuse.js";
-import { getLang } from "../locales";
-import { StoreKey } from "../constant";
 import { nanoid } from "nanoid";
+import { StoreKey } from "../constant";
+import { getLang } from "../locales";
 import { createPersistStore } from "../utils/store";

 export interface Prompt {
@@ -147,6 +147,11 @@ export const usePromptStore = createPersistStore(
     },

     onRehydrateStorage(state) {
+      // Skip store rehydration on server side
+      if (typeof window === "undefined") {
+        return;
+      }
+
       const PROMPT_URL = "./prompts.json";

       type PromptList = Array<[string, string]>;

app/store/sync.ts

@@ -1,5 +1,4 @@
 import { getClientConfig } from "../config/client";
-import { Updater } from "../typing";
 import { ApiPath, STORAGE_KEY, StoreKey } from "../constant";
 import { createPersistStore } from "../utils/store";
 import {

app/store/update.ts

@@ -8,8 +8,6 @@ import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
 import ChatGptIcon from "../icons/chatgpt.png";
 import Locale from "../locales";
-import { use } from "react";
-import { useAppConfig } from ".";
 import { ClientApi } from "../client/api";

 const ONE_MINUTE = 60 * 1000;