sijinhui
2024-09-19 00:07:37 +08:00
52 changed files with 3763 additions and 2647 deletions

View File

@@ -123,6 +123,9 @@ const DEFAULT_ACCESS_STATE = {
disableFastLink: false,
customModels: "",
defaultModel: "",
// tts config
edgeTTSVoiceName: "zh-CN-YunxiNeural",
};
export const useAccessStore = createPersistStore(
@@ -135,6 +138,12 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
edgeVoiceName() {
this.fetch();
return get().edgeTTSVoiceName;
},
isValidOpenAI() {
return ensure(get(), ["openaiApiKey"]);
},
@@ -197,40 +206,40 @@ export const useAccessStore = createPersistStore(
fetch() {
if (fetchState > 0 || getClientConfig()?.buildMode === "export") return;
fetchState = 1;
const res = {
needCode: false,
hideUserApiKey: true,
disableGPT4: false,
hideBalanceQuery: true,
};
set(() => ({ ...res }));
fetchState = 2; // mark fetchState as "fetch completed"
// fetch("/api/config", {
// method: "post",
// body: null,
// headers: {
// ...getHeaders(),
// },
// })
// .then((res) => res.json())
// .then((res) => {
// // Set default model from env request
// let defaultModel = res.defaultModel ?? "";
// DEFAULT_CONFIG.modelConfig.model =
// defaultModel !== "" ? defaultModel : "gpt-3.5-turbo";
// return res;
// })
// .then((res: DangerConfig) => {
// console.log("[Config] got config from server", res);
// set(() => ({ ...res }));
// })
// .catch(() => {
// console.error("[Config] failed to fetch config");
// })
// .finally(() => {
// fetchState = 2;
// });
// const res = {
// needCode: false,
// hideUserApiKey: true,
// disableGPT4: false,
// hideBalanceQuery: true,
// };
// set(() => ({ ...res }));
// fetchState = 2; // mark fetchState as "fetch completed"
// TODO: this might be problematic
fetch("/api/config", {
method: "post",
body: null,
headers: {
...getHeaders(),
},
})
.then((res) => res.json())
.then((res) => {
// Set default model from env request
let defaultModel = res.defaultModel ?? "";
if (defaultModel !== "")
DEFAULT_CONFIG.modelConfig.model = defaultModel;
return res;
})
.then((res: DangerConfig) => {
console.log("[Config] got config from server", res);
set(() => ({ ...res }));
})
.catch(() => {
console.error("[Config] failed to fetch config");
})
.finally(() => {
fetchState = 2;
});
},
}),
{
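The new edgeVoiceName() getter triggers the (guarded) config fetch and then returns the persisted edgeTTSVoiceName, which defaults to "zh-CN-YunxiNeural". A minimal sketch of how a caller might read it, assuming the store is exported as useAccessStore; speakWithEdgeTTS and the import path are illustrative, not part of this commit:

import { useAccessStore } from "../store";

// Hypothetical helper: look up the configured Edge TTS voice before synthesizing audio.
async function speakWithEdgeTTS(text: string) {
  // edgeVoiceName() calls fetch() first (a no-op once fetchState > 0)
  // and then returns the persisted voice name.
  const voice = useAccessStore.getState().edgeVoiceName();
  console.log("[TTS] using edge voice", voice, "for", text.length, "chars");
  // ...hand `text` and `voice` to the actual Edge TTS client here.
}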

View File

@@ -178,6 +178,28 @@ export const useChatStore = createPersistStore(
}
const methods = {
forkSession() {
// get the current session
const currentSession = get().currentSession();
if (!currentSession) return;
const newSession = createEmptySession();
newSession.topic = currentSession.topic;
newSession.messages = [...currentSession.messages];
newSession.mask = {
...currentSession.mask,
modelConfig: {
...currentSession.mask.modelConfig,
},
};
set((state) => ({
currentSessionIndex: 0,
sessions: [newSession, ...state.sessions],
}));
},
clearSessions() {
set(() => ({
sessions: [createEmptySession()],
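forkSession() duplicates the current session: it copies the topic, takes a shallow copy of the message list (the message objects themselves are shared), spreads the mask and its modelConfig, and prepends the clone as the newly selected session. A rough usage sketch, assuming a hypothetical "duplicate chat" handler; only forkSession itself comes from this commit:

import { useChatStore } from "../store";

// Hypothetical toolbar handler that forks the active conversation.
function onDuplicateChatClick() {
  // The forked copy becomes sessions[0] and currentSessionIndex is reset to 0.
  useChatStore.getState().forkSession();
}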
@@ -889,7 +911,7 @@ export const useChatStore = createPersistStore(
});
},
summarizeSession() {
summarizeSession(refreshTitle: boolean = false) {
const config = useAppConfig.getState();
const session = get().currentSession();
const modelConfig = session.mask.modelConfig;
@@ -907,16 +929,26 @@ export const useChatStore = createPersistStore(
// should summarize topic after chatting more than 50 words
const SUMMARIZE_MIN_LEN = 50;
if (
config.enableAutoGenerateTitle &&
session.topic === DEFAULT_TOPIC &&
countMessages(messages) >= SUMMARIZE_MIN_LEN
(config.enableAutoGenerateTitle &&
session.topic === DEFAULT_TOPIC &&
countMessages(messages) >= SUMMARIZE_MIN_LEN) ||
refreshTitle
) {
const topicMessages = messages.concat(
createMessage({
role: "user",
content: Locale.Store.Prompt.Topic,
}),
const startIndex = Math.max(
0,
messages.length - modelConfig.historyMessageCount,
);
const topicMessages = messages
.slice(
startIndex < messages.length ? startIndex : messages.length - 1,
messages.length,
)
.concat(
createMessage({
role: "user",
content: Locale.Store.Prompt.Topic,
}),
);
api.llm.chat({
messages: topicMessages,
config: {
@@ -942,7 +974,7 @@ export const useChatStore = createPersistStore(
const historyMsgLength = countMessages(toBeSummarizedMsgs);
if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
if (historyMsgLength > (modelConfig?.max_tokens ?? 4000)) {
const n = toBeSummarizedMsgs.length;
toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
Math.max(0, n - modelConfig.historyMessageCount),
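With summarizeSession(refreshTitle) the title can be regenerated on demand, and the title prompt is now limited to the last historyMessageCount messages (the same windowing the history summary above already uses), so a manual refresh on a long session does not send the whole transcript. A standalone sketch of that windowing (names are illustrative, not from the codebase):

// Return the trailing window of messages used to (re)generate a topic or summary.
function lastMessagesWindow<T>(messages: T[], historyMessageCount: number): T[] {
  const startIndex = Math.max(0, messages.length - historyMessageCount);
  // Guard mirrored from the store: if startIndex is somehow out of range,
  // fall back to just the final message.
  const from = startIndex < messages.length ? startIndex : messages.length - 1;
  return messages.slice(from, messages.length);
}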

View File

@@ -5,14 +5,22 @@ import {
DEFAULT_INPUT_TEMPLATE,
DEFAULT_MODELS,
DEFAULT_SIDEBAR_WIDTH,
DEFAULT_TTS_ENGINE,
DEFAULT_TTS_ENGINES,
DEFAULT_TTS_MODEL,
DEFAULT_TTS_MODELS,
DEFAULT_TTS_VOICE,
DEFAULT_TTS_VOICES,
DISABLE_MODELS,
StoreKey,
ServiceProvider,
} from "../constant";
import { createPersistStore } from "../utils/store";
import { get } from "immutable";
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
export type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];
export type TTSVoiceType = (typeof DEFAULT_TTS_VOICES)[number];
export type TTSEngineType = (typeof DEFAULT_TTS_ENGINES)[number];
export enum SubmitKey {
Enter = "Enter",
@@ -73,11 +81,21 @@ export const DEFAULT_CONFIG = {
quality: "standard" as DalleQuality,
style: "vivid" as DalleStyle,
},
ttsConfig: {
enable: false,
autoplay: false,
engine: DEFAULT_TTS_ENGINE,
model: DEFAULT_TTS_MODEL,
voice: DEFAULT_TTS_VOICE,
speed: 1.0,
},
};
export type ChatConfig = typeof DEFAULT_CONFIG;
export type ModelConfig = ChatConfig["modelConfig"];
export type TTSConfig = ChatConfig["ttsConfig"];
export function limitNumber(
x: number,
@@ -92,6 +110,21 @@ export function limitNumber(
return Math.min(max, Math.max(min, x));
}
export const TTSConfigValidator = {
engine(x: string) {
return x as TTSEngineType;
},
model(x: string) {
return x as TTSModelType;
},
voice(x: string) {
return x as TTSVoiceType;
},
speed(x: number) {
return limitNumber(x, 0.25, 4.0, 1.0);
},
};
export const ModalConfigValidator = {
model(x: string) {
return x as ModelType;
@@ -148,6 +181,21 @@ export const useAppConfig = createPersistStore(
{
name: StoreKey.Config,
version: 4,
merge(persistedState, currentState) {
const state = persistedState as ChatConfig | undefined;
if (!state) return { ...currentState };
const models = currentState.models.slice();
state.models.forEach((pModel) => {
const idx = models.findIndex(
(v) => v.name === pModel.name && v.provider === pModel.provider,
);
if (idx !== -1) models[idx] = pModel;
else models.push(pModel);
});
return { ...currentState, ...state, models: models };
},
migrate(persistedState, version) {
const state = persistedState as ChatConfig;
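The new merge() keeps user-persisted model entries without losing models added to the current defaults: it starts from currentState.models, overwrites matches by (name, provider), and appends everything else. A simplified standalone sketch of that reconciliation (the Model shape is abbreviated here; the real entries carry more fields):

type Model = { name: string; provider: string };

// Start from the current built-in model list, let persisted entries override
// their counterparts, and append persisted models the defaults no longer ship.
function mergeModels(current: Model[], persisted: Model[]): Model[] {
  const merged = current.slice();
  persisted.forEach((pModel) => {
    const idx = merged.findIndex(
      (v) => v.name === pModel.name && v.provider === pModel.provider,
    );
    if (idx !== -1) merged[idx] = pModel;
    else merged.push(pModel);
  });
  return merged;
}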

View File

@@ -1,5 +1,4 @@
import OpenAPIClientAxios from "openapi-client-axios";
import { getLang, Lang } from "../locales";
import { StoreKey } from "../constant";
import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";

View File

@@ -1,5 +1,4 @@
import { getClientConfig } from "../config/client";
import { Updater } from "../typing";
import { ApiPath, STORAGE_KEY, StoreKey } from "../constant";
import { createPersistStore } from "../utils/store";
import {
@@ -100,15 +99,17 @@ export const useSyncStore = createPersistStore(
const remoteState = await client.get(config.username);
if (!remoteState || remoteState === "") {
await client.set(config.username, JSON.stringify(localState));
console.log("[Sync] Remote state is empty, using local state instead.");
return
console.log(
"[Sync] Remote state is empty, using local state instead.",
);
return;
} else {
const parsedRemoteState = JSON.parse(
await client.get(config.username),
) as AppState;
mergeAppState(localState, parsedRemoteState);
setLocalAppState(localState);
}
}
} catch (e) {
console.log("[Sync] failed to get remote state", e);
throw e;

View File

@@ -8,8 +8,6 @@ import { getClientConfig } from "../config/client";
import { createPersistStore } from "../utils/store";
import ChatGptIcon from "../icons/chatgpt.png";
import Locale from "../locales";
import { use } from "react";
import { useAppConfig } from ".";
import { ClientApi } from "../client/api";
const ONE_MINUTE = 60 * 1000;