Merge remote-tracking branch 'upstream/main' into dev

# Conflicts:
#	app/locales/ar.ts
#	app/locales/bn.ts
#	app/locales/cs.ts
#	app/locales/de.ts
#	app/locales/es.ts
#	app/locales/fr.ts
#	app/locales/id.ts
#	app/locales/it.ts
#	app/locales/jp.ts
#	app/locales/ko.ts
#	app/locales/no.ts
#	app/locales/pt.ts
#	app/locales/ru.ts
#	app/locales/sk.ts
#	app/locales/tr.ts
#	app/locales/vi.ts
#	app/store/chat.ts
#	app/store/config.ts
sijinhui 2024-09-14 15:37:19 +08:00
commit 8d28155c86
14 changed files with 159 additions and 86 deletions

View File

@@ -161,6 +161,7 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
     const isDalle3 = _isDalle3(options.config.model);
+    const isO1 = options.config.model.startsWith("o1");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -182,30 +183,32 @@ export class ChatGPTApi implements LLMApi {
         const content = visionModel
           ? await preProcessImageContent(v.content)
           : getMessageTextContent(v);
+        if (!(isO1 && v.role === "system"))
           messages.push({ role: v.role, content });
       }
+      // O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet.
       requestPayload = {
         messages,
-        stream: options.config.stream,
+        stream: !isO1 ? options.config.stream : false,
         model: modelConfig.model,
-        temperature: modelConfig.temperature,
-        presence_penalty: modelConfig.presence_penalty,
-        frequency_penalty: modelConfig.frequency_penalty,
-        top_p: modelConfig.top_p,
+        temperature: !isO1 ? modelConfig.temperature : 1,
+        presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
+        frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
+        top_p: !isO1 ? modelConfig.top_p : 1,
         // max_tokens: Math.max(modelConfig.max_tokens, 1024),
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
       // add max_tokens to vision model
-      if (visionModel && modelConfig.model.includes("preview")) {
+      if (visionModel) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
     console.log("[Request] openai payload: ", requestPayload);
-    const shouldStream = !isDalle3 && !!options.config.stream;
+    const shouldStream = !isDalle3 && !!options.config.stream && !isO1;
     const controller = new AbortController();
     options.onController?.(controller);
@@ -326,7 +329,7 @@ export class ChatGPTApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+      isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
     );
     const res = await fetch(chatPath, chatPayload);

View File

@@ -80,7 +80,7 @@ export const HTMLPreview = forwardRef<HTMLPreviewHander, HTMLPreviewProps>(
   }, [props.autoHeight, props.height, iframeHeight]);
   const srcDoc = useMemo(() => {
-    const script = `<script>new ResizeObserver((entries) => parent.postMessage({id: '${frameId}', height: entries[0].target.clientHeight}, '*')).observe(document.body)</script>`;
+    const script = `<script>window.addEventListener("DOMContentLoaded", () => new ResizeObserver((entries) => parent.postMessage({id: '${frameId}', height: entries[0].target.clientHeight}, '*')).observe(document.body))</script>`;
     if (props.code.includes("<!DOCTYPE html>")) {
       props.code.replace("<!DOCTYPE html>", "<!DOCTYPE html>" + script);
     }

View File

@@ -204,7 +204,7 @@ function PromptToast(props: {
   return (
     <div className={styles["prompt-toast"]} key="prompt-toast">
-      {props.showToast && (
+      {props.showToast && context.length > 0 && (
        <div
          className={styles["prompt-toast-inner"] + " clickable"}
          role="button"
@@ -531,6 +531,8 @@ export function ChatActions(props: {
   const currentStyle =
     chatStore.currentSession().mask.modelConfig?.style ?? "vivid";
+  const isMobileScreen = useMobileScreen();
   useEffect(() => {
     const show = isVisionModel(currentModel);
     setShowUploadImage(show);
@@ -648,7 +650,7 @@ export function ChatActions(props: {
          items={models.map((m) => ({
            title: `${m.displayName}${
              m?.provider?.providerName
-                ? "(" + m?.provider?.providerName + ")"
+                ? " (" + m?.provider?.providerName + ")"
                : ""
            }`,
            subTitle: m.describe,
@@ -788,11 +790,13 @@ export function ChatActions(props: {
        />
      )}
+      {!isMobileScreen && (
        <ChatAction
          onClick={() => props.setShowShortcutKeyModal(true)}
          text={Locale.Chat.ShortcutKey.Title}
          icon={<ShortcutkeyIcon />}
        />
+      )}
    </div>
  );
}

View File

@@ -238,9 +238,26 @@ function escapeBrackets(text: string) {
  );
}
+function tryWrapHtmlCode(text: string) {
+  // try add wrap html code (fixed: html codeblock include 2 newline)
+  return text
+    .replace(
+      /([`]*?)(\w*?)([\n\r]*?)(<!DOCTYPE html>)/g,
+      (match, quoteStart, lang, newLine, doctype) => {
+        return !quoteStart ? "\n```html\n" + doctype : match;
+      },
+    )
+    .replace(
+      /(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*?)([`]*?)([\n\r]*?)/g,
+      (match, bodyEnd, space, htmlEnd, newLine, quoteEnd) => {
+        return !quoteEnd ? bodyEnd + space + htmlEnd + "\n```\n" : match;
+      },
+    );
+}
 function _MarkDownContent(props: { content: string }) {
   const escapedContent = useMemo(() => {
-    return escapeBrackets(escapeDollarNumber(props.content));
+    return tryWrapHtmlCode(escapeBrackets(escapeDollarNumber(props.content)));
   }, [props.content]);
   return (

View File

@@ -5,13 +5,19 @@ import Locale from "../locales";
 import { InputRange } from "./input-range";
 import { ListItem, Select } from "./ui-lib";
 import { useAllModels } from "../utils/hooks";
+import { groupBy } from "lodash-es";
 export function ModelConfigList(props: {
   modelConfig: ModelConfig;
   updateConfig: (updater: (config: ModelConfig) => void) => void;
 }) {
   const allModels = useAllModels();
+  const groupModels = groupBy(
+    allModels.filter((v) => v.available),
+    "provider.providerName",
+  );
   const value = `${props.modelConfig.model}@${props.modelConfig?.providerName}`;
+  const compressModelValue = `${props.modelConfig.compressModel}@${props.modelConfig?.compressProviderName}`;
   return (
     <>
@@ -19,6 +25,7 @@ export function ModelConfigList(props: {
        <Select
          aria-label={Locale.Settings.Model}
          value={value}
+          align="left"
          onChange={(e) => {
            const [model, providerName] = e.currentTarget.value.split("@");
            props.updateConfig((config) => {
@@ -27,13 +34,15 @@ export function ModelConfigList(props: {
            });
          }}
        >
-          {allModels
-            .filter((v) => v.available)
-            .map((v, i) => (
-              <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
-                {v.displayName}({v.provider?.providerName})
-              </option>
-            ))}
+          {Object.keys(groupModels).map((providerName, index) => (
+            <optgroup label={providerName} key={index}>
+              {groupModels[providerName].map((v, i) => (
+                <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
+                  {v.displayName}
+                </option>
+              ))}
+            </optgroup>
+          ))}
        </Select>
      </ListItem>
      <ListItem
@@ -228,6 +237,30 @@ export function ModelConfigList(props: {
          }
        ></input>
      </ListItem>
+      <ListItem
+        title={Locale.Settings.CompressModel.Title}
+        subTitle={Locale.Settings.CompressModel.SubTitle}
+      >
+        <Select
+          aria-label={Locale.Settings.CompressModel.Title}
+          value={compressModelValue}
+          onChange={(e) => {
+            const [model, providerName] = e.currentTarget.value.split("@");
+            props.updateConfig((config) => {
+              config.compressModel = ModalConfigValidator.model(model);
+              config.compressProviderName = providerName as ServiceProvider;
+            });
+          }}
+        >
+          {allModels
+            .filter((v) => v.available)
+            .map((v, i) => (
+              <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
+                {v.displayName}({v.provider?.providerName})
+              </option>
+            ))}
+        </Select>
+      </ListItem>
    </>
  );
}

View File

@@ -252,6 +252,12 @@
  position: relative;
  max-width: fit-content;
+  &.left-align-option {
+    option {
+      text-align: left;
+    }
+  }
  .select-with-icon-select {
    height: 100%;
    border: var(--border-in-light);

View File

@@ -313,13 +313,19 @@ export function PasswordInput(
 export function Select(
   props: React.DetailedHTMLProps<
-    React.SelectHTMLAttributes<HTMLSelectElement>,
+    React.SelectHTMLAttributes<HTMLSelectElement> & {
+      align?: "left" | "center";
+    },
     HTMLSelectElement
   >,
 ) {
-  const { className, children, ...otherProps } = props;
+  const { className, children, align, ...otherProps } = props;
   return (
-    <div className={`${styles["select-with-icon"]} ${className}`}>
+    <div
+      className={`${styles["select-with-icon"]} ${
+        align === "left" ? styles["left-align-option"] : ""
+      } ${className}`}
+    >
      <select className={styles["select-with-icon-select"]} {...otherProps}>
        {children}
      </select>

View File

@@ -252,6 +252,8 @@ export const KnowledgeCutOffDate: Record<string, string> = {
  "gpt-4o-mini": "2023-10",
  "gpt-4o-mini-2024-07-18": "2023-10",
  "gpt-4-vision-preview": "2023-04",
+  "o1-mini": "2023-10",
+  "o1-preview": "2023-10",
  // After improvements,
  // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
  "gemini-pro": "2023-12",
@@ -278,6 +280,8 @@ const openaiModels = [
  "gpt-4-turbo-2024-04-09",
  "gpt-4-1106-preview",
  "dall-e-3",
+  "o1-mini",
+  "o1-preview",
];
const googleModels = [

View File

@@ -515,6 +515,10 @@ const cn = {
    },
    Model: "模型 (model)",
+    CompressModel: {
+      Title: "压缩模型",
+      SubTitle: "用于压缩历史记录的模型",
+    },
    Temperature: {
      Title: "随机性 (temperature)",
      SubTitle: "值越大,回复越随机",
@@ -550,8 +554,8 @@ const cn = {
    },
  },
  Copy: {
-    Success: "已写入剪切板",
-    Failed: "复制失败,请赋予剪切板权限",
+    Success: "已写入剪贴板",
+    Failed: "复制失败,请赋予剪贴板权限",
  },
  Download: {
    Success: "内容已下载到您的目录。",

View File

@@ -519,6 +519,10 @@ const en: LocaleType = {
    },
    Model: "Model",
+    CompressModel: {
+      Title: "Compression Model",
+      SubTitle: "Model used to compress history",
+    },
    Temperature: {
      Title: "Temperature",
      SubTitle: "A larger value makes the more random output",

View File

@@ -368,6 +368,10 @@ const tw = {
    },
    Model: "模型 (model)",
+    CompressModel: {
+      Title: "壓縮模型",
+      SubTitle: "用於壓縮歷史記錄的模型",
+    },
    Temperature: {
      Title: "隨機性 (temperature)",
      SubTitle: "值越大,回應越隨機",

View File

@@ -1,9 +1,15 @@
-import { trimTopic, getMessageTextContent } from "../utils";
-import Locale, { getLang } from "../locales";
+import { getMessageTextContent, trimTopic } from "../utils";
+import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
+import { nanoid } from "nanoid";
+import type {
+  ClientApi,
+  MultimodalContent,
+  RequestMessage,
+} from "../client/api";
+import { getClientApi } from "../client/api";
+import { ChatControllerPool } from "../client/controller";
 import { showToast } from "../components/ui-lib";
-import { ModelConfig, ModelType, useAppConfig } from "./config";
-import { createEmptyMask, Mask } from "./mask";
 import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_MODELS,
@@ -11,9 +17,9 @@ import {
   KnowledgeCutOffDate,
   ServiceProvider,
   StoreKey,
-  SUMMARIZE_MODEL,
-  GEMINI_SUMMARIZE_MODEL,
 } from "../constant";
+import Locale, { getLang } from "../locales";
+import { isDalle3, safeLocalStorage } from "../utils";
 import {
   getClientApi,
   getHeaders,
@@ -26,13 +32,10 @@ import type {
 } from "../client/api";
 import { ChatControllerPool } from "../client/controller";
 import { prettyObject } from "../utils/format";
-import { estimateTokenLength } from "../utils/token";
-import { nanoid } from "nanoid";
 import { createPersistStore } from "../utils/store";
-import { collectModelsWithDefaultModel } from "../utils/model";
-import { useAccessStore } from "./access";
-import { isDalle3, safeLocalStorage } from "../utils";
-import { indexedDBStorage } from "@/app/utils/indexedDB-storage";
+import { estimateTokenLength } from "../utils/token";
+import { ModelConfig, ModelType, useAppConfig } from "./config";
+import { createEmptyMask, Mask } from "./mask";
 const localStorage = safeLocalStorage();
@@ -114,39 +117,6 @@ function createEmptySession(): ChatSession {
 // if it is using gpt-* models, force to use 4o-mini to summarize
 const ChatFetchTaskPool: Record<string, any> = {};
-function getSummarizeModel(currentModel: string): {
-  name: string;
-  providerName: string | undefined;
-} {
-  // if it is using gpt-* models, force to use 4o-mini to summarize
-  if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
-    const configStore = useAppConfig.getState();
-    const accessStore = useAccessStore.getState();
-    const allModel = collectModelsWithDefaultModel(
-      configStore.models,
-      [configStore.customModels, accessStore.customModels].join(","),
-      accessStore.defaultModel,
-    );
-    const summarizeModel = allModel.find(
-      (m) => m.name === SUMMARIZE_MODEL && m.available,
-    );
-    return {
-      name: summarizeModel?.name ?? currentModel,
-      providerName: summarizeModel?.provider?.providerName,
-    };
-  }
-  if (currentModel.startsWith("gemini")) {
-    return {
-      name: GEMINI_SUMMARIZE_MODEL,
-      providerName: ServiceProvider.Google,
-    };
-  }
-  return {
-    name: currentModel,
-    providerName: undefined,
-  };
-}
 function countMessages(msgs: ChatMessage[]) {
   return msgs.reduce(
     (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
@@ -935,7 +905,7 @@ export const useChatStore = createPersistStore(
          return;
        }
-        const providerName = modelConfig.providerName;
+        const providerName = modelConfig.compressProviderName;
        const api: ClientApi = getClientApi(providerName);
        // remove error messages if any
@@ -957,9 +927,7 @@
        api.llm.chat({
          messages: topicMessages,
          config: {
-            model: getSummarizeModel(session.mask.modelConfig.model).name,
-            providerName: getSummarizeModel(session.mask.modelConfig.model)
-              .providerName,
+            model: modelConfig.compressModel,
            stream: false,
          },
          onFinish(message) {
@@ -1021,9 +989,10 @@
          config: {
            ...modelcfg,
            stream: true,
-            model: getSummarizeModel(session.mask.modelConfig.model).name,
-            providerName: getSummarizeModel(session.mask.modelConfig.model)
-              .providerName,
+            model: modelConfig.compressModel,
+            // providerName: getSummarizeModel(session.mask.modelConfig.model)
+            //   .providerName,
+            // TODO:
          },
          onUpdate(message) {
            session.memoryPrompt = message;
@@ -1072,7 +1041,7 @@
  },
  {
    name: StoreKey.Chat,
-    version: 3.1,
+    version: 3.2,
    migrate(persistedState, version) {
      const state = persistedState as any;
      const newState = JSON.parse(
@@ -1119,6 +1088,16 @@
        });
      }
+      // add default summarize model for every session
+      if (version < 3.2) {
+        newState.sessions.forEach((s) => {
+          const config = useAppConfig.getState();
+          s.mask.modelConfig.compressModel = config.modelConfig.compressModel;
+          s.mask.modelConfig.compressProviderName =
+            config.modelConfig.compressProviderName;
+        });
+      }
      return newState as any;
    },
  },

View File

@@ -55,7 +55,7 @@ export const DEFAULT_CONFIG = {
  dontUseModel: DISABLE_MODELS,
  modelConfig: {
-    model: "gpt-3.5-turbo-0125" as ModelType,
+    model: "gpt-4o-mini" as ModelType,
    providerName: "OpenAI" as ServiceProvider,
    temperature: 0.8,
    top_p: 1,
@@ -65,6 +65,8 @@
    sendMemory: true,
    historyMessageCount: 5,
    compressMessageLengthThreshold: 4000,
+    compressModel: "gpt-4o-mini" as ModelType,
+    compressProviderName: "OpenAI" as ServiceProvider,
    enableInjectSystemPrompts: true,
    template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
    size: "1024x1024" as DalleSize,
@@ -145,7 +147,7 @@ export const useAppConfig = createPersistStore(
  }),
  {
    name: StoreKey.Config,
-    version: 3.993,
+    version: 4,
    migrate(persistedState, version) {
      const state = persistedState as ChatConfig;
@@ -190,6 +192,13 @@
        // : config?.template ?? DEFAULT_INPUT_TEMPLATE;
      }
+      if (version < 4) {
+        state.modelConfig.compressModel =
+          DEFAULT_CONFIG.modelConfig.compressModel;
+        state.modelConfig.compressProviderName =
+          DEFAULT_CONFIG.modelConfig.compressProviderName;
+      }
      return state as any;
    },
  },

View File

@@ -9,7 +9,7 @@
  },
  "package": {
    "productName": "NextChat",
-    "version": "2.15.1"
+    "version": "2.15.2"
  },
  "tauri": {
    "allowlist": {