Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git, synced 2025-09-30 07:06:37 +08:00.

Compare commits
9 commits: 57d5a5e341 ... ba4ec25945

| SHA1 |
| --- |
| ba4ec25945 |
| 995bef73de |
| 38ac502d80 |
| 0511808900 |
| 42eff644b4 |
| 8ae6883784 |
| c0f2ab6de3 |
| 937e2b5a54 |
| afdf3a5cd4 |
`.env.template`:

```diff
@@ -64,6 +64,11 @@ CUSTOM_MODELS=
 # Change default model
 DEFAULT_MODEL=
 
+# (optional)
+# Default: Empty
+# Change default compress model
+DEFAULT_COMPRESS_MODEL=
+
 # anthropic claude Api Key.(optional)
 ANTHROPIC_API_KEY=
```
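Alongside `DEFAULT_MODEL`, the new variable slots into `.env` like this; the model names below are illustrative, not defaults shipped by the project:

```
# illustrative values, not project defaults
DEFAULT_MODEL=gpt-4o
DEFAULT_COMPRESS_MODEL=gpt-4o-mini
```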
`README.md`:

```diff
@@ -328,6 +328,10 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
 
 Change default model
 
+### `DEFAULT_COMPRESS_MODEL` (optional)
+
+Change default summary model
+
 ### `VISION_MODELS` (optional)
 
 > Default: Empty
```
`README_CN.md` (translated):

```diff
@@ -253,6 +253,10 @@ DeepSeek Api Url.
 
 Change the default model
 
+### `DEFAULT_COMPRESS_MODEL` (optional)
+
+Change the default conversation summary model
+
 ### `VISION_MODELS` (optional)
 
 > Default: Empty
```
`README_JA.md` (translated):

```diff
@@ -221,6 +221,10 @@ In ByteDance mode, use the `modelName@bytedance=deploymentName` format to customize the model
 
 Changes the default model.
 
+### `DEFAULT_COMPRESS_MODEL` (optional)
+
+Changes the default compress model.
+
 ### `VISION_MODELS` (optional)
 
 > Default: Empty
```
`app/api/config/route.ts`:

```diff
@@ -14,6 +14,7 @@ const DANGER_CONFIG = {
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
   defaultModel: serverConfig.defaultModel,
+  defaultCompressModel: serverConfig.defaultCompressModel,
   visionModels: serverConfig.visionModels,
 };
 
```
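`DANGER_CONFIG` is the slice of server config the client is allowed to see. A minimal sketch of a consumer, assuming the route answers POST at `/api/config` and returns this object as JSON (the interface mirrors the fields in the hunk):

```ts
// Sketch only: the endpoint path, HTTP method, and DangerConfig shape
// are assumptions based on the hunk above.
interface DangerConfig {
  disableFastLink: boolean;
  customModels: string;
  defaultModel: string;
  defaultCompressModel: string;
  visionModels: string;
}

async function fetchDangerConfig(): Promise<DangerConfig> {
  const res = await fetch("/api/config", { method: "POST" });
  if (!res.ok) throw new Error(`config fetch failed: ${res.status}`);
  return (await res.json()) as DangerConfig;
}
```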
`app/client/platforms/openai.ts`:

```diff
@@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
+    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
```
```diff
@@ -230,7 +231,7 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+      temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
       presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
       frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
       top_p: !isO1OrO3 ? modelConfig.top_p : 1,
```
```diff
@@ -238,7 +239,13 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    if (isO1OrO3) {
+    if (isGpt5) {
+      // Remove max_tokens if present
+      delete requestPayload.max_tokens;
+      // Send max_completion_tokens instead
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+
+    } else if (isO1OrO3) {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
```
```diff
@@ -251,8 +258,9 @@ export class ChatGPTApi implements LLMApi {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
 
+
     // add max_tokens to vision model
-    if (visionModel && !isO1OrO3) {
+    if (visionModel && !isO1OrO3 && !isGpt5) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
   }
```
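The net effect for gpt-5: temperature pinned to 1, no `max_tokens`, and the budget sent as `max_completion_tokens`. A standalone sketch of that payload shaping, with the helper name and config shape assumed for illustration:

```ts
// Hypothetical helper illustrating the gpt-5 payload rules above.
// The function name and ModelConfig shape are assumptions for this sketch.
interface ModelConfig {
  model: string;
  temperature: number;
  max_tokens: number;
}

function shapePayload(cfg: ModelConfig): Record<string, unknown> {
  const isGpt5 = cfg.model.startsWith("gpt-5");
  const payload: Record<string, unknown> = {
    model: cfg.model,
    // gpt-5 only accepts the default temperature of 1
    temperature: isGpt5 ? 1 : cfg.temperature,
  };
  if (isGpt5) {
    payload["max_completion_tokens"] = cfg.max_tokens; // new field for gpt-5
  } else {
    payload["max_tokens"] = cfg.max_tokens;
  }
  return payload;
}

// shapePayload({ model: "gpt-5-mini", temperature: 0.7, max_tokens: 4000 })
// → { model: "gpt-5-mini", temperature: 1, max_completion_tokens: 4000 }
```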
`app/components/model-config.tsx`:

```diff
@@ -259,13 +259,15 @@ export function ModelConfigList(props: {
           });
         }}
       >
-        {allModels
-          .filter((v) => v.available)
-          .map((v, i) => (
-            <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
-              {v.displayName}({v.provider?.providerName})
-            </option>
-          ))}
+        {Object.keys(groupModels).map((providerName, index) => (
+          <optgroup label={providerName} key={index}>
+            {groupModels[providerName].map((v, i) => (
+              <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
+                {v.displayName}
+              </option>
+            ))}
+          </optgroup>
+        ))}
       </Select>
     </ListItem>
   </>
```
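The hunk assumes a `groupModels` record keyed by provider name; the construction itself is not shown here. A minimal sketch of how such a grouping could be derived from the flat `allModels` list:

```ts
// Sketch: group available models by provider for <optgroup> rendering.
// The Model shape is simplified; the commit's actual grouping code is
// outside this hunk.
interface Model {
  name: string;
  displayName: string;
  available: boolean;
  provider?: { providerName: string };
}

function groupByProvider(allModels: Model[]): Record<string, Model[]> {
  const groups: Record<string, Model[]> = {};
  for (const m of allModels.filter((v) => v.available)) {
    const key = m.provider?.providerName ?? "Unknown";
    (groups[key] ??= []).push(m);
  }
  return groups;
}
```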
`app/config/server.ts`:

```diff
@@ -23,6 +23,7 @@ declare global {
       DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
       CUSTOM_MODELS?: string; // to control custom models
       DEFAULT_MODEL?: string; // to control default model in every new chat window
+      DEFAULT_COMPRESS_MODEL?: string; // to control default compress model
       VISION_MODELS?: string; // to control vision models
 
       // stability only
```
```diff
@@ -139,6 +140,7 @@ export const getServerSideConfig = () => {
   const disableGPT4 = !!process.env.DISABLE_GPT4;
   let customModels = process.env.CUSTOM_MODELS ?? "";
   let defaultModel = process.env.DEFAULT_MODEL ?? "";
+  let defaultCompressModel = process.env.DEFAULT_COMPRESS_MODEL ?? "";
   let visionModels = process.env.VISION_MODELS ?? "";
 
   if (disableGPT4) {
```
```diff
@@ -149,6 +151,9 @@ export const getServerSideConfig = () => {
     if (defaultModel && isGPT4Model(defaultModel)) {
       defaultModel = "";
     }
+    if (defaultCompressModel && isGPT4Model(defaultCompressModel)) {
+      defaultCompressModel = "";
+    }
   }
 
   const isStability = !!process.env.STABILITY_API_KEY;
```
```diff
@@ -271,6 +276,7 @@ export const getServerSideConfig = () => {
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
     defaultModel,
+    defaultCompressModel,
     visionModels,
     allowedWebDavEndpoints,
     enableMcp: process.env.ENABLE_MCP === "true",
```
`app/constant.ts`:

```diff
@@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
+  /gpt-5/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
```
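These regexes presumably feed a vision-capability check; a sketch of how such a test could work, with the function name assumed (the hunk only shows the regex arrays):

```ts
// Sketch of a vision-capability check built on the two regex lists.
// isVisionModel is an assumed name, not code shown in this diff.
const VISION_MODEL_REGEXES = [/o3/, /o4-mini/, /grok-4/i, /gpt-5/];
const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

function isVisionModel(model: string): boolean {
  return (
    !EXCLUDE_VISION_MODEL_REGEXES.some((r) => r.test(model)) &&
    VISION_MODEL_REGEXES.some((r) => r.test(model))
  );
}

// isVisionModel("gpt-5-mini") → true; isVisionModel("gpt-4.1-nano") → false
```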
```diff
@@ -517,6 +518,11 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
+  "gpt-5-chat",
+  "gpt-5-mini",
+  "gpt-5-nano",
+  "gpt-5",
+  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
```
`app/store/access.ts`:

```diff
@@ -147,6 +147,7 @@ const DEFAULT_ACCESS_STATE = {
   disableFastLink: false,
   customModels: "",
   defaultModel: "",
+  defaultCompressModel: "",
   visionModels: "",
 
   // tts config
```
```diff
@@ -262,12 +263,21 @@ export const useAccessStore = createPersistStore(
       .then((res) => res.json())
       .then((res) => {
         const defaultModel = res.defaultModel ?? "";
-        if (defaultModel !== "") {
+        if (defaultModel) {
           const [model, providerName] = getModelProvider(defaultModel);
           DEFAULT_CONFIG.modelConfig.model = model;
           DEFAULT_CONFIG.modelConfig.providerName = providerName as any;
         }
+
+        const defaultCompressModel = res.defaultCompressModel ?? "";
+        if (defaultCompressModel) {
+          const [model, providerName] =
+            getModelProvider(defaultCompressModel);
+          DEFAULT_CONFIG.modelConfig.compressModel = model;
+          DEFAULT_CONFIG.modelConfig.compressProviderName =
+            providerName as any;
+        }
 
         return res;
       })
       .then((res: DangerConfig) => {
```
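Both branches lean on `getModelProvider` to split a `model@provider` string. A sketch of that contract, under the assumption that everything after the last `@` names the provider:

```ts
// Sketch of the model@provider split used above; assumes the provider
// name follows the last "@" and may be absent entirely.
function getModelProvider(value: string): [string, string | undefined] {
  const at = value.lastIndexOf("@");
  if (at === -1) return [value, undefined];
  return [value.slice(0, at), value.slice(at + 1)];
}

// getModelProvider("gpt-4o-mini@OpenAI") → ["gpt-4o-mini", "OpenAI"]
// getModelProvider("gpt-4o-mini")        → ["gpt-4o-mini", undefined]
```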
`app/store/chat.ts`:

```diff
@@ -123,9 +123,16 @@ function getSummarizeModel(
   currentModel: string,
   providerName: string,
 ): string[] {
+  const configStore = useAppConfig.getState();
+  if (configStore.modelConfig.compressModel) {
+    return [
+      configStore.modelConfig.compressModel,
+      configStore.modelConfig.compressProviderName,
+    ];
+  }
+
   // if it is using gpt-* models, force to use 4o-mini to summarize
   if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
-    const configStore = useAppConfig.getState();
     const accessStore = useAccessStore.getState();
     const allModel = collectModelsWithDefaultModel(
       configStore.models,
```
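In short, an explicitly configured compress model now short-circuits the gpt-* fallback. A sketch of the resulting precedence, with the store access stubbed out and the fallback model taken from the comment in the hunk rather than code shown here:

```ts
// Sketch of the summarize-model precedence after this change.
// CompressConfig is a simplified stand-in for the app config store;
// "gpt-4o-mini" as the gpt-* fallback reflects the hunk's comment only.
interface CompressConfig {
  compressModel?: string;
  compressProviderName?: string;
}

function pickSummarizeModel(
  cfg: CompressConfig,
  currentModel: string,
  providerName: string,
): [string, string] {
  if (cfg.compressModel) {
    // an explicit DEFAULT_COMPRESS_MODEL wins
    return [cfg.compressModel, cfg.compressProviderName ?? providerName];
  }
  if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
    return ["gpt-4o-mini", "OpenAI"]; // assumed fallback per the comment
  }
  return [currentModel, providerName];
}
```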