Compare commits

...

13 Commits

Author | SHA1 | Message | Date
玖然 | bf5dbedfdf | Merge 937e2b5a54 into fb3af2a08f | 2025-06-16 11:25:59 +08:00
RiverRay | fb3af2a08f | Merge pull request #6515 from dupl/main: Removed deprecated Gemini models | 2025-06-14 13:35:32 +08:00
dupl | eb193ac0ff | Removed deprecated Gemini models | 2025-06-12 15:34:03 +08:00
RiverRay | c30ddfbb07 | Merge pull request #6425 from yunlingz/o_model_md_response: Fix: Encourage markdown inclusion in model responses for o1/o3 | 2025-06-12 11:19:24 +08:00
RiverRay | a2f0149786 | Merge pull request #6460 from dreamsafari/main: 加入Grok3模型列表 (Add Grok3 model list) | 2025-06-12 11:13:31 +08:00
GH Action - Upstream Sync | 03d36f96ed | Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web | 2025-06-12 01:53:30 +00:00
RiverRay | 705dffc664 | Merge pull request #6514 from KevinShiCN/patch-1: Add gemini-2.5-pro-preview-06-05 into constant.ts | 2025-06-11 16:14:09 +08:00
KevinShiCN | 02f7e6de98 | Add gemini-2.5-pro-preview-06-05 into constant.ts | 2025-06-08 23:59:49 +08:00
dreamsafari | 843dc52efa | 加入Grok3模型列表 (Add Grok3 model list) | 2025-04-22 13:06:54 +08:00
Yunling Zhu | c261ebc82c | use unshift to improve perf | 2025-04-06 16:56:54 +08:00
Yunling Zhu | f7c747c65f | encourage markdown inclusion for o1/o3 | 2025-04-03 22:11:59 +08:00
玖然 | 937e2b5a54 | Merge branch 'ChatGPTNextWeb:main' into main | 2025-03-10 22:40:18 +08:00
Noctiro | afdf3a5cd4 | Add DEFAULT_COMPRESS_MODEL configuration | 2025-02-04 11:20:54 +08:00
11 changed files with 75 additions and 17 deletions

View File

@@ -64,6 +64,11 @@ CUSTOM_MODELS=
 # Change default model
 DEFAULT_MODEL=
+# (optional)
+# Default: Empty
+# Change default compress model
+DEFAULT_COMPRESS_MODEL=
 # anthropic claude Api Key.(optional)
 ANTHROPIC_API_KEY=
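
For reference, a minimal example of setting the new variable in a deployment's `.env` (the model name is illustrative, not a value taken from this diff):

# Use a cheap, fast model for history compression / summarization
DEFAULT_COMPRESS_MODEL=gpt-4o-mini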

View File

@@ -322,6 +322,10 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
 Change default model
+### `DEFAULT_COMPRESS_MODEL` (optional)
+Change default summary model
 ### `VISION_MODELS` (optional)
 > Default: Empty
@@ -348,7 +352,6 @@ Stability API key.
 Customize Stability API url.
 ### `ENABLE_MCP` (optional)
 Enable MCP（Model Context Protocol）Feature

View File

@@ -244,6 +244,10 @@ DeepSeek Api Url.
 更改默认模型
+### `DEFAULT_COMPRESS_MODEL` (可选)
+更改默认对话摘要模型
 ### `VISION_MODELS` (可选)
 > 默认值:空

View File

@@ -217,6 +217,10 @@ ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデ
 デフォルトのモデルを変更します。
+### `DEFAULT_COMPRESS_MODEL` (オプション)
+デフォルトの圧縮モデルを変更します。
 ### `VISION_MODELS` (オプション)
 > デフォルト:空

View File

@@ -14,6 +14,7 @@ const DANGER_CONFIG = {
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
   defaultModel: serverConfig.defaultModel,
+  defaultCompressModel: serverConfig.defaultCompressModel,
   visionModels: serverConfig.visionModels,
 };
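
With this field added, the config endpoint that serves `DANGER_CONFIG` presumably returns the compress model to the client alongside the existing fields. An illustrative response body (values are made up for the example; only the field names come from this diff):

{
  "disableFastLink": false,
  "customModels": "",
  "defaultModel": "gpt-4o-mini@openai",
  "defaultCompressModel": "gpt-4o-mini@openai",
  "visionModels": ""
}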

View File

@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
@@ -238,8 +238,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
-    // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
     if (isO1OrO3) {
+      // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+      // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });
+      // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
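
To see what this change does to the wire format: for an o1/o3 request, the outgoing payload now leads with the prepended developer message. A sketch (the model name and user message are illustrative):

const requestPayload = {
  messages: [
    // prepended by the unshift() above
    { role: "developer", content: "Formatting re-enabled" },
    { role: "user", content: "Explain quicksort with a code block." },
  ],
  model: "o3-mini", // illustrative
  max_completion_tokens: 4000, // o1/o3 take this instead of max_tokens
};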

View File

@@ -259,13 +259,15 @@ export function ModelConfigList(props: {
               });
             }}
           >
-            {allModels
-              .filter((v) => v.available)
-              .map((v, i) => (
+            {Object.keys(groupModels).map((providerName, index) => (
+              <optgroup label={providerName} key={index}>
+                {groupModels[providerName].map((v, i) => (
                   <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
-                    {v.displayName}({v.provider?.providerName})
+                    {v.displayName}
                   </option>
                 ))}
+              </optgroup>
+            ))}
           </Select>
         </ListItem>
       </>
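
The hunk references a `groupModels` value built outside it; evidently it buckets the available models by provider so each provider renders as one `<optgroup>`. A minimal sketch of that grouping, assuming the same `available` filter the removed code used (not the repo's actual implementation):

const groupModels = allModels
  .filter((v) => v.available)
  .reduce((groups, v) => {
    const provider = v.provider?.providerName ?? "Unknown";
    (groups[provider] ??= []).push(v); // create the bucket on first use
    return groups;
  }, {} as Record<string, typeof allModels>);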

View File

@@ -23,6 +23,7 @@ declare global {
     DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
     CUSTOM_MODELS?: string; // to control custom models
     DEFAULT_MODEL?: string; // to control default model in every new chat window
+    DEFAULT_COMPRESS_MODEL?: string; // to control default compress model
     VISION_MODELS?: string; // to control vision models
     // stability only
@@ -135,6 +136,7 @@ export const getServerSideConfig = () => {
   const disableGPT4 = !!process.env.DISABLE_GPT4;
   let customModels = process.env.CUSTOM_MODELS ?? "";
   let defaultModel = process.env.DEFAULT_MODEL ?? "";
+  let defaultCompressModel = process.env.DEFAULT_COMPRESS_MODEL ?? "";
   let visionModels = process.env.VISION_MODELS ?? "";
   if (disableGPT4) {
@@ -145,6 +147,9 @@ export const getServerSideConfig = () => {
     if (defaultModel && isGPT4Model(defaultModel)) {
       defaultModel = "";
     }
+    if (defaultCompressModel && isGPT4Model(defaultCompressModel)) {
+      defaultCompressModel = "";
+    }
   }
   const isStability = !!process.env.STABILITY_API_KEY;
@@ -262,6 +267,7 @@ export const getServerSideConfig = () => {
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
     defaultModel,
+    defaultCompressModel,
     visionModels,
     allowedWebDavEndpoints,
     enableMcp: process.env.ENABLE_MCP === "true",
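
Taken together, the server-side changes mean `DEFAULT_COMPRESS_MODEL` is read from the environment and, like `DEFAULT_MODEL`, blanked when GPT-4-class models are disabled. A hypothetical `.env` to exercise that guard (values are illustrative):

DISABLE_GPT4=1
# Cleared at startup by the isGPT4Model() check above; a non-GPT-4
# value would be passed through to the client config unchanged.
DEFAULT_COMPRESS_MODEL=gpt-4o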

View File

@@ -523,20 +523,15 @@ const openaiModels = [
 ];
 const googleModels = [
-  "gemini-1.0-pro", // Deprecated on 2/15/2025
   "gemini-1.5-pro-latest",
   "gemini-1.5-pro",
   "gemini-1.5-pro-002",
-  "gemini-1.5-pro-exp-0827",
   "gemini-1.5-flash-latest",
   "gemini-1.5-flash-8b-latest",
   "gemini-1.5-flash",
   "gemini-1.5-flash-8b",
   "gemini-1.5-flash-002",
-  "gemini-1.5-flash-exp-0827",
   "learnlm-1.5-pro-experimental",
-  "gemini-exp-1114",
-  "gemini-exp-1121",
   "gemini-exp-1206",
   "gemini-2.0-flash",
   "gemini-2.0-flash-exp",
@@ -546,6 +541,7 @@ const googleModels = [
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
+  "gemini-2.5-pro-preview-06-05",
 ];
 const anthropicModels = [
@@ -632,6 +628,18 @@ const xAIModes = [
   "grok-2-vision-1212",
   "grok-2-vision",
   "grok-2-vision-latest",
+  "grok-3-mini-fast-beta",
+  "grok-3-mini-fast",
+  "grok-3-mini-fast-latest",
+  "grok-3-mini-beta",
+  "grok-3-mini",
+  "grok-3-mini-latest",
+  "grok-3-fast-beta",
+  "grok-3-fast",
+  "grok-3-fast-latest",
+  "grok-3-beta",
+  "grok-3",
+  "grok-3-latest",
 ];
 const chatglmModels = [
const chatglmModels = [

View File

@@ -140,6 +140,7 @@ const DEFAULT_ACCESS_STATE = {
   disableFastLink: false,
   customModels: "",
   defaultModel: "",
+  defaultCompressModel: "",
   visionModels: "",
   // tts config
@@ -255,12 +256,21 @@ export const useAccessStore = createPersistStore(
         .then((res) => res.json())
         .then((res) => {
           const defaultModel = res.defaultModel ?? "";
-          if (defaultModel !== "") {
+          if (defaultModel) {
             const [model, providerName] = getModelProvider(defaultModel);
             DEFAULT_CONFIG.modelConfig.model = model;
             DEFAULT_CONFIG.modelConfig.providerName = providerName as any;
           }
+          const defaultCompressModel = res.defaultCompressModel ?? "";
+          if (defaultCompressModel) {
+            const [model, providerName] =
+              getModelProvider(defaultCompressModel);
+            DEFAULT_CONFIG.modelConfig.compressModel = model;
+            DEFAULT_CONFIG.modelConfig.compressProviderName =
+              providerName as any;
+          }
           return res;
         })
         .then((res: DangerConfig) => {
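
`getModelProvider` is imported from elsewhere in the repo; from its use here it evidently splits a `model@provider` string into its parts. A rough sketch of that contract (an assumption for illustration, not the repo's actual implementation):

// Split "model@provider" into [model, provider]; provider may be undefined.
function getModelProvider(value: string): [string, string?] {
  const atIndex = value.lastIndexOf("@");
  if (atIndex === -1) return [value];
  return [value.slice(0, atIndex), value.slice(atIndex + 1)];
}

getModelProvider("gemini-2.5-pro-preview-06-05@google"); // ["gemini-2.5-pro-preview-06-05", "google"]
getModelProvider("gpt-4o-mini"); // ["gpt-4o-mini"]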

View File

@@ -123,9 +123,16 @@ function getSummarizeModel(
   currentModel: string,
   providerName: string,
 ): string[] {
+  const configStore = useAppConfig.getState();
+  if (configStore.modelConfig.compressModel) {
+    return [
+      configStore.modelConfig.compressModel,
+      configStore.modelConfig.compressProviderName,
+    ];
+  }
   // if it is using gpt-* models, force to use 4o-mini to summarize
   if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
-    const configStore = useAppConfig.getState();
     const accessStore = useAccessStore.getState();
     const allModel = collectModelsWithDefaultModel(
       configStore.models,