Compare commits

...

16 Commits

Author | SHA1 | Message | Date
Peter Dave Hello | a9d0632bf5 | Merge bc53c17a8c into c30ddfbb07 | 2025-06-13 18:36:35 +08:00
RiverRay | c30ddfbb07 | Merge pull request #6425 from yunlingz/o_model_md_response (Fix: Encourage markdown inclusion in model responses for o1/o3) | 2025-06-12 11:19:24 +08:00
RiverRay | a2f0149786 | Merge pull request #6460 from dreamsafari/main (Add Grok3 model list) | 2025-06-12 11:13:31 +08:00
GH Action - Upstream Sync | 03d36f96ed | Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web | 2025-06-12 01:53:30 +00:00
RiverRay | 705dffc664 | Merge pull request #6514 from KevinShiCN/patch-1 (Add gemini-2.5-pro-preview-06-05 into constant.ts) | 2025-06-11 16:14:09 +08:00
KevinShiCN | 02f7e6de98 | Add gemini-2.5-pro-preview-06-05 into constant.ts | 2025-06-08 23:59:49 +08:00
dreamsafari | 843dc52efa | Add Grok3 model list | 2025-04-22 13:06:54 +08:00
RiverRay | 3809375694 | Merge pull request #6457 from ACTOR-ALCHEMIST/main (Support OpenAI o3 and o4-mini) | 2025-04-19 16:00:41 +08:00
RiverRay | 1b0de25986 | Update README.md | 2025-04-19 15:59:31 +08:00
RiverRay | 865c45dd29 | Update README.md | 2025-04-19 15:56:53 +08:00
RiverRay | 1f5d8e6d9c | Merge pull request #6458 from ChatGPTNextWeb/Leizhenpeng-patch-7 (Update README.md) | 2025-04-19 15:50:48 +08:00
Jasper Hu | 2d7229d2b8 | feat: support new OpenAI models o3 and o4-mini, and adapt to new parameters | 2025-04-18 20:36:07 +01:00
Yunling Zhu | c261ebc82c | use unshift to improve perf | 2025-04-06 16:56:54 +08:00
Yunling Zhu | f7c747c65f | encourage markdown inclusion for o1/o3 | 2025-04-03 22:11:59 +08:00
Peter Dave Hello | bc53c17a8c | Improve prompt store prompt list counting | 2024-07-22 01:01:25 +08:00
Peter Dave Hello | 16c16887ae | Improve prompt store lang sorting | 2024-07-22 01:01:16 +08:00
4 changed files with 41 additions and 12 deletions

File 1/4: README.md

@@ -22,7 +22,7 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
-[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
[saas-url]: https://nextchat.club?utm_source=readme
@@ -41,7 +41,9 @@ English / [简体中文](./README_CN.md)
</div>
## 🥳 Cheer for NextChat iOS Version Online!
-> [ 👉 Click Here Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
+> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
+> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
+
![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)

File 2/4: OpenAI chat client

@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
export interface RequestPayload {
  messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
@@ -198,7 +198,8 @@ export class ChatGPTApi implements LLMApi {
    const isDalle3 = _isDalle3(options.config.model);
    const isO1OrO3 =
      options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+      options.config.model.startsWith("o3") ||
+      options.config.model.startsWith("o4-mini");
    if (isDalle3) {
      const prompt = getMessageTextContent(
        options.messages.slice(-1)?.pop() as any,
@@ -237,13 +238,21 @@ export class ChatGPTApi implements LLMApi {
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };
-    // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
    if (isO1OrO3) {
+      // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+      // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });
+
+      // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
    }
    // add max_tokens to vision model
-    if (visionModel) {
+    if (visionModel && !isO1OrO3) {
      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
    }
  }
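
Taken together, the hunks above change how the request body is built for reasoning models. The following is a minimal TypeScript sketch of the combined effect; the function name, the SketchPayload type, and the sample values are illustrative assumptions, not code from this diff.

    // Illustrative sketch only: mirrors the diff's logic for o1/o3/o4-mini models.
    type Role = "developer" | "system" | "user" | "assistant";

    interface SketchPayload {
      model: string;
      messages: { role: Role; content: string }[];
      max_tokens?: number;
      max_completion_tokens?: number;
    }

    function buildPayloadSketch(
      model: string,
      userText: string,
      maxTokens: number,
      visionModel: boolean,
    ): SketchPayload {
      const isO1OrO3 =
        model.startsWith("o1") ||
        model.startsWith("o3") ||
        model.startsWith("o4-mini");

      const payload: SketchPayload = {
        model,
        messages: [{ role: "user", content: userText }],
      };

      if (isO1OrO3) {
        // Reasoning models skip markdown by default; a leading "developer"
        // message saying "Formatting re-enabled" asks them to emit markdown again.
        payload.messages.unshift({ role: "developer", content: "Formatting re-enabled" });
        // Reasoning models are budgeted with max_completion_tokens, not max_tokens.
        payload.max_completion_tokens = maxTokens;
      }

      // Vision models still get max_tokens, except when the model is o1/o3/o4-mini.
      if (visionModel && !isO1OrO3) {
        payload.max_tokens = Math.max(maxTokens, 4000);
      }
      return payload;
    }

    // buildPayloadSketch("o4-mini", "Explain the diff", 8000, false)
    // -> developer message first, max_completion_tokens: 8000, and no max_tokens.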

File 3/4: constant.ts

@@ -478,6 +478,8 @@ export const VISION_MODEL_REGEXES = [
  /^dall-e-3$/, // Matches exactly "dall-e-3"
  /glm-4v/,
  /vl/i,
+  /o3/,
+  /o4-mini/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -516,6 +518,8 @@ const openaiModels = [
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
];
const googleModels = [
@@ -542,6 +546,7 @@ const googleModels = [
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-2.5-pro-preview-06-05",
];
const anthropicModels = [
@@ -628,6 +633,18 @@ const xAIModes = [
"grok-2-vision-1212",
"grok-2-vision",
"grok-2-vision-latest",
"grok-3-mini-fast-beta",
"grok-3-mini-fast",
"grok-3-mini-fast-latest",
"grok-3-mini-beta",
"grok-3-mini",
"grok-3-mini-latest",
"grok-3-fast-beta",
"grok-3-fast",
"grok-3-fast-latest",
"grok-3-beta",
"grok-3",
"grok-3-latest",
];
const chatglmModels = [
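
The regex additions in the first hunk are what make o3 and o4-mini count as vision-capable models. Below is a short sketch of how such include/exclude regex lists are typically consumed; the isVisionModelSketch helper is an assumption about surrounding code, not part of this diff, and the arrays are abridged to the entries shown above.

    // Abridged lists from the hunk above; /o3/ and /o4-mini/ are unanchored,
    // so they match any model id containing those substrings.
    const VISION_MODEL_REGEXES = [/^dall-e-3$/, /glm-4v/, /vl/i, /o3/, /o4-mini/];
    const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

    // Assumed helper: a model is treated as vision-capable when it matches an
    // include pattern and no exclude pattern.
    function isVisionModelSketch(model: string): boolean {
      return (
        VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
        !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
      );
    }

    // isVisionModelSketch("o4-mini")        -> true  (new /o4-mini/ entry)
    // isVisionModelSketch("o3")             -> true  (new /o3/ entry)
    // isVisionModelSketch("gpt-3.5-turbo")  -> false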

File 4/4: prompt store

@@ -159,10 +159,10 @@ export const usePromptStore = createPersistStore(
      fetch(PROMPT_URL)
        .then((res) => res.json())
        .then((res) => {
-          let fetchPrompts = [res.en, res.tw, res.cn];
-          if (getLang() === "cn") {
-            fetchPrompts = fetchPrompts.reverse();
-          }
+          const lang = getLang();
+          const fetchPrompts = [res[lang], res.en, res.tw, res.cn].filter(
+            Boolean,
+          );
          const builtinPrompts = fetchPrompts.map((promptList: PromptList) => {
            return promptList.map(
              ([title, content]) =>
@@ -180,8 +180,9 @@ export const usePromptStore = createPersistStore(
          const allPromptsForSearch = builtinPrompts
            .reduce((pre, cur) => pre.concat(cur), [])
            .filter((v) => !!v.title && !!v.content);
-          SearchService.count.builtin =
-            res.en.length + res.cn.length + res.tw.length;
+          SearchService.count.builtin = Object.values(res)
+            .filter(Array.isArray)
+            .reduce((total, promptList) => total + promptList.length, 0);
          SearchService.init(allPromptsForSearch, userPrompts);
        });
      },
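
The prompt-store hunks change two things: the fetched prompt lists now start with the current UI language (res[lang]) instead of reversing the array when the language is "cn", and the built-in count sums every array the server returns instead of hard-coding en + cn + tw. A small sketch of the new counting logic follows; the sample data and PromptList shape are illustrative assumptions.

    type PromptList = [string, string][]; // [title, content] pairs

    // Made-up response shape; real data comes from PROMPT_URL.
    const res: Record<string, PromptList> = {
      en: [["Linux terminal", "I want you to act as a Linux terminal..."]],
      tw: [["Translator (zh-TW)", "..."]],
      cn: [["Translator (zh-CN)", "..."]],
    };

    // New counting: every array in the response contributes, so adding a new
    // language key (or dropping one) no longer breaks the total.
    const builtinCount = Object.values(res)
      .filter(Array.isArray)
      .reduce((total, promptList) => total + promptList.length, 0); // 3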