Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git

Compare commits: 40a186a639 ... a9d0632bf5 (9 commits)
Commits in this range (SHA1):
a9d0632bf5
c30ddfbb07
a2f0149786
03d36f96ed
843dc52efa
c261ebc82c
f7c747c65f
bc53c17a8c
16c16887ae
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
 
 export interface RequestPayload {
   messages: {
-    role: "system" | "user" | "assistant";
+    role: "developer" | "system" | "user" | "assistant";
     content: string | MultimodalContent[];
   }[];
   stream?: boolean;
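For context, a standalone sketch of how the widened role union can be used; MultimodalContent is reduced to a stub here, and developerMessage is a hypothetical helper, not code from this repository:

// Sketch only: the real MultimodalContent type also carries image parts.
type MultimodalContent = { type: "text" | "image_url"; text?: string };

type Role = "developer" | "system" | "user" | "assistant";

interface RequestMessage {
  role: Role;
  content: string | MultimodalContent[];
}

// Hypothetical helper: builds the kind of message the o1/o3 hunk below
// prepends to the request payload.
function developerMessage(content: string): RequestMessage {
  return { role: "developer", content };
}

console.log(developerMessage("Formatting re-enabled"));
// -> { role: "developer", content: "Formatting re-enabled" }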
@@ -238,8 +238,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
-    // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
     if (isO1OrO3) {
+      // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
+      // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
+      // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
+      requestPayload["messages"].unshift({
+        role: "developer",
+        content: "Formatting re-enabled",
+      });
+
+      // o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
 
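Read as ordinary code, the hunk above prepends a developer message and switches the token-limit field for reasoning models. A self-contained sketch under simplified types; isReasoningModel and ChatPayload are stand-ins for the repository's isO1OrO3 flag and payload type:

interface ChatMessage {
  role: "developer" | "system" | "user" | "assistant";
  content: string;
}

interface ChatPayload {
  model: string;
  messages: ChatMessage[];
  max_tokens?: number;
  max_completion_tokens?: number;
}

// Stand-in for the PR's isO1OrO3 check, which is computed elsewhere.
function isReasoningModel(model: string): boolean {
  return model.startsWith("o1") || model.startsWith("o3");
}

function adjustForReasoning(payload: ChatPayload, maxTokens: number): ChatPayload {
  if (!isReasoningModel(payload.model)) return payload;
  return {
    ...payload,
    // o1/o3 suppress markdown output by default; a leading
    // "Formatting re-enabled" developer message opts back in
    // (see the Azure doc linked in the hunk above).
    messages: [
      { role: "developer", content: "Formatting re-enabled" },
      ...payload.messages,
    ],
    // Reasoning models take max_completion_tokens instead of max_tokens.
    max_tokens: undefined,
    max_completion_tokens: maxTokens,
  };
}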
@@ -633,6 +633,18 @@ const xAIModes = [
   "grok-2-vision-1212",
   "grok-2-vision",
   "grok-2-vision-latest",
+  "grok-3-mini-fast-beta",
+  "grok-3-mini-fast",
+  "grok-3-mini-fast-latest",
+  "grok-3-mini-beta",
+  "grok-3-mini",
+  "grok-3-mini-latest",
+  "grok-3-fast-beta",
+  "grok-3-fast",
+  "grok-3-fast-latest",
+  "grok-3-beta",
+  "grok-3",
+  "grok-3-latest",
 ];
 
 const chatglmModels = [
@@ -159,10 +159,10 @@ export const usePromptStore = createPersistStore(
       fetch(PROMPT_URL)
         .then((res) => res.json())
         .then((res) => {
-          let fetchPrompts = [res.en, res.tw, res.cn];
-          if (getLang() === "cn") {
-            fetchPrompts = fetchPrompts.reverse();
-          }
+          const lang = getLang();
+          const fetchPrompts = [res[lang], res.en, res.tw, res.cn].filter(
+            Boolean,
+          );
           const builtinPrompts = fetchPrompts.map((promptList: PromptList) => {
             return promptList.map(
               ([title, content]) =>
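The replaced block turns the old cn-only reversal into an explicit priority list with the user's language first; a minimal sketch, assuming the remote response is keyed by language code:

type PromptList = [string, string][]; // [title, content] pairs

type PromptResponse = Record<string, PromptList | undefined>;

function orderedPromptLists(res: PromptResponse, lang: string): PromptList[] {
  // The current language leads; filter(Boolean) drops the undefined entry
  // when the response carries no list for `lang`. The cast is needed
  // because filter(Boolean) alone does not narrow the element type.
  return [res[lang], res.en, res.tw, res.cn].filter(Boolean) as PromptList[];
}

// Example: a "de" user still receives the en/tw/cn fallbacks.
const res: PromptResponse = { en: [["greeting", "Hello"]], cn: [["title", "content"]] };
console.log(orderedPromptLists(res, "de").length); // -> 2

One design consequence: when lang is one of en, tw, or cn, its list appears twice in fetchPrompts, so those prompts flow into the concatenated search list twice; a dedupe pass would avoid that.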
@@ -180,8 +180,9 @@ export const usePromptStore = createPersistStore(
           const allPromptsForSearch = builtinPrompts
             .reduce((pre, cur) => pre.concat(cur), [])
             .filter((v) => !!v.title && !!v.content);
-          SearchService.count.builtin =
-            res.en.length + res.cn.length + res.tw.length;
+          SearchService.count.builtin = Object.values(res)
+            .filter(Array.isArray)
+            .reduce((total, promptList) => total + promptList.length, 0);
           SearchService.init(allPromptsForSearch, userPrompts);
         });
       },
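The new count no longer hard-codes the three language keys; a minimal sketch of the same pattern with illustrative data:

// Sum the lengths of every array-valued field in the response, ignoring
// any non-array fields the server might also return.
function countBuiltinPrompts(res: Record<string, unknown>): number {
  return Object.values(res)
    .filter(Array.isArray)
    .reduce((total, promptList) => total + promptList.length, 0);
}

console.log(countBuiltinPrompts({ en: [1, 2], cn: [3], tw: [], version: "1" }));
// -> 3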