mirror of
https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-10-11 20:43:42 +08:00
Compare commits
10 Commits
5f8de6a112
...
dependabot
Author | SHA1 | Date | |
---|---|---|---|
|
f9d3c93633 | ||
|
673f907ea4 | ||
|
fb3af2a08f | ||
|
eb193ac0ff | ||
|
c30ddfbb07 | ||
|
a2f0149786 | ||
|
03d36f96ed | ||
|
843dc52efa | ||
|
c261ebc82c | ||
|
f7c747c65f |
@@ -22,12 +22,12 @@ English / [简体中文](./README_CN.md)
|
||||
[![MacOS][MacOS-image]][download-url]
|
||||
[![Linux][Linux-image]][download-url]
|
||||
|
||||
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
|
||||
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.club) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
|
||||
|
||||
|
||||
[saas-url]: https://nextchat.club?utm_source=readme
|
||||
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
|
||||
[web-url]: https://app.nextchat.dev/
|
||||
[web-url]: https://app.nextchat.club/
|
||||
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
|
||||
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
|
||||
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
|
||||
@@ -309,14 +309,11 @@ If you want to disable parse settings from url, set this to 1.
|
||||
|
||||
To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma.
|
||||
|
||||
Use `-all` to disable all default models, `+all` to enable all default models.
|
||||
Use `-*provider` to disable all models from the specified provider.
|
||||
Current valid providers: `openai,azure,google,anthropic,baidu,bytedance,alibaba,tencent,moonshot,iflytek,xai,chatglm` and more to come.
|
||||
Use `-all` to disable all default models, `+all` to enable all default models.
|
||||
|
||||
For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
|
||||
> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
|
||||
> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
|
||||
> If you don't want to use Azure model, using `-*azure` will prevent Azure models from appearing in the model list.
|
||||
|
||||
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
|
||||
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
|
||||
|
@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
|
||||
|
||||
export interface RequestPayload {
|
||||
messages: {
|
||||
role: "system" | "user" | "assistant";
|
||||
role: "developer" | "system" | "user" | "assistant";
|
||||
content: string | MultimodalContent[];
|
||||
}[];
|
||||
stream?: boolean;
|
||||
@@ -238,8 +238,16 @@ export class ChatGPTApi implements LLMApi {
|
||||
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
|
||||
};
|
||||
|
||||
// O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
|
||||
if (isO1OrO3) {
|
||||
// by default the o1/o3 models will not attempt to produce output that includes markdown formatting
|
||||
// manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
|
||||
// (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
|
||||
requestPayload["messages"].unshift({
|
||||
role: "developer",
|
||||
content: "Formatting re-enabled",
|
||||
});
|
||||
|
||||
// o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
|
||||
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
|
||||
}
|
||||
|
||||
|
@@ -523,20 +523,15 @@ const openaiModels = [
|
||||
];
|
||||
|
||||
const googleModels = [
|
||||
"gemini-1.0-pro", // Deprecated on 2/15/2025
|
||||
"gemini-1.5-pro-latest",
|
||||
"gemini-1.5-pro",
|
||||
"gemini-1.5-pro-002",
|
||||
"gemini-1.5-pro-exp-0827",
|
||||
"gemini-1.5-flash-latest",
|
||||
"gemini-1.5-flash-8b-latest",
|
||||
"gemini-1.5-flash",
|
||||
"gemini-1.5-flash-8b",
|
||||
"gemini-1.5-flash-002",
|
||||
"gemini-1.5-flash-exp-0827",
|
||||
"learnlm-1.5-pro-experimental",
|
||||
"gemini-exp-1114",
|
||||
"gemini-exp-1121",
|
||||
"gemini-exp-1206",
|
||||
"gemini-2.0-flash",
|
||||
"gemini-2.0-flash-exp",
|
||||
@@ -633,6 +628,18 @@ const xAIModes = [
|
||||
"grok-2-vision-1212",
|
||||
"grok-2-vision",
|
||||
"grok-2-vision-latest",
|
||||
"grok-3-mini-fast-beta",
|
||||
"grok-3-mini-fast",
|
||||
"grok-3-mini-fast-latest",
|
||||
"grok-3-mini-beta",
|
||||
"grok-3-mini",
|
||||
"grok-3-mini-latest",
|
||||
"grok-3-fast-beta",
|
||||
"grok-3-fast",
|
||||
"grok-3-fast-latest",
|
||||
"grok-3-beta",
|
||||
"grok-3",
|
||||
"grok-3-latest",
|
||||
];
|
||||
|
||||
const chatglmModels = [
|
||||
|
@@ -76,7 +76,6 @@ export function collectModelTable(
|
||||
// server custom models
|
||||
customModels
|
||||
.split(",")
|
||||
.map((v) => v.trim())
|
||||
.filter((v) => !!v && v.length > 0)
|
||||
.forEach((m) => {
|
||||
const available = !m.startsWith("-");
|
||||
@@ -89,13 +88,6 @@ export function collectModelTable(
|
||||
Object.values(modelTable).forEach(
|
||||
(model) => (model.available = available),
|
||||
);
|
||||
} else if (name.startsWith("*")) {
|
||||
const modelId = name.substring(1).toLowerCase();
|
||||
Object.values(modelTable).forEach((model) => {
|
||||
if (model?.provider?.id === modelId) {
|
||||
model.available = available;
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// 1. find model by name, and set available value
|
||||
const [customModelName, customProviderName] = getModelProvider(name);
|
||||
|
@@ -83,7 +83,7 @@
|
||||
"jest": "^29.7.0",
|
||||
"jest-environment-jsdom": "^29.7.0",
|
||||
"lint-staged": "^13.2.2",
|
||||
"prettier": "^3.0.2",
|
||||
"prettier": "^3.6.2",
|
||||
"ts-node": "^10.9.2",
|
||||
"tsx": "^4.16.0",
|
||||
"typescript": "5.2.2",
|
||||
|
@@ -1,142 +0,0 @@
|
||||
import { collectModelTable } from "@/app/utils/model"
|
||||
import { LLMModel,LLMModelProvider } from "@/app/client/api";
|
||||
|
||||
describe('collectModelTable', () => {
|
||||
const mockModels: readonly LLMModel[] = [
|
||||
{
|
||||
name: 'gpt-3.5-turbo',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'openai',
|
||||
providerName: 'OpenAI',
|
||||
providerType: 'openai',
|
||||
} as LLMModelProvider,
|
||||
sorted: 1,
|
||||
},
|
||||
{
|
||||
name: 'gpt-4',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'openai',
|
||||
providerName: 'OpenAI',
|
||||
providerType: 'openai',
|
||||
} as LLMModelProvider,
|
||||
sorted: 1,
|
||||
},
|
||||
{
|
||||
name: 'gpt-3.5-turbo',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'azure',
|
||||
providerName: 'Azure',
|
||||
providerType: 'azure',
|
||||
} as LLMModelProvider,
|
||||
sorted: 2,
|
||||
},
|
||||
{
|
||||
name: 'gpt-4',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'azure',
|
||||
providerName: 'Azure',
|
||||
providerType: 'azure',
|
||||
} as LLMModelProvider,
|
||||
sorted: 2,
|
||||
},
|
||||
{
|
||||
name: 'gemini-pro',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'google',
|
||||
providerName: 'Google',
|
||||
providerType: 'google',
|
||||
} as LLMModelProvider,
|
||||
sorted: 3,
|
||||
},
|
||||
{
|
||||
name: 'claude-3-haiku-20240307',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'anthropic',
|
||||
providerName: 'Anthropic',
|
||||
providerType: 'anthropic',
|
||||
} as LLMModelProvider,
|
||||
sorted: 4,
|
||||
},
|
||||
{
|
||||
name: 'grok-beta',
|
||||
available: true,
|
||||
provider: {
|
||||
id: 'xai',
|
||||
providerName: 'XAI',
|
||||
providerType: 'xai',
|
||||
} as LLMModelProvider,
|
||||
sorted: 11,
|
||||
},
|
||||
];
|
||||
|
||||
test('all models shoule be available', () => {
|
||||
const customModels = '';
|
||||
const result = collectModelTable(mockModels, customModels);
|
||||
|
||||
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
|
||||
expect(result['gpt-4@openai'].available).toBe(true);
|
||||
expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
|
||||
expect(result['gpt-4@azure'].available).toBe(true);
|
||||
expect(result['gemini-pro@google'].available).toBe(true);
|
||||
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
|
||||
expect(result['grok-beta@xai'].available).toBe(true);
|
||||
});
|
||||
test('should exclude all models when custom is "-all"', () => {
|
||||
const customModels = '-all';
|
||||
const result = collectModelTable(mockModels, customModels);
|
||||
|
||||
expect(result['gpt-3.5-turbo@openai'].available).toBe(false);
|
||||
expect(result['gpt-4@openai'].available).toBe(false);
|
||||
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
|
||||
expect(result['gpt-4@azure'].available).toBe(false);
|
||||
expect(result['gemini-pro@google'].available).toBe(false);
|
||||
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
|
||||
expect(result['grok-beta@xai'].available).toBe(false);
|
||||
});
|
||||
|
||||
test('should exclude all Azure models when custom is "-*azure"', () => {
|
||||
const customModels = '-*azure';
|
||||
const result = collectModelTable(mockModels, customModels);
|
||||
|
||||
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
|
||||
expect(result['gpt-4@openai'].available).toBe(true);
|
||||
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
|
||||
expect(result['gpt-4@azure'].available).toBe(false);
|
||||
expect(result['gemini-pro@google'].available).toBe(true);
|
||||
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
|
||||
expect(result['grok-beta@xai'].available).toBe(true);
|
||||
});
|
||||
|
||||
test('should exclude Google and XAI models when custom is "-*google,-*xai"', () => {
|
||||
const customModels = '-*google,-*xai';
|
||||
const result = collectModelTable(mockModels, customModels);
|
||||
|
||||
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
|
||||
expect(result['gpt-4@openai'].available).toBe(true);
|
||||
expect(result['gpt-3.5-turbo@azure'].available).toBe(true);
|
||||
expect(result['gpt-4@azure'].available).toBe(true);
|
||||
expect(result['gemini-pro@google'].available).toBe(false);
|
||||
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(true);
|
||||
expect(result['grok-beta@xai'].available).toBe(false);
|
||||
});
|
||||
|
||||
test('All models except OpenAI should be excluded, and additional models should be added when customized as "-all, +*openai,gpt-4o@azure"', () => {
|
||||
const customModels = '-all,+*openai,gpt-4o@azure';
|
||||
const result = collectModelTable(mockModels, customModels);
|
||||
|
||||
expect(result['gpt-3.5-turbo@openai'].available).toBe(true);
|
||||
expect(result['gpt-4@openai'].available).toBe(true);
|
||||
expect(result['gpt-3.5-turbo@azure'].available).toBe(false);
|
||||
expect(result['gpt-4@azure'].available).toBe(false);
|
||||
expect(result['gemini-pro@google'].available).toBe(false);
|
||||
expect(result['claude-3-haiku-20240307@anthropic'].available).toBe(false);
|
||||
expect(result['grok-beta@xai'].available).toBe(false);
|
||||
expect(result['gpt-4o@azure'].available).toBe(true);
|
||||
});
|
||||
});
|
@@ -7076,10 +7076,10 @@ prettier-linter-helpers@^1.0.0:
|
||||
dependencies:
|
||||
fast-diff "^1.1.2"
|
||||
|
||||
prettier@^3.0.2:
|
||||
version "3.0.2"
|
||||
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.2.tgz#78fcecd6d870551aa5547437cdae39d4701dca5b"
|
||||
integrity sha512-o2YR9qtniXvwEZlOKbveKfDQVyqxbEIWn48Z8m3ZJjBjcCmUy3xZGIv+7AkaeuaTr6yPXJjwv07ZWlsWbEy1rQ==
|
||||
prettier@^3.6.2:
|
||||
version "3.6.2"
|
||||
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.6.2.tgz#ccda02a1003ebbb2bfda6f83a074978f608b9393"
|
||||
integrity sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==
|
||||
|
||||
pretty-format@^27.0.2:
|
||||
version "27.5.1"
|
||||
|
Reference in New Issue
Block a user