mirror of
https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-09-27 05:36:39 +08:00
feat: #327
This commit is contained in:
parent
8b501ccf2c
commit
798b751f2b
32
README.md
32
README.md
@ -282,15 +282,15 @@ Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.micro
|
|||||||
|
|
||||||
如果你不想让用户使用历史摘要功能,将此环境变量设置为 1 即可。
|
如果你不想让用户使用历史摘要功能,将此环境变量设置为 1 即可。
|
||||||
|
|
||||||
### `ANTHROPIC_API_KEY` (optional)
|
### `ANTHROPIC_API_KEY` (可选)
|
||||||
|
|
||||||
anthropic claude Api Key.
|
anthropic claude Api Key.
|
||||||
|
|
||||||
### `ANTHROPIC_API_VERSION` (optional)
|
### `ANTHROPIC_API_VERSION` (可选)
|
||||||
|
|
||||||
anthropic claude Api version.
|
anthropic claude Api version.
|
||||||
|
|
||||||
### `ANTHROPIC_URL` (optional)
|
### `ANTHROPIC_URL` (可选)
|
||||||
|
|
||||||
anthropic claude Api Url.
|
anthropic claude Api Url.
|
||||||
|
|
||||||
@ -305,11 +305,31 @@ For Azure: use `modelName@azure=deploymentName` to customize model name and depl
|
|||||||
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
|
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
|
||||||
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
|
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
|
||||||
|
|
||||||
### `DEFAULT_MODEL` (optional)
|
### `CUSTOM_MODELS` (可选)
|
||||||
|
|
||||||
Change default model
|
> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。
|
||||||
|
> 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo`
|
||||||
|
|
||||||
### `WHITE_WEBDAV_ENDPOINTS` (optional)
|
用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
|
||||||
|
|
||||||
|
在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)
|
||||||
|
> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
|
||||||
|
> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话默认使用 `gpt35(Azure)`
|
||||||
|
|
||||||
|
在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name)
|
||||||
|
> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项
|
||||||
|
|
||||||
|
### `DEFAULT_MODEL` (可选)
|
||||||
|
|
||||||
|
更改默认模型
|
||||||
|
|
||||||
|
### `USE_REMOTE_MODELS` (可选)
|
||||||
|
|
||||||
|
如果你想使用远程模型列表,将此环境变量设置为 1 即可
|
||||||
|
可以与 `CUSTOM_MODELS` 参数一起使用
|
||||||
|
建议配合 `one-api` 类似的中转项目使用
|
||||||
|
|
||||||
|
### `WHITE_WEBDAV_ENDPOINTS` (可选)
|
||||||
|
|
||||||
如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求:
|
如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求:
|
||||||
- 每一个地址必须是一个完整的 endpoint
|
- 每一个地址必须是一个完整的 endpoint
|
||||||
|
@ -18,6 +18,7 @@ const DANGER_CONFIG = {
|
|||||||
edgeTTSVoiceName: serverConfig.edgeTTSVoiceName,
|
edgeTTSVoiceName: serverConfig.edgeTTSVoiceName,
|
||||||
isUseOpenAIEndpointForAllModels: serverConfig.isUseOpenAIEndpointForAllModels,
|
isUseOpenAIEndpointForAllModels: serverConfig.isUseOpenAIEndpointForAllModels,
|
||||||
disableModelProviderDisplay: serverConfig.disableModelProviderDisplay,
|
disableModelProviderDisplay: serverConfig.disableModelProviderDisplay,
|
||||||
|
isUseRemoteModels: serverConfig.isUseRemoteModels,
|
||||||
};
|
};
|
||||||
|
|
||||||
declare global {
|
declare global {
|
||||||
|
@ -51,7 +51,8 @@ export interface OpenAIListModelResponse {
|
|||||||
data: Array<{
|
data: Array<{
|
||||||
id: string;
|
id: string;
|
||||||
object: string;
|
object: string;
|
||||||
root: string;
|
created: number;
|
||||||
|
owned_by: string;
|
||||||
}>;
|
}>;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -80,7 +81,7 @@ export interface DalleRequestPayload {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export class ChatGPTApi implements LLMApi {
|
export class ChatGPTApi implements LLMApi {
|
||||||
private disableListModels = true;
|
private disableListModels = false;
|
||||||
|
|
||||||
path(path: string, model?: string): string {
|
path(path: string, model?: string): string {
|
||||||
const accessStore = useAccessStore.getState();
|
const accessStore = useAccessStore.getState();
|
||||||
@ -651,7 +652,8 @@ export class ChatGPTApi implements LLMApi {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async models(): Promise<LLMModel[]> {
|
async models(): Promise<LLMModel[]> {
|
||||||
if (this.disableListModels) {
|
const accessStore = useAccessStore.getState();
|
||||||
|
if (!accessStore.isUseRemoteModels) {
|
||||||
return DEFAULT_MODELS.slice();
|
return DEFAULT_MODELS.slice();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -663,25 +665,27 @@ export class ChatGPTApi implements LLMApi {
|
|||||||
});
|
});
|
||||||
|
|
||||||
const resJson = (await res.json()) as OpenAIListModelResponse;
|
const resJson = (await res.json()) as OpenAIListModelResponse;
|
||||||
const chatModels = resJson.data?.filter(
|
// const chatModels = resJson.data?.filter(
|
||||||
(m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
|
// (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
|
||||||
);
|
// );
|
||||||
|
const chatModels = resJson.data.sort((a, b) => {
|
||||||
|
return b.created - a.created;
|
||||||
|
});
|
||||||
console.log("[Models]", chatModels);
|
console.log("[Models]", chatModels);
|
||||||
|
|
||||||
if (!chatModels) {
|
if (!chatModels) {
|
||||||
return [];
|
return [];
|
||||||
}
|
}
|
||||||
|
|
||||||
//由于目前 OpenAI 的 disableListModels 默认为 true,所以当前实际不会运行到这里
|
|
||||||
let seq = 1000; //同 Constant.ts 中的排序保持一致
|
let seq = 1000; //同 Constant.ts 中的排序保持一致
|
||||||
return chatModels.map((m) => ({
|
return chatModels.map((m) => ({
|
||||||
name: m.id,
|
name: m.id,
|
||||||
available: true,
|
available: true,
|
||||||
sorted: seq++,
|
sorted: seq++,
|
||||||
provider: {
|
provider: {
|
||||||
id: "openai",
|
id: m.owned_by.toLowerCase(),
|
||||||
providerName: "OpenAI",
|
providerName: m.owned_by,
|
||||||
providerType: "openai",
|
providerType: m.owned_by.toLowerCase(),
|
||||||
sorted: 1,
|
sorted: 1,
|
||||||
},
|
},
|
||||||
}));
|
}));
|
||||||
|
@ -238,5 +238,6 @@ export const getServerSideConfig = () => {
|
|||||||
!!process.env.USE_OPENAI_ENDPOINT_FOR_ALL_MODELS,
|
!!process.env.USE_OPENAI_ENDPOINT_FOR_ALL_MODELS,
|
||||||
|
|
||||||
disableModelProviderDisplay: !!process.env.DISABLE_MODEL_PROVIDER_DISPLAY,
|
disableModelProviderDisplay: !!process.env.DISABLE_MODEL_PROVIDER_DISPLAY,
|
||||||
|
isUseRemoteModels: !!process.env.USE_REMOTE_MODELS,
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
@ -130,8 +130,8 @@ const DEFAULT_ACCESS_STATE = {
|
|||||||
edgeTTSVoiceName: "zh-CN-YunxiNeural",
|
edgeTTSVoiceName: "zh-CN-YunxiNeural",
|
||||||
|
|
||||||
isUseOpenAIEndpointForAllModels: false,
|
isUseOpenAIEndpointForAllModels: false,
|
||||||
|
|
||||||
disableModelProviderDisplay: false,
|
disableModelProviderDisplay: false,
|
||||||
|
isUseRemoteModels: false,
|
||||||
};
|
};
|
||||||
|
|
||||||
export const useAccessStore = createPersistStore(
|
export const useAccessStore = createPersistStore(
|
||||||
@ -156,6 +156,12 @@ export const useAccessStore = createPersistStore(
|
|||||||
return get().isUseOpenAIEndpointForAllModels;
|
return get().isUseOpenAIEndpointForAllModels;
|
||||||
},
|
},
|
||||||
|
|
||||||
|
useRemoteModels() {
|
||||||
|
this.fetch();
|
||||||
|
|
||||||
|
return get().isUseRemoteModels;
|
||||||
|
},
|
||||||
|
|
||||||
edgeVoiceName() {
|
edgeVoiceName() {
|
||||||
this.fetch();
|
this.fetch();
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user