Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-10-12 21:13:43 +08:00)

Compare commits: 6305-bugth ... 8887cb7e74 (30 commits)
Commits (SHA1):
8887cb7e74, 705dffc664, 02f7e6de98, 4bacfe53c0, 2ec83729ec, e6278f7f07,
47e9c2d3b7, 09b1faed17, b8d9a5a604, 2a59a38c23, 3809375694, 1b0de25986,
865c45dd29, 1f5d8e6d9c, c9ef6d58ed, 2d7229d2b8, 11b37c15bd, 1d0038f17d,
619fa519c0, 48469bd8ca, 5a5e887f2b, b6f5d75656, 0d41a17ef6, f7cde17919,
570cbb34b6, 7aa9ae0a3e, ad6666eeaf, a2c4e468a0, 0a25a1a8cb, b709ee3983
.env.template

@@ -81,3 +81,9 @@ SILICONFLOW_API_KEY=
 
 ### siliconflow Api url (optional)
 SILICONFLOW_URL=
+
+### openrouter Api key (optional)
+OPENROUTER_API_KEY=
+
+### openrouter Api url (optional)
+OPENROUTER_URL=
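For a quick local check, the two new variables can be filled in like their SiliconFlow counterparts above; the key value below is a placeholder (the sk-or-v1- prefix is illustrative) and the URL is simply the OPENROUTER_BASE_URL default added to app/constant.ts in this change:

OPENROUTER_API_KEY=sk-or-v1-xxxxxxxx
OPENROUTER_URL=https://openrouter.ai/api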
README.md (41 changed lines)
@@ -7,7 +7,7 @@
 
 
 
-<h1 align="center">NextChat (ChatGPT Next Web)</h1>
+<h1 align="center">NextChat</h1>
 
 English / [简体中文](./README_CN.md)
 
@@ -22,8 +22,7 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
 
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
-[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
 
 
 [saas-url]: https://nextchat.club?utm_source=readme
@@ -41,31 +40,14 @@ English / [简体中文](./README_CN.md)
 
 </div>
 
-## 👋 Hey, NextChat is going to develop a native app!
+## 🥳 Cheer for NextChat iOS Version Online!
+> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
 
-> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!
+> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
 
-
-
-✨ Several key points:
-
-- Starting from 0, you are a veteran
-- Completely open source, not hidden
-- Native development, pursuing the ultimate experience
-
-Will you come and do something together? 😎
-
-https://github.com/ChatGPTNextWeb/NextChat/issues/6269
-
-#Seeking for talents is thirsty #lack of people
-
-
-## 🥳 Cheer for DeepSeek, China's AI star!
-> Purpose-Built UI for DeepSeek Reasoner Model
-
-<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
-
-
 
 ## 🫣 NextChat Support MCP !
 > Before build, please set env ENABLE_MCP=true
@@ -329,6 +311,9 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model
 User `-all` to disable all default models, `+all` to enable all default models.
 
+Models from OpenRouter (except `openrouter/auto`) need to be configured manually, use `+provider/model@OpenRouter`.
+> Example: `+qwen/qwen3-32b:free@OpenRouter` will show `qwen/qwen3-32b:free(OpenRouter)` in model list.
+
 For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
 > Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
 > If you only can use Azure model, `-all,+gpt-3.5-turbo@Azure=gpt35` will `gpt35(Azure)` the only option in model list.
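Putting the flags documented above together, one plausible combined CUSTOM_MODELS value (the model names are illustrative) that hides all built-in defaults and exposes only OpenRouter entries would be:

CUSTOM_MODELS=-all,+openrouter/auto@OpenRouter,+qwen/qwen3-32b:free@OpenRouter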
@@ -379,6 +364,14 @@ SiliconFlow API Key.
 
 SiliconFlow API URL.
 
+### `OPENROUTER_API_KEY` (optional)
+
+OpenRouter API Key.
+
+### `OPENROUTER_URL` (optional)
+
+OpenRouter API URL.
+
 ## Requirements
 
 NodeJS >= 18, Docker >= 20
README_CN.md (11 changed lines)
@@ -232,6 +232,9 @@ DeepSeek Api Url.
 
 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
 
+OpenRouter 提供的模型(除 `openrouter/auto` 外)需要手动配置,使用 `+provider/model@OpenRouter`。
+> 示例:`+qwen/qwen3-32b:free@OpenRouter` 这个配置会在模型列表显示一个 `qwen/qwen3-32b:free(OpenRouter)` 的选项。
+
 在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)
 > 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
 > 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`
@@ -275,6 +278,14 @@ SiliconFlow API Key.
 
 SiliconFlow API URL.
 
+### `OPENROUTER_API_KEY` (optional)
+
+OpenRouter API Key.
+
+### `OPENROUTER_URL` (optional)
+
+OpenRouter API URL.
+
 ## 开发
 
 点击下方按钮,开始二次开发:
app/api/[provider]/[...path]/route.ts

@@ -15,6 +15,7 @@ import { handle as siliconflowHandler } from "../../siliconflow";
 import { handle as xaiHandler } from "../../xai";
 import { handle as chatglmHandler } from "../../glm";
 import { handle as proxyHandler } from "../../proxy";
+import { handle as openrouterHandler } from "../../openrouter";
 
 async function handle(
   req: NextRequest,
@@ -50,6 +51,8 @@ async function handle(
       return chatglmHandler(req, { params });
     case ApiPath.SiliconFlow:
       return siliconflowHandler(req, { params });
+    case ApiPath.OpenRouter:
+      return openrouterHandler(req, { params });
     case ApiPath.OpenAI:
       return openaiHandler(req, { params });
     default:
app/api/auth.ts

@@ -104,6 +104,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
     case ModelProvider.SiliconFlow:
       systemApiKey = serverConfig.siliconFlowApiKey;
       break;
+    case ModelProvider.OpenRouter:
+      systemApiKey = serverConfig.openrouterApiKey;
+      break;
     case ModelProvider.GPT:
     default:
       if (req.nextUrl.pathname.includes("azure/deployments")) {
app/api/openrouter.ts (new file, 128 lines)

@@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
  OPENROUTER_BASE_URL,
  ApiPath,
  ModelProvider,
  ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();

export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[OpenRouter Route] params ", params);

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authResult = auth(req, ModelProvider.OpenRouter);
  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  try {
    const response = await request(req);
    return response;
  } catch (e) {
    console.error("[OpenRouter] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

async function request(req: NextRequest) {
  const controller = new AbortController();

  // alibaba use base url or just remove the path
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.OpenRouter, "");

  let baseUrl = serverConfig.openrouterUrl || OPENROUTER_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.OpenRouter as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      console.error(`[OpenRouter] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}
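A minimal sketch of the path rewrite performed in request() above, assuming the ApiPath.OpenRouter value "/api/openrouter" and the OPENROUTER_BASE_URL default added elsewhere in this change:

// Illustrative only: how an incoming proxy URL maps to the upstream URL.
const apiPath = "/api/openrouter"; // mirrors ApiPath.OpenRouter
const incoming = "/api/openrouter/v1/chat/completions";
const path = incoming.replaceAll(apiPath, ""); // "/v1/chat/completions"
console.log(`https://openrouter.ai/api${path}`);
// -> https://openrouter.ai/api/v1/chat/completions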
app/client/api.ts

@@ -24,6 +24,7 @@ import { DeepSeekApi } from "./platforms/deepseek";
 import { XAIApi } from "./platforms/xai";
 import { ChatGLMApi } from "./platforms/glm";
 import { SiliconflowApi } from "./platforms/siliconflow";
+import { OpenRouterApi } from "./platforms/openrouter";
 
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -40,6 +41,11 @@ export interface MultimodalContent {
   };
 }
 
+export interface MultimodalContentForAlibaba {
+  text?: string;
+  image?: string;
+}
+
 export interface RequestMessage {
   role: MessageRole;
   content: string | MultimodalContent[];
@@ -168,6 +174,9 @@ export class ClientApi {
       case ModelProvider.SiliconFlow:
         this.llm = new SiliconflowApi();
         break;
+      case ModelProvider.OpenRouter:
+        this.llm = new OpenRouterApi();
+        break;
       default:
         this.llm = new ChatGPTApi();
     }
@@ -260,6 +269,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
   const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
   const isSiliconFlow =
     modelConfig.providerName === ServiceProvider.SiliconFlow;
+  const isOpenRouter =
+    modelConfig.providerName === ServiceProvider.OpenRouter;
   const isEnabledAccessControl = accessStore.enabledAccessControl();
   const apiKey = isGoogle
     ? accessStore.googleApiKey
@@ -281,6 +292,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     ? accessStore.chatglmApiKey
     : isSiliconFlow
       ? accessStore.siliconflowApiKey
+      : isOpenRouter
+        ? accessStore.openrouterApiKey
       : isIflytek
         ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
           ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
@@ -299,6 +312,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     isXAI,
     isChatGLM,
     isSiliconFlow,
+    isOpenRouter,
     apiKey,
     isEnabledAccessControl,
   };
@@ -327,6 +341,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     isXAI,
     isChatGLM,
     isSiliconFlow,
+    isOpenRouter,
     apiKey,
     isEnabledAccessControl,
   } = getConfig();
@@ -377,6 +392,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
       return new ClientApi(ModelProvider.ChatGLM);
     case ServiceProvider.SiliconFlow:
       return new ClientApi(ModelProvider.SiliconFlow);
+    case ServiceProvider.OpenRouter:
+      return new ClientApi(ModelProvider.OpenRouter);
     default:
       return new ClientApi(ModelProvider.GPT);
   }
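A short sketch of how the wiring above resolves end to end, assuming the enum members and switch cases added in this change:

import { getClientApi } from "@/app/client/api";
import { ServiceProvider } from "@/app/constant";

// The new case in getClientApi routes the provider to the new platform client:
const api = getClientApi(ServiceProvider.OpenRouter); // -> new ClientApi(ModelProvider.OpenRouter)
// getHeaders() likewise resolves accessStore.openrouterApiKey whenever
// modelConfig.providerName === ServiceProvider.OpenRouter.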
app/client/platforms/alibaba.ts

@@ -7,7 +7,10 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import {
+  preProcessImageContentForAlibabaDashScope,
+  streamWithThink,
+} from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -15,12 +18,14 @@ import {
   LLMModel,
   SpeechOptions,
   MultimodalContent,
+  MultimodalContentForAlibaba,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   getTimeoutMSByModel,
+  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
@@ -89,14 +94,6 @@ export class QwenApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content:
-        v.role === "assistant"
-          ? getMessageTextContentWithoutThinking(v)
-          : getMessageTextContent(v),
-    }));
-
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -105,6 +102,21 @@ export class QwenApi implements LLMApi {
       },
     };
 
+    const visionModel = isVisionModel(options.config.model);
+
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = (
+        visionModel
+          ? await preProcessImageContentForAlibabaDashScope(v.content)
+          : v.role === "assistant"
+            ? getMessageTextContentWithoutThinking(v)
+            : getMessageTextContent(v)
+      ) as any;
+
+      messages.push({ role: v.role, content });
+    }
+
     const shouldStream = !!options.config.stream;
     const requestPayload: RequestPayload = {
       model: modelConfig.model,
@@ -129,7 +141,7 @@ export class QwenApi implements LLMApi {
       "X-DashScope-SSE": shouldStream ? "enable" : "disable",
     };
 
-    const chatPath = this.path(Alibaba.ChatPath);
+    const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
     const chatPayload = {
       method: "POST",
       body: JSON.stringify(requestPayload),
@@ -162,7 +174,7 @@ export class QwenApi implements LLMApi {
           const json = JSON.parse(text);
           const choices = json.output.choices as Array<{
             message: {
-              content: string | null;
+              content: string | null | MultimodalContentForAlibaba[];
               tool_calls: ChatMessageTool[];
               reasoning_content: string | null;
             };
@@ -212,7 +224,9 @@ export class QwenApi implements LLMApi {
           } else if (content && content.length > 0) {
             return {
               isThinking: false,
-              content: content,
+              content: Array.isArray(content)
+                ? content.map((item) => item.text).join(",")
+                : content,
             };
           }
app/client/platforms/openai.ts

@@ -198,7 +198,8 @@ export class ChatGPTApi implements LLMApi {
     const isDalle3 = _isDalle3(options.config.model);
     const isO1OrO3 =
       options.config.model.startsWith("o1") ||
-      options.config.model.startsWith("o3");
+      options.config.model.startsWith("o3") ||
+      options.config.model.startsWith("o4-mini");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -243,7 +244,7 @@ export class ChatGPTApi implements LLMApi {
       }
 
       // add max_tokens to vision model
-      if (visionModel) {
+      if (visionModel && !isO1OrO3) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
app/client/platforms/openrouter.ts (new file, 287 lines)

@@ -0,0 +1,287 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
  OPENROUTER_BASE_URL,
  OpenRouter,
  DEFAULT_MODELS,
} from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageTextContentWithoutThinking,
  isVisionModel,
  getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";

import { fetch } from "@/app/utils/stream";

export interface OpenRouterListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

export class OpenRouterApi implements LLMApi {
  private disableListModels = false;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.openrouterUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.OpenRouter;
      baseUrl = isApp ? OPENROUTER_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath.OpenRouter)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(OpenRouter.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // console.log(chatPayload);

      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(OpenRouter.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as OpenRouterListModelResponse;
    const chatModels = resJson.data;
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    let seq = 1000; //同 Constant.ts 中的排序保持一致
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "openrouter",
        providerName: "OpenRouter",
        providerType: "openrouter",
        sorted: 15,
      },
    }));
  }
}
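One detail worth noting in the parseSSE handler above: OpenRouter streams reasoning tokens in a delta field named reasoning (not DeepSeek-style reasoning_content), which is why the handler maps reasoning to isThinking: true. A sketch of the delta shape it assumes (field names taken from the code above, not an official SDK type):

type OpenRouterStreamDelta = {
  content: string | null;
  reasoning: string | null;
  tool_calls?: Array<{
    index?: number;
    id?: string;
    type?: string;
    function?: { name?: string; arguments?: string };
  }>;
};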
app/components/emoji.tsx

@@ -57,18 +57,32 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     modelName.startsWith("dall-e") ||
     modelName.startsWith("dalle") ||
     modelName.startsWith("o1") ||
-    modelName.startsWith("o3")
+    modelName.startsWith("o3") ||
+    modelName.startsWith("openai/")
   ) {
     LlmIcon = BotIconOpenAI;
-  } else if (modelName.startsWith("gemini")) {
+  } else if (
+    modelName.startsWith("gemini") ||
+    modelName.startsWith("google/gemini")
+  ) {
     LlmIcon = BotIconGemini;
-  } else if (modelName.startsWith("gemma")) {
+  } else if (
+    modelName.startsWith("gemma") ||
+    modelName.startsWith("google/gemma")
+  ) {
     LlmIcon = BotIconGemma;
-  } else if (modelName.startsWith("claude")) {
+  } else if (
+    modelName.startsWith("claude") ||
+    modelName.startsWith("anthropic/claude")
+  ) {
     LlmIcon = BotIconClaude;
   } else if (modelName.includes("llama")) {
     LlmIcon = BotIconMeta;
-  } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
+  } else if (
+    modelName.startsWith("mixtral") ||
+    modelName.startsWith("codestral") ||
+    modelName.startsWith("mistralai/")
+  ) {
     LlmIcon = BotIconMistral;
   } else if (modelName.includes("deepseek")) {
     LlmIcon = BotIconDeepseek;
@@ -78,7 +92,10 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconQwen;
   } else if (modelName.startsWith("ernie")) {
     LlmIcon = BotIconWenxin;
-  } else if (modelName.startsWith("grok")) {
+  } else if (
+    modelName.startsWith("grok") ||
+    modelName.startsWith("x-ai/grok")
+  ) {
     LlmIcon = BotIconGrok;
   } else if (modelName.startsWith("hunyuan")) {
     LlmIcon = BotIconHunyuan;
app/components/settings.tsx

@@ -75,6 +75,7 @@ import {
   ChatGLM,
   DeepSeek,
   SiliconFlow,
+  OpenRouter,
 } from "../constant";
 import { Prompt, SearchService, usePromptStore } from "../store/prompt";
 import { ErrorBoundary } from "./error";
@@ -1359,6 +1360,46 @@ export function Settings() {
       </ListItem>
     </>
   );
+  const openrouterConfigComponent = accessStore.provider ===
+    ServiceProvider.OpenRouter && (
+    <>
+      <ListItem
+        title={Locale.Settings.Access.OpenRouter.Endpoint.Title}
+        subTitle={
+          Locale.Settings.Access.OpenRouter.Endpoint.SubTitle +
+          OpenRouter.ExampleEndpoint
+        }
+      >
+        <input
+          aria-label={Locale.Settings.Access.OpenRouter.Endpoint.Title}
+          type="text"
+          value={accessStore.openrouterUrl}
+          placeholder={OpenRouter.ExampleEndpoint}
+          onChange={(e) =>
+            accessStore.update(
+              (access) => (access.openrouterUrl = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Access.OpenRouter.ApiKey.Title}
+        subTitle={Locale.Settings.Access.OpenRouter.ApiKey.SubTitle}
+      >
+        <PasswordInput
+          aria-label={Locale.Settings.Access.OpenRouter.ApiKey.Title}
+          value={accessStore.openrouterApiKey}
+          type="text"
+          placeholder={Locale.Settings.Access.OpenRouter.ApiKey.Placeholder}
+          onChange={(e) => {
+            accessStore.update(
+              (access) => (access.openrouterApiKey = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+    </>
+  );
 
   const stabilityConfigComponent = accessStore.provider ===
     ServiceProvider.Stability && (
@@ -1822,6 +1863,7 @@ export function Settings() {
               {XAIConfigComponent}
               {chatglmConfigComponent}
               {siliconflowConfigComponent}
+              {openrouterConfigComponent}
             </>
           )}
         </>
app/config/server.ts

@@ -88,6 +88,10 @@ declare global {
       SILICONFLOW_URL?: string;
       SILICONFLOW_API_KEY?: string;
 
+      // openrouter only
+      OPENROUTER_URL?: string;
+      OPENROUTER_API_KEY?: string;
+
       // custom template for preprocessing user input
       DEFAULT_INPUT_TEMPLATE?: string;
 
@@ -163,6 +167,7 @@ export const getServerSideConfig = () => {
   const isXAI = !!process.env.XAI_API_KEY;
   const isChatGLM = !!process.env.CHATGLM_API_KEY;
   const isSiliconFlow = !!process.env.SILICONFLOW_API_KEY;
+  const isOpenRouter = !!process.env.OPENROUTER_API_KEY;
   // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
   // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
   // const randomIndex = Math.floor(Math.random() * apiKeys.length);
@@ -246,6 +251,10 @@ export const getServerSideConfig = () => {
     siliconFlowUrl: process.env.SILICONFLOW_URL,
     siliconFlowApiKey: getApiKey(process.env.SILICONFLOW_API_KEY),
 
+    isOpenRouter,
+    openrouterUrl: process.env.OPENROUTER_URL,
+    openrouterApiKey: getApiKey(process.env.OPENROUTER_API_KEY),
+
     gtmId: process.env.GTM_ID,
     gaId: process.env.GA_ID || DEFAULT_GA_ID,
app/constant.ts

@@ -36,6 +36,8 @@ export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
 
 export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";
 
+export const OPENROUTER_BASE_URL = "https://openrouter.ai/api";
+
 export const CACHE_URL_PREFIX = "/api/cache";
 export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;
 
@@ -72,6 +74,7 @@ export enum ApiPath {
   ChatGLM = "/api/chatglm",
   DeepSeek = "/api/deepseek",
   SiliconFlow = "/api/siliconflow",
+  OpenRouter = "/api/openrouter",
 }
 
 export enum SlotID {
@@ -130,6 +133,7 @@ export enum ServiceProvider {
   ChatGLM = "ChatGLM",
   DeepSeek = "DeepSeek",
   SiliconFlow = "SiliconFlow",
+  OpenRouter = "OpenRouter",
 }
 
 // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -156,6 +160,7 @@ export enum ModelProvider {
   ChatGLM = "ChatGLM",
   DeepSeek = "DeepSeek",
   SiliconFlow = "SiliconFlow",
+  OpenRouter = "OpenRouter",
 }
 
 export const Stability = {
@@ -221,7 +226,12 @@ export const ByteDance = {
 
 export const Alibaba = {
   ExampleEndpoint: ALIBABA_BASE_URL,
-  ChatPath: "v1/services/aigc/text-generation/generation",
+  ChatPath: (modelName: string) => {
+    if (modelName.includes("vl") || modelName.includes("omni")) {
+      return "v1/services/aigc/multimodal-generation/generation";
+    }
+    return `v1/services/aigc/text-generation/generation`;
+  },
 };
 
 export const Tencent = {
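The change above makes the DashScope chat path depend on the model name; a standalone sketch of the dispatch:

// Mirrors Alibaba.ChatPath above: "vl" (vision) and "omni" models use the multimodal endpoint.
const chatPathFor = (modelName: string) =>
  modelName.includes("vl") || modelName.includes("omni")
    ? "v1/services/aigc/multimodal-generation/generation"
    : "v1/services/aigc/text-generation/generation";

console.log(chatPathFor("qwen-vl-max")); // multimodal-generation
console.log(chatPathFor("qwen-max")); // text-generation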
@@ -261,6 +271,12 @@ export const SiliconFlow = {
   ListModelPath: "v1/models?&sub_type=chat",
 };
 
+export const OpenRouter = {
+  ExampleEndpoint: OPENROUTER_BASE_URL,
+  ChatPath: "v1/chat/completions",
+  ListModelPath: "v1/models",
+};
+
 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
 // export const DEFAULT_SYSTEM_TEMPLATE = `
 // You are ChatGPT, a large language model trained by {{ServiceProvider}}.
@@ -412,6 +428,14 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-turbo": "2023-12",
   "gpt-4-turbo-2024-04-09": "2023-12",
   "gpt-4-turbo-preview": "2023-12",
+  "gpt-4.1": "2024-06",
+  "gpt-4.1-2025-04-14": "2024-06",
+  "gpt-4.1-mini": "2024-06",
+  "gpt-4.1-mini-2025-04-14": "2024-06",
+  "gpt-4.1-nano": "2024-06",
+  "gpt-4.1-nano-2025-04-14": "2024-06",
+  "gpt-4.5-preview": "2023-10",
+  "gpt-4.5-preview-2025-02-27": "2023-10",
   "gpt-4o": "2023-10",
   "gpt-4o-2024-05-13": "2023-10",
   "gpt-4o-2024-08-06": "2023-10",
@@ -453,6 +477,7 @@ export const DEFAULT_TTS_VOICES = [
 export const VISION_MODEL_REGEXES = [
   /vision/,
   /gpt-4o/,
+  /gpt-4\.1/,
   /claude-3/,
   /gemini-1\.5/,
   /gemini-exp/,
@@ -464,6 +489,8 @@ export const VISION_MODEL_REGEXES = [
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
   /vl/i,
+  /o3/,
+  /o4-mini/,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -480,6 +507,14 @@ const openaiModels = [
   "gpt-4-32k-0613",
   "gpt-4-turbo",
   "gpt-4-turbo-preview",
+  "gpt-4.1",
+  "gpt-4.1-2025-04-14",
+  "gpt-4.1-mini",
+  "gpt-4.1-mini-2025-04-14",
+  "gpt-4.1-nano",
+  "gpt-4.1-nano-2025-04-14",
+  "gpt-4.5-preview",
+  "gpt-4.5-preview-2025-02-27",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",
@@ -494,6 +529,8 @@ const openaiModels = [
   "o1-mini",
   "o1-preview",
   "o3-mini",
+  "o3",
+  "o4-mini",
 ];
 
 const googleModels = [
@@ -520,6 +557,7 @@ const googleModels = [
   "gemini-2.0-flash-thinking-exp-01-21",
   "gemini-2.0-pro-exp",
   "gemini-2.0-pro-exp-02-05",
+  "gemini-2.5-pro-preview-06-05",
 ];
 
 const anthropicModels = [
@@ -570,6 +608,9 @@ const alibabaModes = [
   "qwen-max-0403",
   "qwen-max-0107",
   "qwen-max-longcontext",
+  "qwen-omni-turbo",
+  "qwen-vl-plus",
+  "qwen-vl-max",
 ];
 
 const tencentModels = [
@@ -642,6 +683,12 @@ const siliconflowModels = [
   "Pro/deepseek-ai/DeepSeek-V3",
 ];
 
+// Use this to generate a full model list -> https://gist.github.com/hyc1230/d4b271d161ffcda485f1fa1a27e08096
+const openrouterModels = [
+  // Requires user to customize models
+  "openrouter/auto",
+];
+
 let seq = 1000; // 内置的模型序号生成器从1000开始
 export const DEFAULT_MODELS = [
   ...openaiModels.map((name) => ({
@@ -798,6 +845,17 @@ export const DEFAULT_MODELS = [
       sorted: 14,
     },
   })),
+  ...openrouterModels.map((name) => ({
+    name,
+    available: true,
+    sorted: seq++,
+    provider: {
+      id: "openrouter",
+      providerName: "OpenRouter",
+      providerType: "openrouter",
+      sorted: 15,
+    },
+  })),
 ] as const;
 
 export const CHAT_PAGE_SIZE = 15;
app/locales/cn.ts

@@ -507,6 +507,17 @@ const cn = {
       SubTitle: "样例:",
     },
   },
+  OpenRouter: {
+    ApiKey: {
+      Title: "接口密钥",
+      SubTitle: "使用自定义 OpenRouter API Key",
+      Placeholder: "OpenRouter API Key",
+    },
+    Endpoint: {
+      Title: "接口地址",
+      SubTitle: "样例:",
+    },
+  },
   Stability: {
     ApiKey: {
       Title: "接口密钥",
app/locales/da.ts

@@ -467,6 +467,17 @@ const da: PartialLocaleType = {
       SubTitle: "F.eks.: ",
     },
   },
+  OpenRouter: {
+    ApiKey: {
+      Title: "OpenRouter-nøgle",
+      SubTitle: "Din egen OpenRouter-nøgle",
+      Placeholder: "OpenRouter API Key",
+    },
+    Endpoint: {
+      Title: "Adresse",
+      SubTitle: "F.eks.: ",
+    },
+  },
   Stability: {
     ApiKey: {
       Title: "Stability-nøgle",
app/locales/en.ts

@@ -491,6 +491,17 @@ const en: LocaleType = {
       SubTitle: "Example: ",
     },
   },
+  OpenRouter: {
+    ApiKey: {
+      Title: "OpenRouter API Key",
+      SubTitle: "Use a custom OpenRouter API Key",
+      Placeholder: "OpenRouter API Key",
+    },
+    Endpoint: {
+      Title: "Endpoint Address",
+      SubTitle: "Example: ",
+    },
+  },
   Stability: {
     ApiKey: {
       Title: "Stability API Key",
app/store/access.ts

@@ -17,6 +17,7 @@ import {
   XAI_BASE_URL,
   CHATGLM_BASE_URL,
   SILICONFLOW_BASE_URL,
+  OPENROUTER_BASE_URL,
 } from "../constant";
 import { getHeaders } from "../client/api";
 import { getClientConfig } from "../config/client";
@@ -59,6 +60,8 @@ const DEFAULT_SILICONFLOW_URL = isApp
   ? SILICONFLOW_BASE_URL
   : ApiPath.SiliconFlow;
 
+const DEFAULT_OPENROUTER_URL = isApp ? OPENROUTER_BASE_URL : ApiPath.OpenRouter;
+
 const DEFAULT_ACCESS_STATE = {
   accessCode: "",
   useCustomConfig: false,
@@ -132,6 +135,10 @@ const DEFAULT_ACCESS_STATE = {
   siliconflowUrl: DEFAULT_SILICONFLOW_URL,
   siliconflowApiKey: "",
 
+  // openrouter
+  openrouterUrl: DEFAULT_OPENROUTER_URL,
+  openrouterApiKey: "",
+
   // server config
   needCode: true,
   hideUserApiKey: false,
@@ -219,6 +226,10 @@ export const useAccessStore = createPersistStore(
       return ensure(get(), ["siliconflowApiKey"]);
     },
 
+    isValidOpenRouter() {
+      return ensure(get(), ["openrouterApiKey"]);
+    },
+
     isAuthorized() {
       this.fetch();
 
@@ -238,6 +249,7 @@ export const useAccessStore = createPersistStore(
         this.isValidXAI() ||
         this.isValidChatGLM() ||
         this.isValidSiliconFlow() ||
+        this.isValidOpenRouter() ||
         !this.enabledAccessControl() ||
         (this.enabledAccessControl() && ensure(get(), ["accessCode"]))
       );
app/utils/chat.ts

@@ -3,7 +3,7 @@ import {
   UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
-import { RequestMessage } from "@/app/client/api";
+import { MultimodalContent, RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
 import {
   EventStreamContentType,
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
   });
 }
 
-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContentBase(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -92,6 +93,23 @@
   return result;
 }
 
+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  })) as Promise<MultimodalContent[] | string>;
+}
+
+export async function preProcessImageContentForAlibabaDashScope(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
+}
+
 const imageCaches: Record<string, string> = {};
 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
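The two wrappers above feed the same cached base64 URL into different part shapes; a minimal sketch of the difference (the data URL is a stand-in for cacheImageToBase64Image() output):

const url = "data:image/png;base64,iVBORw0KGgo=";
const openaiPart = { type: "image_url", image_url: { url } }; // preProcessImageContent
const dashScopePart = { image: url }; // preProcessImageContentForAlibabaDashScope
console.log(openaiPart, dashScopePart);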
jest.config.ts

@@ -15,6 +15,8 @@ const config: Config = {
   moduleNameMapper: {
     "^@/(.*)$": "<rootDir>/$1",
   },
+  extensionsToTreatAsEsm: [".ts", ".tsx"],
+  injectGlobals: true,
 };
 
 // createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async
jest.setup.ts

@@ -1,24 +1,22 @@
 // Learn more: https://github.com/testing-library/jest-dom
 import "@testing-library/jest-dom";
+import { jest } from "@jest/globals";
 
 global.fetch = jest.fn(() =>
   Promise.resolve({
     ok: true,
     status: 200,
-    json: () => Promise.resolve({}),
+    json: () => Promise.resolve([]),
     headers: new Headers(),
     redirected: false,
     statusText: "OK",
     type: "basic",
     url: "",
-    clone: function () {
-      return this;
-    },
     body: null,
     bodyUsed: false,
     arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
     blob: () => Promise.resolve(new Blob()),
     formData: () => Promise.resolve(new FormData()),
     text: () => Promise.resolve(""),
-  }),
+  } as Response),
 );
package.json

@@ -17,8 +17,8 @@
     "prompts": "node ./scripts/fetch-prompts.mjs",
     "prepare": "husky install",
     "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
-    "test": "jest --watch",
-    "test:ci": "jest --ci"
+    "test": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --watch",
+    "test:ci": "node --no-warnings --experimental-vm-modules $(yarn bin jest) --ci"
   },
   "dependencies": {
     "@fortaine/fetch-event-source": "^3.0.6",
test/vision.test.ts

@@ -1,3 +1,4 @@
+import { jest } from "@jest/globals";
 import { isVisionModel } from "../app/utils";
 
 describe("isVisionModel", () => {
@@ -50,7 +51,7 @@ describe("isVisionModel", () => {
 
   test("should identify models from VISION_MODELS env var", () => {
     process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
 
     expect(isVisionModel("custom-vision-model")).toBe(true);
     expect(isVisionModel("another-vision-model")).toBe(true);
     expect(isVisionModel("unrelated-model")).toBe(false);
@@ -64,4 +65,4 @@ describe("isVisionModel", () => {
     expect(isVisionModel("unrelated-model")).toBe(false);
     expect(isVisionModel("gpt-4-vision")).toBe(true);
   });
 });