Compare commits


1 Commit

Author: RiverRay
SHA1: b95b1ac6f3
Message: Update README.md
Date: 2025-02-21 08:56:21 +08:00
7 changed files with 25 additions and 90 deletions

View File: README.md

@@ -22,6 +22,7 @@ English / [简体中文](./README_CN.md)
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
@@ -129,7 +130,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
 - 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
 - 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
 - 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
-- 🚀 v2.14.0 Now supports Artifacts & SD.
+- 🚀 v2.14.0 Now supports Artifacts & SD
 - 🚀 v2.10.1 support Google Gemini Pro model.
 - 🚀 v2.9.11 you can use azure endpoint now.
 - 🚀 v2.8 now we have a client that runs across all platforms!
@@ -337,7 +338,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
 ### `DEFAULT_MODEL` (optional)
-Change default model.
+Change default model
 ### `VISION_MODELS` (optional)
@@ -368,7 +369,7 @@ Customize Stability API url.
 ### `ENABLE_MCP` (optional)
-Enable MCP (Model Context Protocol) Feature.
+Enable MCP (Model Context Protocol) Feature
 ### `SILICONFLOW_API_KEY` (optional)
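The README hunks above touch the landing links, the changelog, and the optional server environment variables (`DEFAULT_MODEL`, `VISION_MODELS`, `ENABLE_MCP`, `SILICONFLOW_API_KEY`). As a rough sketch of how such options end up being consumed server-side, the snippet below reads them from the environment; the variable names come from the README, while the parsing (comma-separated `VISION_MODELS`, boolean `ENABLE_MCP`) is an assumption, not the app's actual config code.

```ts
// Illustrative only: the names match the README, the parsing is assumed.
const defaultModel = process.env.DEFAULT_MODEL ?? ""; // e.g. "gpt-4o-mini" (example value)
const visionModels = (process.env.VISION_MODELS ?? "")
  .split(",")
  .map((m) => m.trim())
  .filter(Boolean); // assumed comma-separated list
const enableMcp = process.env.ENABLE_MCP === "true"; // Model Context Protocol toggle
const siliconflowApiKey = process.env.SILICONFLOW_API_KEY; // undefined when unset

console.log({ defaultModel, visionModels, enableMcp, siliconflowApiKey });
```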

View File: app/client/api.ts

@@ -40,11 +40,6 @@ export interface MultimodalContent {
   };
 }
-export interface MultimodalContentForAlibaba {
-  text?: string;
-  image?: string;
-}
 export interface RequestMessage {
   role: MessageRole;
   content: string | MultimodalContent[];
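With `MultimodalContentForAlibaba` gone, request messages are either a plain string or the generic `MultimodalContent[]` shape from the surrounding context. A minimal sketch of such a message, using the field layout visible in this diff and in `app/utils/chat.ts` below (the sample values are invented):

```ts
import { MultimodalContent, RequestMessage } from "@/app/client/api";

// A text-plus-image user message in the generic multimodal shape.
// The URL is a placeholder; a data: URL produced by the image cache works the same way.
const message: RequestMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ],
};

console.log(JSON.stringify(message, null, 2));
```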

View File: app/client/platforms/alibaba.ts

@@ -7,10 +7,7 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import {
-  preProcessImageContentForAlibabaDashScope,
-  streamWithThink,
-} from "@/app/utils/chat";
+import { streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -18,14 +15,12 @@ import {
   LLMModel,
   SpeechOptions,
   MultimodalContent,
-  MultimodalContentForAlibaba,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   getTimeoutMSByModel,
-  isVisionModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
@@ -94,6 +89,14 @@ export class QwenApi implements LLMApi {
   }
   async chat(options: ChatOptions) {
+    const messages = options.messages.map((v) => ({
+      role: v.role,
+      content:
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v),
+    }));
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -102,21 +105,6 @@ export class QwenApi implements LLMApi {
       },
     };
-    const visionModel = isVisionModel(options.config.model);
-    const messages: ChatOptions["messages"] = [];
-    for (const v of options.messages) {
-      const content = (
-        visionModel
-          ? await preProcessImageContentForAlibabaDashScope(v.content)
-          : v.role === "assistant"
-            ? getMessageTextContentWithoutThinking(v)
-            : getMessageTextContent(v)
-      ) as any;
-      messages.push({ role: v.role, content });
-    }
     const shouldStream = !!options.config.stream;
     const requestPayload: RequestPayload = {
       model: modelConfig.model,
@@ -141,7 +129,7 @@ export class QwenApi implements LLMApi {
       "X-DashScope-SSE": shouldStream ? "enable" : "disable",
     };
-    const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
+    const chatPath = this.path(Alibaba.ChatPath);
     const chatPayload = {
       method: "POST",
       body: JSON.stringify(requestPayload),
@@ -174,7 +162,7 @@ export class QwenApi implements LLMApi {
         const json = JSON.parse(text);
         const choices = json.output.choices as Array<{
           message: {
-            content: string | null | MultimodalContentForAlibaba[];
+            content: string | null;
             tool_calls: ChatMessageTool[];
             reasoning_content: string | null;
           };
@@ -224,9 +212,7 @@ export class QwenApi implements LLMApi {
           } else if (content && content.length > 0) {
             return {
               isThinking: false,
-              content: Array.isArray(content)
-                ? content.map((item) => item.text).join(",")
-                : content,
+              content: content,
             };
           }
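Two things change for Qwen here: messages are now built with a plain `map` (no vision preprocessing, no `isVisionModel` branch), and the stream parser no longer has to flatten DashScope's array-shaped `message.content`. The removed flattening step, shown standalone (the function name is ours; the input values are examples):

```ts
// Shape removed from app/client/api.ts in this compare.
interface MultimodalContentForAlibaba {
  text?: string;
  image?: string;
}

// Mirrors the removed parser branch: array content is joined into one string,
// plain string (or null) content passes through unchanged.
function flattenAlibabaContent(
  content: string | null | MultimodalContentForAlibaba[],
): string | null {
  return Array.isArray(content)
    ? content.map((item) => item.text).join(",")
    : content;
}

console.log(flattenAlibabaContent("hello")); // "hello"
console.log(flattenAlibabaContent([{ text: "a cat" }, { text: "on a mat" }])); // "a cat,on a mat"
```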

View File: app/client/platforms/deepseek.ts

@@ -75,25 +75,6 @@ export class DeepSeekApi implements LLMApi {
       }
     }
-    // Detect and fix the message order: make sure the first non-system message comes from the user
-    const filteredMessages: ChatOptions["messages"] = [];
-    let hasFoundFirstUser = false;
-    for (const msg of messages) {
-      if (msg.role === "system") {
-        // Keep all system messages
-        filteredMessages.push(msg);
-      } else if (msg.role === "user") {
-        // User message directly added
-        filteredMessages.push(msg);
-        hasFoundFirstUser = true;
-      } else if (hasFoundFirstUser) {
-        // After finding the first user message, all subsequent non-system messages are retained.
-        filteredMessages.push(msg);
-      }
-      // If hasFoundFirstUser is false and it is not a system message, it will be skipped.
-    }
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -104,7 +85,7 @@ export class DeepSeekApi implements LLMApi {
     };
     const requestPayload: RequestPayload = {
-      messages: filteredMessages,
+      messages,
       stream: options.config.stream,
       model: modelConfig.model,
       temperature: modelConfig.temperature,
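The block removed from `DeepSeekApi.chat` enforced the rule that, apart from system messages, the conversation sent to DeepSeek must start with a user turn. The same rule as a self-contained function, so the removed behaviour is easy to see in isolation (the `Msg` type and the function name are ours):

```ts
type Msg = { role: "system" | "user" | "assistant"; content: string };

// Keep every system message; drop non-system messages that appear
// before the first user message, exactly as the removed loop did.
function ensureFirstNonSystemIsUser(messages: Msg[]): Msg[] {
  const filtered: Msg[] = [];
  let hasFoundFirstUser = false;
  for (const msg of messages) {
    if (msg.role === "system") {
      filtered.push(msg);
    } else if (msg.role === "user") {
      filtered.push(msg);
      hasFoundFirstUser = true;
    } else if (hasFoundFirstUser) {
      filtered.push(msg);
    }
    // Anything else before the first user message is skipped.
  }
  return filtered;
}

// The leading assistant greeting is dropped; later assistant turns are kept.
console.log(
  ensureFirstNonSystemIsUser([
    { role: "system", content: "You are helpful." },
    { role: "assistant", content: "Hi! How can I help?" },
    { role: "user", content: "Summarize this file." },
    { role: "assistant", content: "Sure." },
  ]),
);
```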

View File: app/components/emoji.tsx

@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGemma;
   } else if (modelName.startsWith("claude")) {
     LlmIcon = BotIconClaude;
-  } else if (modelName.includes("llama")) {
+  } else if (modelName.toLowerCase().includes("llama")) {
     LlmIcon = BotIconMeta;
-  } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
+  } else if (modelName.startsWith("mixtral")) {
     LlmIcon = BotIconMistral;
-  } else if (modelName.includes("deepseek")) {
+  } else if (modelName.toLowerCase().includes("deepseek")) {
     LlmIcon = BotIconDeepseek;
   } else if (modelName.startsWith("moonshot")) {
     LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
   } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
     LlmIcon = BotIconDoubao;
   } else if (
-    modelName.includes("glm") ||
+    modelName.toLowerCase().includes("glm") ||
     modelName.startsWith("cogview-") ||
     modelName.startsWith("cogvideox-")
   ) {
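The `toLowerCase()` calls matter because model names arrive with arbitrary casing and the icon lookup otherwise matches them verbatim. A quick check of the difference (model names here are only examples):

```ts
const modelName = "DeepSeek-R1"; // example name only

console.log(modelName.includes("deepseek")); // false: case-sensitive match misses it
console.log(modelName.toLowerCase().includes("deepseek")); // true

// Same idea for the llama and glm branches changed in this file.
console.log("Llama-3.1-70B".toLowerCase().includes("llama")); // true
```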

View File: app/constant.ts

@@ -221,12 +221,7 @@ export const ByteDance = {
 export const Alibaba = {
   ExampleEndpoint: ALIBABA_BASE_URL,
-  ChatPath: (modelName: string) => {
-    if (modelName.includes("vl") || modelName.includes("omni")) {
-      return "v1/services/aigc/multimodal-generation/generation";
-    }
-    return `v1/services/aigc/text-generation/generation`;
-  },
+  ChatPath: "v1/services/aigc/text-generation/generation",
 };

 export const Tencent = {
@@ -540,8 +535,6 @@ const anthropicModels = [
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
-  "claude-3-7-sonnet-20250219",
-  "claude-3-7-sonnet-latest",
 ];

 const baiduModels = [
@@ -575,9 +568,6 @@ const alibabaModes = [
   "qwen-max-0403",
   "qwen-max-0107",
   "qwen-max-longcontext",
-  "qwen-omni-turbo",
-  "qwen-vl-plus",
-  "qwen-vl-max",
 ];

 const tencentModels = [
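The removed `ChatPath` function routed `vl`/`omni` models to DashScope's multimodal endpoint and everything else to the text endpoint; after this change it is a single fixed string. The removed routing, restated as a standalone sketch (the local name `alibabaChatPath` is ours):

```ts
// Mirrors the removed function: vision/omni models hit the multimodal
// endpoint, all other models the text endpoint.
const alibabaChatPath = (modelName: string) => {
  if (modelName.includes("vl") || modelName.includes("omni")) {
    return "v1/services/aigc/multimodal-generation/generation";
  }
  return "v1/services/aigc/text-generation/generation";
};

console.log(alibabaChatPath("qwen-vl-plus")); // multimodal-generation endpoint
console.log(alibabaChatPath("qwen-max")); // text-generation endpoint
```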

View File: app/utils/chat.ts

@@ -3,7 +3,7 @@ import {
   UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
-import { MultimodalContent, RequestMessage } from "@/app/client/api";
+import { RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
 import {
   EventStreamContentType,
@@ -70,9 +70,8 @@ export function compressImage(file: Blob, maxSize: number): Promise<string> {
   });
 }
-export async function preProcessImageContentBase(
+export async function preProcessImageContent(
   content: RequestMessage["content"],
-  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;
@@ -82,7 +81,7 @@ export async function preProcessImageContentBase(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push(await transformImageUrl(url));
+        result.push({ type: part.type, image_url: { url } });
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -93,23 +92,6 @@ export async function preProcessImageContentBase(
   return result;
 }
-export async function preProcessImageContent(
-  content: RequestMessage["content"],
-) {
-  return preProcessImageContentBase(content, async (url) => ({
-    type: "image_url",
-    image_url: { url },
-  })) as Promise<MultimodalContent[] | string>;
-}
-export async function preProcessImageContentForAlibabaDashScope(
-  content: RequestMessage["content"],
-) {
-  return preProcessImageContentBase(content, async (url) => ({
-    image: url,
-  }));
-}
 const imageCaches: Record<string, string> = {};
 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
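The removed code factored image preprocessing into `preProcessImageContentBase`, which takes a `transformImageUrl` callback so each provider can shape the image part differently: OpenAI-style `{ type: "image_url", image_url: { url } }` versus DashScope-style `{ image: url }`. A simplified sketch of that callback design, with the base64 caching step stubbed out (the types, the stub, and the non-image handling are ours; the real helper is `cacheImageToBase64Image` above):

```ts
// Simplified stand-ins for the app's types; illustrative only.
type Part = { type?: string; text?: string; image_url?: { url: string } };
type Content = string | Part[];

// Stub: the real cacheImageToBase64Image downloads the image and returns a data URL.
async function toBase64Url(url: string): Promise<string> {
  return url;
}

// Base pass: image parts are rewritten by the provider-specific callback,
// mirroring the removed preProcessImageContentBase.
async function preProcessImageContentBase(
  content: Content,
  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
) {
  if (typeof content === "string") return content;
  const result: any[] = [];
  for (const part of content) {
    const imageUrl = part?.image_url?.url;
    if (part?.type === "image_url" && imageUrl) {
      result.push(await transformImageUrl(await toBase64Url(imageUrl)));
    } else {
      result.push(part); // non-image parts pass through in this sketch
    }
  }
  return result;
}

// OpenAI-style shape, as in the removed preProcessImageContent wrapper.
const forOpenAI = (c: Content) =>
  preProcessImageContentBase(c, async (url) => ({
    type: "image_url",
    image_url: { url },
  }));

// DashScope-style shape, as in the removed ...ForAlibabaDashScope wrapper.
const forDashScope = (c: Content) =>
  preProcessImageContentBase(c, async (url) => ({ image: url }));

const sample: Content = [
  { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
];
forOpenAI(sample).then(console.log); // [{ type: "image_url", image_url: { url: "..." } }]
forDashScope(sample).then(console.log); // [{ image: "..." }]
```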