Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-14 13:03:49 +08:00)
feat: web search
@@ -25,7 +25,10 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -104,7 +107,7 @@ export class QwenApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: getMessageTextContent(v),
+      content: getWebReferenceMessageTextContent(v),
     }));

     const modelConfig = {
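The pattern repeated across every provider client in this commit (Qwen above; Claude, Ernie, Doubao, DeepSeek, ChatGLM, Gemini, Spark, Moonshot, OpenAI, SiliconFlow, Hunyuan, and XAI below) is the same one-line swap: getMessageTextContent(v) becomes getWebReferenceMessageTextContent(v), so that web search results attached to a message get folded into the text sent to the model. The helper itself lives in @/app/utils and is not shown in this diff; the following is only a minimal sketch of what such a helper might look like, assuming a hypothetical webReferences field on the message:

// Illustrative sketch only -- not the actual helper from @/app/utils.
// The `webReferences` field and its shape are assumptions for illustration.
interface WebReference {
  title: string;
  url: string;
  content: string;
}

interface MessageLike {
  role: string;
  content: string | Array<{ type: string; text?: string }>;
  webReferences?: WebReference[]; // hypothetical field holding search hits
}

export function getWebReferenceMessageTextContent(v: MessageLike): string {
  // Same text extraction as getMessageTextContent: a plain string, or the
  // concatenated text parts of a multimodal message.
  const text =
    typeof v.content === "string"
      ? v.content
      : v.content
          .filter((part) => part.type === "text")
          .map((part) => part.text ?? "")
          .join("\n");

  if (!v.webReferences?.length) return text;

  // Fold the search hits into the prompt as a numbered reference block.
  const refs = v.webReferences
    .map((r, i) => `[${i + 1}] ${r.title} (${r.url})\n${r.content}`)
    .join("\n\n");
  return `${text}\n\nWeb references:\n${refs}`;
}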
@@ -22,7 +22,11 @@ import {
 } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { ANTHROPIC_BASE_URL } from "@/app/constant";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+  isVisionModel,
+} from "@/app/utils";
 import { preProcessImageContent, stream } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { RequestPayload } from "./openai";
@@ -318,7 +322,7 @@ export class ClaudeApi implements LLMApi {
       if (!visionModel || typeof content === "string") {
         return {
           role: insideRole,
-          content: getMessageTextContent(v),
+          content: getWebReferenceMessageTextContent(v),
         };
       }
       return {
@@ -26,7 +26,10 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -97,7 +100,7 @@ export class ErnieApi implements LLMApi {
     const messages = options.messages.map((v) => ({
       // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",
       role: v.role === "system" ? "user" : v.role,
-      content: getMessageTextContent(v),
+      content: getWebReferenceMessageTextContent(v),
     }));

     // "error_code": 336006, "error_msg": "the length of messages must be an odd number",
@@ -25,7 +25,10 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -98,7 +101,7 @@ export class DoubaoApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: getMessageTextContent(v),
+      content: getWebReferenceMessageTextContent(v),
     }));

     const modelConfig = {
@@ -28,6 +28,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getWebReferenceMessageTextContent,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -86,7 +87,7 @@ export class DeepSeekApi implements LLMApi {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = getWebReferenceMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
@@ -24,7 +24,10 @@ import {
   TranscriptionOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

@@ -78,7 +81,7 @@ export class ChatGLMApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = getWebReferenceMessageTextContent(v);
       messages.push({ role: v.role, content });
     }

@@ -25,6 +25,7 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
+  getWebReferenceMessageTextContent,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { nanoid } from "nanoid";
@@ -91,7 +92,7 @@ export class GeminiProApi implements LLMApi {
       _messages.push({ role: v.role, content });
     }
     const messages = _messages.map((v) => {
-      let parts: any[] = [{ text: getMessageTextContent(v) }];
+      let parts: any[] = [{ text: getWebReferenceMessageTextContent(v) }];
       if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
         if (images.length > 0) {
@@ -24,7 +24,10 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 import { RequestPayload } from "./openai";
@@ -79,7 +82,7 @@ export class SparkApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = getWebReferenceMessageTextContent(v);
       messages.push({ role: v.role, content });
     }

@@ -25,7 +25,10 @@ import {
   TranscriptionOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

@@ -79,7 +82,7 @@ export class MoonshotApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = getWebReferenceMessageTextContent(v);
       messages.push({ role: v.role, content });
     }

@@ -44,6 +44,7 @@ import {
   getMessageTextContent,
   isVisionModel,
   isDalle3 as _isDalle3,
+  getWebReferenceMessageTextContent,
 } from "@/app/utils";

 export interface OpenAIListModelResponse {
@@ -239,7 +240,7 @@ export class ChatGPTApi implements LLMApi {
     for (const v of options.messages) {
       const content = visionModel
         ? await preProcessImageContent(v.content)
-        : getMessageTextContent(v);
+        : getWebReferenceMessageTextContent(v);
       if (!(isO1 && v.role === "system"))
         messages.push({ role: v.role, content });
     }
@@ -28,6 +28,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getWebReferenceMessageTextContent,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -89,7 +90,7 @@ export class SiliconflowApi implements LLMApi {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = getWebReferenceMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
@@ -20,7 +20,11 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+  isVisionModel,
+} from "@/app/utils";
 import mapKeys from "lodash-es/mapKeys";
 import mapValues from "lodash-es/mapValues";
 import isArray from "lodash-es/isArray";
@@ -110,7 +114,7 @@ export class HunyuanApi implements LLMApi {
     const messages = options.messages.map((v, index) => ({
       // "Messages 中 system 角色必须位于列表的最开始"
       role: index !== 0 && v.role === "system" ? "user" : v.role,
-      content: visionModel ? v.content : getMessageTextContent(v),
+      content: visionModel ? v.content : getWebReferenceMessageTextContent(v),
     }));

     const modelConfig = {
@@ -1,3 +1,4 @@
+import { TavilySearchResponse } from "@tavily/core";
 import { ClientApi, getClientApi, getHeaders } from "../api";
 import { ChatSession } from "@/app/store";

@@ -45,3 +46,19 @@ export class FileApi {
     return fileInfo;
   }
 }
+
+export class WebApi {
+  async search(query: string): Promise<TavilySearchResponse> {
+    var headers = getHeaders(true);
+    const api = "/api/search";
+    var res = await fetch(api, {
+      method: "POST",
+      body: JSON.stringify({ query }),
+      headers: {
+        ...headers,
+      },
+    });
+    const resJson = await res.json();
+    return resJson;
+  }
+}
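The new WebApi.search wrapper posts the query to /api/search with the app's auth headers and expects a TavilySearchResponse back. The matching server route is not part of this excerpt; below is a minimal sketch of what a Next.js route handler backed by @tavily/core could look like. The file path, the TAVILY_API_KEY variable name, and the error handling are assumptions for illustration, not taken from the commit.

// app/api/search/route.ts -- illustrative sketch, not part of this commit.
import { NextRequest, NextResponse } from "next/server";
import { tavily } from "@tavily/core";

export async function POST(req: NextRequest) {
  const { query } = await req.json();
  if (!query || typeof query !== "string") {
    return NextResponse.json({ error: "missing query" }, { status: 400 });
  }

  // The key is read from an environment variable here; the variable name
  // is an assumption for this sketch.
  const client = tavily({ apiKey: process.env.TAVILY_API_KEY ?? "" });

  // client.search returns a TavilySearchResponse (query, answer, results, ...),
  // which is passed straight back to WebApi.search on the client.
  const response = await client.search(query, { maxResults: 5 });
  return NextResponse.json(response);
}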
@@ -20,7 +20,10 @@ import {
   TranscriptionOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getWebReferenceMessageTextContent,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

@@ -74,7 +77,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = getWebReferenceMessageTextContent(v);
       messages.push({ role: v.role, content });
     }

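Putting the pieces together: a caller could fetch references for the latest user message via WebApi.search and stash them where getWebReferenceMessageTextContent can pick them up. The sketch below is purely illustrative wiring under the same hypothetical webReferences field; the commit's actual call site is not included in this excerpt.

// Illustrative wiring only; none of this is shown in the diff.
import type { TavilySearchResponse } from "@tavily/core";

interface WebReference {
  title: string;
  url: string;
  content: string;
}

async function attachWebReferences(
  message: { content: string; webReferences?: WebReference[] },
  search: (query: string) => Promise<TavilySearchResponse>, // e.g. new WebApi().search
) {
  const res = await search(message.content);
  // Keep only the fields the reference block needs (shape per @tavily/core results).
  message.webReferences = (res.results ?? []).map((r) => ({
    title: r.title,
    url: r.url,
    content: r.content,
  }));
}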