Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-16 22:13:47 +08:00)
Merge remote-tracking branch 'upstream/main' into cache_audio
@@ -23,6 +23,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -178,6 +179,7 @@ export class QwenApi implements LLMApi {
       controller.signal.onabort = finish;

       fetchEventSource(chatPath, {
+        fetch: fetch as any,
         ...chatPayload,
         async onopen(res) {
           clearTimeout(requestTimeoutId);
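Note on the recurring `+ fetch: fetch as any` line in the hunks above and below: it passes the project's own fetch, imported from "@/app/utils/stream", into fetchEventSource, which accepts a fetch override in its init object. A minimal TypeScript sketch of that wiring, assuming the @fortaine/fetch-event-source API; the endpoint and payload are illustrative only:

import { fetchEventSource } from "@fortaine/fetch-event-source";
import { fetch as appFetch } from "@/app/utils/stream";

// fetchEventSource lets callers supply their own fetch implementation, so
// SSE streaming goes through the same transport as the rest of the app
// (e.g. a native-side fetch in the desktop build).
fetchEventSource("/api/chat", {
  fetch: appFetch as any, // mirrors the diff's `fetch: fetch as any`
  method: "POST",
  body: JSON.stringify({ stream: true }), // illustrative payload
  onmessage(msg) {
    console.log("[SSE]", msg.data);
  },
});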
@@ -8,7 +8,7 @@ import {
   ChatMessageTool,
 } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
-import { DEFAULT_API_HOST } from "@/app/constant";
+import { ANTHROPIC_BASE_URL } from "@/app/constant";
 import { getMessageTextContent, isVisionModel } from "@/app/utils";
 import { preProcessImageContent, stream } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
@@ -388,9 +388,7 @@ export class ClaudeApi implements LLMApi {
     if (baseUrl.trim().length === 0) {
       const isApp = !!getClientConfig()?.isApp;

-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/anthropic"
-        : ApiPath.Anthropic;
+      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
     }

     if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
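The other change repeated across these clients swaps URL concatenation (DEFAULT_API_HOST plus a proxy path) for a single per-provider constant when running as the desktop app. A sketch of the resulting selection logic, with hypothetical EXAMPLE_BASE_URL and EXAMPLE_API_PATH standing in for pairs like ANTHROPIC_BASE_URL / ApiPath.Anthropic:

// Hypothetical stand-ins for the per-provider constants in "@/app/constant".
const EXAMPLE_BASE_URL = "https://api.example.com";
const EXAMPLE_API_PATH = "/api/example";

function resolveBaseUrl(configuredUrl: string, isApp: boolean): string {
  let baseUrl = configuredUrl;
  if (baseUrl.trim().length === 0) {
    // The desktop app calls the provider host directly; the web build keeps
    // the relative proxy path that the Next.js server handles.
    baseUrl = isApp ? EXAMPLE_BASE_URL : EXAMPLE_API_PATH;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, baseUrl.length - 1); // strip trailing slash
  }
  return baseUrl;
}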
@@ -24,6 +24,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -197,6 +198,7 @@ export class ErnieApi implements LLMApi {
       controller.signal.onabort = finish;

       fetchEventSource(chatPath, {
+        fetch: fetch as any,
         ...chatPayload,
         async onopen(res) {
           clearTimeout(requestTimeoutId);
@@ -23,6 +23,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -165,6 +166,7 @@ export class DoubaoApi implements LLMApi {
       controller.signal.onabort = finish;

       fetchEventSource(chatPath, {
+        fetch: fetch as any,
         ...chatPayload,
         async onopen(res) {
           clearTimeout(requestTimeoutId);
@@ -7,21 +7,26 @@ import {
   LLMUsage,
   SpeechOptions,
 } from "../api";
-import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-import { getClientConfig } from "@/app/config/client";
-import { DEFAULT_API_HOST } from "@/app/constant";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  usePluginStore,
+  ChatMessageTool,
+} from "@/app/store";
+import { stream } from "@/app/utils/chat";
+import { getClientConfig } from "@/app/config/client";
+import { GEMINI_BASE_URL } from "@/app/constant";
+
 import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
+import { nanoid } from "nanoid";
+import { RequestPayload } from "./openai";
+import { fetch } from "@/app/utils/stream";

 export class GeminiProApi implements LLMApi {
   path(path: string): string {
@@ -34,7 +39,7 @@ export class GeminiProApi implements LLMApi {

     const isApp = !!getClientConfig()?.isApp;
     if (baseUrl.length === 0) {
-      baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google;
+      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
     }
     if (baseUrl.endsWith("/")) {
       baseUrl = baseUrl.slice(0, baseUrl.length - 1);
@@ -177,114 +182,81 @@ export class GeminiProApi implements LLMApi {
     );

     if (shouldStream) {
-      let responseText = "";
-      let remainText = "";
-      let finished = false;
-
-      const finish = () => {
-        if (!finished) {
-          finished = true;
-          options.onFinish(responseText + remainText);
-        }
-      };
-
-      // animate response to make it looks smooth
-      function animateResponseText() {
-        if (finished || controller.signal.aborted) {
-          responseText += remainText;
-          finish();
-          return;
-        }
-
-        if (remainText.length > 0) {
-          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-          const fetchText = remainText.slice(0, fetchCount);
-          responseText += fetchText;
-          remainText = remainText.slice(fetchCount);
-          options.onUpdate?.(responseText, fetchText);
-        }
-
-        requestAnimationFrame(animateResponseText);
-      }
-
-      // start animaion
-      animateResponseText();
-
-      controller.signal.onabort = finish;
-
-      fetchEventSource(chatPath, {
-        ...chatPayload,
-        async onopen(res) {
-          clearTimeout(requestTimeoutId);
-          const contentType = res.headers.get("content-type");
-          console.log(
-            "[Gemini] request response content type: ",
-            contentType,
-          );
-
-          if (contentType?.startsWith("text/plain")) {
-            responseText = await res.clone().text();
-            return finish();
-          }
-
-          if (
-            !res.ok ||
-            !res.headers
-              .get("content-type")
-              ?.startsWith(EventStreamContentType) ||
-            res.status !== 200
-          ) {
-            const responseTexts = [responseText];
-            let extraInfo = await res.clone().text();
-            try {
-              const resJson = await res.clone().json();
-              extraInfo = prettyObject(resJson);
-            } catch {}
-
-            if (res.status === 401) {
-              responseTexts.push(Locale.Error.Unauthorized);
-            }
-
-            if (extraInfo) {
-              responseTexts.push(extraInfo);
-            }
-
-            responseText = responseTexts.join("\n\n");
-
-            return finish();
-          }
-        },
-        onmessage(msg) {
-          if (msg.data === "[DONE]" || finished) {
-            return finish();
-          }
-          const text = msg.data;
-          try {
-            const json = JSON.parse(text);
-            const delta = apiClient.extractMessage(json);
-
-            if (delta) {
-              remainText += delta;
-            }
-
-            const blockReason = json?.promptFeedback?.blockReason;
-            if (blockReason) {
-              // being blocked
-              console.log(`[Google] [Safety Ratings] result:`, blockReason);
-            }
-          } catch (e) {
-            console.error("[Request] parse error", text, msg);
-          }
-        },
-        onclose() {
-          finish();
-        },
-        onerror(e) {
-          options.onError?.(e);
-          throw e;
-        },
-        openWhenHidden: true,
-      });
+      const [tools, funcs] = usePluginStore
+        .getState()
+        .getAsTools(
+          useChatStore.getState().currentSession().mask?.plugin || [],
+        );
+      return stream(
+        chatPath,
+        requestPayload,
+        getHeaders(),
+        // @ts-ignore
+        [{ functionDeclarations: tools.map((tool) => tool.function) }],
+        funcs,
+        controller,
+        // parseSSE
+        (text: string, runTools: ChatMessageTool[]) => {
+          // console.log("parseSSE", text, runTools);
+          const chunkJson = JSON.parse(text);
+
+          const functionCall = chunkJson?.candidates
+            ?.at(0)
+            ?.content.parts.at(0)?.functionCall;
+          if (functionCall) {
+            const { name, args } = functionCall;
+            runTools.push({
+              id: nanoid(),
+              type: "function",
+              function: {
+                name,
+                arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse
+              },
+            });
+          }
+          return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
+        },
+        // processToolMessage, include tool_calls message and tool call results
+        (
+          requestPayload: RequestPayload,
+          toolCallMessage: any,
+          toolCallResult: any[],
+        ) => {
+          // @ts-ignore
+          requestPayload?.contents?.splice(
+            // @ts-ignore
+            requestPayload?.contents?.length,
+            0,
+            {
+              role: "model",
+              parts: toolCallMessage.tool_calls.map(
+                (tool: ChatMessageTool) => ({
+                  functionCall: {
+                    name: tool?.function?.name,
+                    args: JSON.parse(tool?.function?.arguments as string),
+                  },
+                }),
+              ),
+            },
+            // @ts-ignore
+            ...toolCallResult.map((result) => ({
+              role: "function",
+              parts: [
+                {
+                  functionResponse: {
+                    name: result.name,
+                    response: {
+                      name: result.name,
+                      content: result.content, // TODO just text content...
+                    },
+                  },
+                },
+              ],
+            })),
+          );
+        },
+        options,
+      );
     } else {
       const res = await fetch(chatPath, chatPayload);
       clearTimeout(requestTimeoutId);
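In the Gemini hunk above, the hand-rolled fetchEventSource loop is replaced by the shared stream() helper, called with the endpoint, payload, headers, Gemini-format tool declarations, the callable functions, the abort controller, a parseSSE callback, a processToolMessage callback, and the chat options. A simplified sketch of the parseSSE contract as the diff uses it; ChatMessageTool is reduced here to the fields the hunk actually touches:

// Stand-in for the ChatMessageTool type from "@/app/store".
interface ChatMessageTool {
  id: string;
  type: "function";
  function: { name: string; arguments: string };
}

// parseSSE receives one SSE chunk plus the accumulating tool-call list;
// it queues any function call it finds and returns the text delta, if any.
function parseGeminiSSE(
  text: string,
  runTools: ChatMessageTool[],
): string | undefined {
  const chunkJson = JSON.parse(text);
  const part = chunkJson?.candidates?.at(0)?.content?.parts?.at(0);
  if (part?.functionCall) {
    const { name, args } = part.functionCall;
    runTools.push({
      id: String(runTools.length), // the real code uses nanoid()
      type: "function",
      function: { name, arguments: JSON.stringify(args) },
    });
  }
  return part?.text;
}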
@@ -1,7 +1,7 @@
 "use client";
 import {
   ApiPath,
-  DEFAULT_API_HOST,
+  IFLYTEK_BASE_URL,
   Iflytek,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
@@ -22,6 +22,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 import { RequestPayload } from "./openai";

@@ -40,7 +41,7 @@ export class SparkApi implements LLMApi {
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
       const apiPath = ApiPath.Iflytek;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
     }

     if (baseUrl.endsWith("/")) {
@@ -149,6 +150,7 @@ export class SparkApi implements LLMApi {
       controller.signal.onabort = finish;

       fetchEventSource(chatPath, {
+        fetch: fetch as any,
         ...chatPayload,
         async onopen(res) {
           clearTimeout(requestTimeoutId);
@@ -2,7 +2,7 @@
 // azure and openai, using same models. so using same LLMApi.
 import {
   ApiPath,
-  DEFAULT_API_HOST,
+  MOONSHOT_BASE_URL,
   Moonshot,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
@@ -40,7 +40,7 @@ export class MoonshotApi implements LLMApi {
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
       const apiPath = ApiPath.Moonshot;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
     }

     if (baseUrl.endsWith("/")) {
@@ -2,7 +2,7 @@
 // azure and openai, using same models. so using same LLMApi.
 import {
   ApiPath,
-  DEFAULT_API_HOST,
+  OPENAI_BASE_URL,
   DEFAULT_MODELS,
   OpenaiPath,
   Azure,
@@ -98,7 +98,7 @@ export class ChatGPTApi implements LLMApi {
     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
       const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
     }

     if (baseUrl.endsWith("/")) {
@@ -1,5 +1,5 @@
 "use client";
-import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

 import {
@@ -22,6 +22,7 @@ import mapKeys from "lodash-es/mapKeys";
 import mapValues from "lodash-es/mapValues";
 import isArray from "lodash-es/isArray";
 import isObject from "lodash-es/isObject";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -70,9 +71,7 @@ export class HunyuanApi implements LLMApi {

     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/tencent"
-        : ApiPath.Tencent;
+      baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
     }

     if (baseUrl.endsWith("/")) {
@@ -179,6 +178,7 @@ export class HunyuanApi implements LLMApi {
       controller.signal.onabort = finish;

       fetchEventSource(chatPath, {
+        fetch: fetch as any,
         ...chatPayload,
         async onopen(res) {
           clearTimeout(requestTimeoutId);