feat: sync upstream code
app/client/api.ts:
@@ -1,11 +1,16 @@
import { getClientConfig } from "../config/client";
import {
  ACCESS_CODE_PREFIX,
  Azure,
  ModelProvider,
  ServiceProvider,
} from "../constant";
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import {
  ChatMessageTool,
  ChatMessage,
  ModelType,
  useAccessStore,
  useChatStore,
} from "../store";
import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai";
import { FileApi, FileInfo } from "./platforms/utils";
import { GeminiProApi } from "./platforms/google";
@@ -13,6 +18,11 @@ import { ClaudeApi } from "./platforms/anthropic";
import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";

export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
@@ -79,9 +89,11 @@ export interface ChatOptions {

  onToolUpdate?: (toolName: string, toolInput: string) => void;
  onUpdate?: (message: string, chunk: string) => void;
  onFinish: (message: string) => void;
  onFinish: (message: string, responseRes: Response) => void;
  onError?: (err: Error) => void;
  onController?: (controller: AbortController) => void;
  onBeforeTool?: (tool: ChatMessageTool) => void;
  onAfterTool?: (tool: ChatMessageTool) => void;
}

export interface AgentChatOptions {
@@ -94,6 +106,8 @@ export interface AgentChatOptions {
  onFinish: (message: string) => void;
  onError?: (err: Error) => void;
  onController?: (controller: AbortController) => void;
  onBeforeTool?: (tool: ChatMessageTool) => void;
  onAfterTool?: (tool: ChatMessageTool) => void;
}

export interface CreateRAGStoreOptions {
@@ -113,12 +127,14 @@ export interface LLMModel {
  displayName?: string;
  available: boolean;
  provider: LLMModelProvider;
  sorted: number;
}

export interface LLMModelProvider {
  id: string;
  providerName: string;
  providerType: string;
  sorted: number;
}

export abstract class LLMApi {
@@ -179,6 +195,21 @@ export class ClientApi {
      case ModelProvider.Qwen:
        this.llm = new QwenApi();
        break;
      case ModelProvider.Hunyuan:
        this.llm = new HunyuanApi();
        break;
      case ModelProvider.Moonshot:
        this.llm = new MoonshotApi();
        break;
      case ModelProvider.Iflytek:
        this.llm = new SparkApi();
        break;
      case ModelProvider.XAI:
        this.llm = new XAIApi();
        break;
      case ModelProvider.ChatGLM:
        this.llm = new ChatGLMApi();
        break;
      default:
        this.llm = new ChatGPTApi();
    }
@@ -231,7 +262,20 @@ export class ClientApi {
  }
}

export function getHeaders(ignoreHeaders?: boolean) {
export function getBearerToken(
  apiKey: string,
  noBearer: boolean = false,
): string {
  return validString(apiKey)
    ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}`
    : "";
}

export function validString(x: string): boolean {
  return x?.length > 0;
}

export function getHeaders(ignoreHeaders: boolean = false) {
  const accessStore = useAccessStore.getState();
  const chatStore = useChatStore.getState();
  let headers: Record<string, string> = {};
@@ -246,12 +290,16 @@ export function getHeaders(ignoreHeaders?: boolean) {

  function getConfig() {
    const modelConfig = chatStore.currentSession().mask.modelConfig;
    const isGoogle = modelConfig.providerName == ServiceProvider.Google;
    const isGoogle = modelConfig.providerName === ServiceProvider.Google;
    const isAzure = modelConfig.providerName === ServiceProvider.Azure;
    const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
    const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
    const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
    const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
    const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
    const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
    const isXAI = modelConfig.providerName === ServiceProvider.XAI;
    const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
    const isEnabledAccessControl = accessStore.enabledAccessControl();
    const apiKey = isGoogle
      ? accessStore.googleApiKey
@@ -263,7 +311,20 @@ export function getHeaders(ignoreHeaders?: boolean) {
        ? accessStore.bytedanceApiKey
        : isAlibaba
          ? accessStore.alibabaApiKey
          : accessStore.openaiApiKey;
          : isMoonshot
            ? accessStore.moonshotApiKey
            : isXAI
              ? accessStore.xaiApiKey
              : isChatGLM
                ? accessStore.chatglmApiKey
                : isIflytek
                  ? accessStore.iflytekApiKey &&
                    accessStore.iflytekApiSecret
                    ? accessStore.iflytekApiKey +
                      ":" +
                      accessStore.iflytekApiSecret
                    : ""
                  : accessStore.openaiApiKey;
    if (accessStore.isUseOpenAIEndpointForAllModels || ignoreHeaders) {
      return {
        isGoogle: false,
@@ -272,6 +333,10 @@ export function getHeaders(ignoreHeaders?: boolean) {
        isBaidu: false,
        isByteDance: false,
        isAlibaba: false,
        isMoonshot: false,
        isIflytek: false,
        isXAI: false,
        isChatGLM: false,
        apiKey: accessStore.openaiApiKey,
        isEnabledAccessControl,
      };
@@ -283,24 +348,25 @@ export function getHeaders(ignoreHeaders?: boolean) {
      isBaidu,
      isByteDance,
      isAlibaba,
      isMoonshot,
      isIflytek,
      isXAI,
      isChatGLM,
      apiKey,
      isEnabledAccessControl,
    };
  }

  function getAuthHeader(): string {
    return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization";
    return isAzure
      ? "api-key"
      : isAnthropic
        ? "x-api-key"
        : isGoogle
          ? "x-goog-api-key"
          : "Authorization";
  }

  function getBearerToken(apiKey: string, noBearer: boolean = false): string {
    return validString(apiKey)
      ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}`
      : "";
  }

  function validString(x: string): boolean {
    return x?.length > 0;
  }
  const {
    isGoogle,
    isAzure,
@@ -309,14 +375,15 @@ export function getHeaders(ignoreHeaders?: boolean) {
    apiKey,
    isEnabledAccessControl,
  } = getConfig();
  // when using google api in app, not set auth header
  if (isGoogle && clientConfig?.isApp) return headers;
  // when using baidu api in app, not set auth header
  if (isBaidu && clientConfig?.isApp) return headers;

  const authHeader = getAuthHeader();

  const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic);
  const bearerToken = getBearerToken(
    apiKey,
    isAzure || isAnthropic || isGoogle,
  );

  if (bearerToken) {
    headers[authHeader] = bearerToken;
@@ -345,6 +412,16 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
      return new ClientApi(ModelProvider.Doubao);
    case ServiceProvider.Alibaba:
      return new ClientApi(ModelProvider.Qwen);
    case ServiceProvider.Tencent:
      return new ClientApi(ModelProvider.Hunyuan);
    case ServiceProvider.Moonshot:
      return new ClientApi(ModelProvider.Moonshot);
    case ServiceProvider.Iflytek:
      return new ClientApi(ModelProvider.Iflytek);
    case ServiceProvider.XAI:
      return new ClientApi(ModelProvider.XAI);
    case ServiceProvider.ChatGLM:
      return new ClientApi(ModelProvider.ChatGLM);
    default:
      return new ClientApi(ModelProvider.GPT);
  }
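The central change in app/client/api.ts is the onFinish contract: every provider client now hands back the raw Response together with the final text. A rough caller-side sketch of the new contract; the handler bodies, message list, and model id below are illustrative, not part of this commit:

    const api = new ClientApi(ModelProvider.GPT);
    api.llm.chat({
      messages: [{ role: "user", content: "hello" }],
      config: { model: "gpt-4o-mini", stream: true }, // illustrative model id
      onUpdate(message, chunk) {
        console.log("partial:", chunk); // incremental rendering
      },
      onFinish(message, responseRes) {
        // responseRes is the fetch Response captured by the provider client,
        // so callers can now branch on status codes and headers
        if (responseRes.status !== 200) console.warn("HTTP", responseRes.status);
      },
      onError(err) {
        console.error(err);
      },
    });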
app/client/platforms/alibaba.ts:
@@ -8,14 +8,14 @@ import {
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {
  AgentChatOptions,
  ChatOptions,
  CreateRAGStoreOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
  MultimodalContent,
  AgentChatOptions,
  CreateRAGStoreOptions,
  TranscriptionOptions,
} from "../api";
import Locale from "../../locales";
@@ -26,6 +26,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;
@@ -57,9 +58,6 @@ interface RequestPayload {
}

export class QwenApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
@@ -99,6 +97,10 @@ export class QwenApi implements LLMApi {
    return res?.output?.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
@@ -153,6 +155,7 @@ export class QwenApi implements LLMApi {
      let responseText = "";
      let remainText = "";
      let finished = false;
      let responseRes: Response;

      // animate the response to make it look smooth
      function animateResponseText() {
@@ -182,13 +185,14 @@ export class QwenApi implements LLMApi {
      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText);
          options.onFinish(responseText + remainText, responseRes);
        }
      };

      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
@@ -197,6 +201,7 @@ export class QwenApi implements LLMApi {
            "[Alibaba] request response content type: ",
            contentType,
          );
          responseRes = res;

          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
@@ -263,7 +268,7 @@ export class QwenApi implements LLMApi {

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
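The Alibaba client above is the template for the same edit applied to the Baidu, ByteDance, and iFLYTEK clients below: the Response is captured once in onopen and forwarded when the stream settles. Distilled to its core (names as in the diff):

    let responseRes: Response;

    const finish = () => {
      if (!finished) {
        finished = true;
        // forward the captured Response alongside the accumulated text
        options.onFinish(responseText + remainText, responseRes);
      }
    };

    fetchEventSource(chatPath, {
      fetch: fetch as any, // the app's patched fetch from @/app/utils/stream
      ...chatPayload,
      async onopen(res) {
        responseRes = res; // captured here, consumed in finish()
        // content-type and error checks as in the hunks above
      },
      // onmessage / onclose / onerror unchanged
    });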
app/client/platforms/anthropic.ts:
@@ -1,33 +1,27 @@
import {
  ACCESS_CODE_PREFIX,
  Anthropic,
  ApiPath,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
} from "@/app/constant";
import { Anthropic, ApiPath } from "@/app/constant";
import {
  AgentChatOptions,
  ChatOptions,
  CreateRAGStoreOptions,
  getHeaders,
  LLMApi,
  MultimodalContent,
  SpeechOptions,
  TranscriptionOptions,
} from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";

import Locale from "../../locales";
import { prettyObject } from "@/app/utils/format";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { ANTHROPIC_BASE_URL } from "@/app/constant";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export type MultiBlockContent = {
  type: "image" | "text";
@@ -88,173 +82,19 @@ const ClaudeMapper = {
const keys = ["claude-2, claude-instant-1"];

export class ClaudeApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }

  async toolAgentChat(options: AgentChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: AgentChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = visionModel
        ? await preProcessImageContent(v.content)
        : getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };
    const accessStore = useAccessStore.getState();
    let baseUrl = accessStore.anthropicUrl;
    const requestPayload = {
      chatSessionId: options.chatSessionId,
      messages,
      isAzure: false,
      azureApiVersion: accessStore.azureApiVersion,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      baseUrl: baseUrl,
      maxIterations: options.agentConfig.maxIterations,
      returnIntermediateSteps: options.agentConfig.returnIntermediateSteps,
      useTools: options.agentConfig.useTools,
      provider: ServiceProvider.Anthropic,
    };

    console.log("[Request] anthropic payload: ", requestPayload);

    const shouldStream = true;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let path = "/api/langchain/tool/agent/";
      const enableNodeJSPlugin = !!process.env.NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN;
      path = enableNodeJSPlugin ? path + "nodejs" : path + "edge";
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      // console.log("shouldStream", shouldStream);

      if (shouldStream) {
        let responseText = "";
        let finished = false;

        const finish = () => {
          if (!finished) {
            options.onFinish(responseText);
            finished = true;
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(path, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[OpenAI] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              console.warn(`extraInfo: ${extraInfo}`);

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            let response = JSON.parse(msg.data);
            if (!response.isSuccess) {
              console.error("[Request]", msg.data);
              responseText = msg.data;
              throw Error(response.message);
            }
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            try {
              if (response && !response.isToolMessage) {
                responseText += response.message;
                options.onUpdate?.(responseText, response.message);
              } else {
                options.onToolUpdate?.(response.toolName!, response.message);
              }
            } catch (e) {
              console.error("[Request] parse error", response, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  toolAgentChat(options: AgentChatOptions): Promise<void> {
    throw new Error("Method not implemented.");
  }

  createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  extractMessage(res: any) {
    console.log("[Response] claude response: ", res);

@@ -373,120 +213,136 @@ export class ClaudeApi implements LLMApi {
    const controller = new AbortController();
    options.onController?.(controller);

    const payload = {
      method: "POST",
      body: JSON.stringify(requestBody),
      signal: controller.signal,
      headers: {
        ...getHeaders(), // get common headers
        "anthropic-version": accessStore.anthropicApiVersion,
        // do not send `anthropicApiKey` in browser!!!
        // Authorization: getAuthKey(accessStore.anthropicApiKey),
      },
    };

    if (shouldStream) {
      try {
        const context = {
          text: "",
          finished: false,
        };

        const finish = () => {
          if (!context.finished) {
            options.onFinish(context.text);
            context.finished = true;
          }
        };

        controller.signal.onabort = finish;
        fetchEventSource(path, {
          ...payload,
          async onopen(res) {
            const contentType = res.headers.get("content-type");
            console.log("response content type: ", contentType);

            if (contentType?.startsWith("text/plain")) {
              context.text = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [context.text];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              context.text = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            let chunkJson:
              | undefined
              | {
                  type: "content_block_delta" | "content_block_stop";
                  delta?: {
                    type: "text_delta";
                    text: string;
                  };
                  index: number;
      let index = -1;
      const [tools, funcs] = [{}, {}];
      // const [tools, funcs] = usePluginStore
      //   .getState()
      //   .getAsTools(
      //     useChatStore.getState().currentSession().mask?.plugin || [],
      //   );
      return stream(
        path,
        requestBody,
        {
          ...getHeaders(),
          "anthropic-version": accessStore.anthropicApiVersion,
        },
        // @ts-ignore
        tools.map((tool) => ({
          name: tool?.function?.name,
          description: tool?.function?.description,
          input_schema: tool?.function?.parameters,
        })),
        funcs,
        controller,
        // parseSSE
        (text: string, runTools: ChatMessageTool[]) => {
          // console.log("parseSSE", text, runTools);
          let chunkJson:
            | undefined
            | {
                type: "content_block_delta" | "content_block_stop";
                content_block?: {
                  type: "tool_use";
                  id: string;
                  name: string;
                };
            try {
              chunkJson = JSON.parse(msg.data);
            } catch (e) {
              console.error("[Response] parse error", msg.data);
            }
                delta?: {
                  type: "text_delta" | "input_json_delta";
                  text?: string;
                  partial_json?: string;
                };
                index: number;
              };
          chunkJson = JSON.parse(text);

            if (!chunkJson || chunkJson.type === "content_block_stop") {
              return finish();
            }

            const { delta } = chunkJson;
            if (delta?.text) {
              context.text += delta.text;
              options.onUpdate?.(context.text, delta.text);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
      }
          if (chunkJson?.content_block?.type == "tool_use") {
            index += 1;
            const id = chunkJson?.content_block.id;
            const name = chunkJson?.content_block.name;
            runTools.push({
              id,
              type: "function",
              function: {
                name,
                arguments: "",
              },
            });
          }
          if (
            chunkJson?.delta?.type == "input_json_delta" &&
            chunkJson?.delta?.partial_json
          ) {
            // @ts-ignore
            runTools[index]["function"]["arguments"] +=
              chunkJson?.delta?.partial_json;
          }
          return chunkJson?.delta?.text;
        },
        // processToolMessage, include tool_calls message and tool call results
        (
          requestPayload: RequestPayload,
          toolCallMessage: any,
          toolCallResult: any[],
        ) => {
          // reset index value
          index = -1;
          // @ts-ignore
          requestPayload?.messages?.splice(
            // @ts-ignore
            requestPayload?.messages?.length,
            0,
            {
              role: "assistant",
              content: toolCallMessage.tool_calls.map(
                (tool: ChatMessageTool) => ({
                  type: "tool_use",
                  id: tool.id,
                  name: tool?.function?.name,
                  input: tool?.function?.arguments
                    ? JSON.parse(tool?.function?.arguments)
                    : {},
                }),
              ),
            },
            // @ts-ignore
            ...toolCallResult.map((result) => ({
              role: "user",
              content: [
                {
                  type: "tool_result",
                  tool_use_id: result.tool_call_id,
                  content: result.content,
                },
              ],
            })),
          );
        },
        options,
      );
    } else {
      const payload = {
        method: "POST",
        body: JSON.stringify(requestBody),
        signal: controller.signal,
        headers: {
          ...getHeaders(), // get common headers
          "anthropic-version": accessStore.anthropicApiVersion,
          // do not send `anthropicApiKey` in browser!!!
          // Authorization: getAuthKey(accessStore.anthropicApiKey),
        },
      };

      try {
        controller.signal.onabort = () => options.onFinish("");
        controller.signal.onabort = () =>
          options.onFinish("", new Response(null, { status: 400 }));

        const res = await fetch(path, payload);
        const resJson = await res.json();

        const message = this.extractMessage(resJson);
        options.onFinish(message);
        options.onFinish(message, res);
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
@@ -552,9 +408,7 @@ export class ClaudeApi implements LLMApi {
    if (baseUrl.trim().length === 0) {
      const isApp = !!getClientConfig()?.isApp;

      baseUrl = isApp
        ? DEFAULT_API_HOST + "/api/proxy/anthropic"
        : ApiPath.Anthropic;
      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
    }

    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
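The Claude client now delegates its SSE loop to the shared stream() helper from @/app/utils/chat. The diff only shows call sites, so the following declaration is an inference from those call sites, not the authoritative signature:

    // Inferred shape of stream(), based on the arguments passed above:
    declare function stream(
      chatPath: string,
      requestPayload: object,
      headers: Record<string, string>,
      tools: any[],                    // provider-specific tool descriptors
      funcs: Record<string, Function>, // callable implementations by name
      controller: AbortController,
      // converts one SSE chunk to display text, pushing tool calls into runTools
      parseSSE: (text: string, runTools: ChatMessageTool[]) => string | undefined,
      // splices the tool-call message and its results back into the payload
      processToolMessage: (
        requestPayload: RequestPayload,
        toolCallMessage: any,
        toolCallResult: any[],
      ) => void,
      options: ChatOptions,
    ): void;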
app/client/platforms/baidu.ts:
@@ -27,6 +27,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;
@@ -52,9 +53,6 @@ interface RequestPayload {
}

export class ErnieApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
@@ -91,18 +89,30 @@ export class ErnieApi implements LLMApi {
    return [baseUrl, path].join("/");
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",
      role: v.role === "system" ? "user" : v.role,
      content: getMessageTextContent(v),
    }));

    // "error_code": 336006, "error_msg": "the length of messages must be an odd number",
    if (messages.length % 2 === 0) {
      messages.unshift({
        role: "user",
        content: " ",
      });
      if (messages.at(0)?.role === "user") {
        messages.splice(1, 0, {
          role: "assistant",
          content: " ",
        });
      } else {
        messages.unshift({
          role: "user",
          content: " ",
        });
      }
    }

    const modelConfig = {
@@ -164,6 +174,7 @@ export class ErnieApi implements LLMApi {
      let responseText = "";
      let remainText = "";
      let finished = false;
      let responseRes: Response;

      // animate the response to make it look smooth
      function animateResponseText() {
@@ -193,19 +204,20 @@ export class ErnieApi implements LLMApi {
      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText);
          options.onFinish(responseText + remainText, responseRes);
        }
      };

      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
          const contentType = res.headers.get("content-type");
          console.log("[Baidu] request response content type: ", contentType);

          responseRes = res;
          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
@@ -268,7 +280,7 @@ export class ErnieApi implements LLMApi {

        const resJson = await res.json();
        const message = resJson?.result;
        options.onFinish(message);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
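The reworked padding in ErnieApi.chat exists because the Ernie endpoint rejects histories whose length is even or whose even-indexed turns are not user/function (error_code 336006). A worked example of what the new branch produces in the common case, where a system prompt has just been remapped to a user turn:

    // before: [user(system prompt), user(question)]
    //   length 2 (even), two user turns in a row
    // after:  [user(system prompt), assistant(" "), user(question)]
    //   length 3 (odd), strictly alternating, user turns on the even indices
    // the else branch covers histories that do not start with a user turn,
    // where a blank user message is unshifted to the front instead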
app/client/platforms/bytedance.ts:
@@ -26,6 +26,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;
@@ -51,9 +52,6 @@ interface RequestPayload {
}

export class DoubaoApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
@@ -93,6 +91,10 @@ export class DoubaoApi implements LLMApi {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
@@ -140,6 +142,7 @@ export class DoubaoApi implements LLMApi {
      let responseText = "";
      let remainText = "";
      let finished = false;
      let responseRes: Response;

      // animate the response to make it look smooth
      function animateResponseText() {
@@ -169,13 +172,14 @@ export class DoubaoApi implements LLMApi {
      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText);
          options.onFinish(responseText + remainText, responseRes);
        }
      };

      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
@@ -184,7 +188,7 @@ export class DoubaoApi implements LLMApi {
            "[ByteDance] request response content type: ",
            contentType,
          );
          responseRes = res;
          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
@@ -250,7 +254,7 @@ export class DoubaoApi implements LLMApi {

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
app/client/platforms/glm.ts (new file, 210 lines)
@@ -0,0 +1,210 @@
"use client";
|
||||
import {
|
||||
ApiPath,
|
||||
CHATGLM_BASE_URL,
|
||||
ChatGLM,
|
||||
REQUEST_TIMEOUT_MS,
|
||||
} from "@/app/constant";
|
||||
import {
|
||||
useAccessStore,
|
||||
useAppConfig,
|
||||
useChatStore,
|
||||
ChatMessageTool,
|
||||
usePluginStore,
|
||||
} from "@/app/store";
|
||||
import { stream } from "@/app/utils/chat";
|
||||
import {
|
||||
AgentChatOptions,
|
||||
ChatOptions,
|
||||
CreateRAGStoreOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
SpeechOptions,
|
||||
TranscriptionOptions,
|
||||
} from "../api";
|
||||
import { getClientConfig } from "@/app/config/client";
|
||||
import { getMessageTextContent } from "@/app/utils";
|
||||
import { RequestPayload } from "./openai";
|
||||
import { fetch } from "@/app/utils/stream";
|
||||
|
||||
export class ChatGLMApi implements LLMApi {
|
||||
transcription(options: TranscriptionOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
toolAgentChat(options: AgentChatOptions): Promise<void> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
private disableListModels = true;
|
||||
|
||||
path(path: string): string {
|
||||
const accessStore = useAccessStore.getState();
|
||||
|
||||
let baseUrl = "";
|
||||
|
||||
if (accessStore.useCustomConfig) {
|
||||
baseUrl = accessStore.chatglmUrl;
|
||||
}
|
||||
|
||||
if (baseUrl.length === 0) {
|
||||
const isApp = !!getClientConfig()?.isApp;
|
||||
const apiPath = ApiPath.ChatGLM;
|
||||
baseUrl = isApp ? CHATGLM_BASE_URL : apiPath;
|
||||
}
|
||||
|
||||
if (baseUrl.endsWith("/")) {
|
||||
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
|
||||
}
|
||||
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) {
|
||||
baseUrl = "https://" + baseUrl;
|
||||
}
|
||||
|
||||
console.log("[Proxy Endpoint] ", baseUrl, path);
|
||||
|
||||
return [baseUrl, path].join("/");
|
||||
}
|
||||
|
||||
extractMessage(res: any) {
|
||||
return res.choices?.at(0)?.message?.content ?? "";
|
||||
}
|
||||
|
||||
speech(options: SpeechOptions): Promise<ArrayBuffer> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
|
||||
async chat(options: ChatOptions) {
|
||||
const messages: ChatOptions["messages"] = [];
|
||||
for (const v of options.messages) {
|
||||
const content = getMessageTextContent(v);
|
||||
messages.push({ role: v.role, content });
|
||||
}
|
||||
|
||||
const modelConfig = {
|
||||
...useAppConfig.getState().modelConfig,
|
||||
...useChatStore.getState().currentSession().mask.modelConfig,
|
||||
...{
|
||||
model: options.config.model,
|
||||
providerName: options.config.providerName,
|
||||
},
|
||||
};
|
||||
|
||||
const requestPayload: RequestPayload = {
|
||||
messages,
|
||||
stream: options.config.stream,
|
||||
model: modelConfig.model,
|
||||
temperature: modelConfig.temperature,
|
||||
presence_penalty: modelConfig.presence_penalty,
|
||||
frequency_penalty: modelConfig.frequency_penalty,
|
||||
top_p: modelConfig.top_p,
|
||||
};
|
||||
|
||||
console.log("[Request] glm payload: ", requestPayload);
|
||||
|
||||
const shouldStream = !!options.config.stream;
|
||||
const controller = new AbortController();
|
||||
options.onController?.(controller);
|
||||
|
||||
try {
|
||||
const chatPath = this.path(ChatGLM.ChatPath);
|
||||
const chatPayload = {
|
||||
method: "POST",
|
||||
body: JSON.stringify(requestPayload),
|
||||
signal: controller.signal,
|
||||
headers: getHeaders(),
|
||||
};
|
||||
|
||||
// make a fetch request
|
||||
const requestTimeoutId = setTimeout(
|
||||
() => controller.abort(),
|
||||
REQUEST_TIMEOUT_MS,
|
||||
);
|
||||
|
||||
if (shouldStream) {
|
||||
const [tools, funcs] = [[], {}];
|
||||
// const [tools, funcs] = usePluginStore
|
||||
// .getState()
|
||||
// .getAsTools(
|
||||
// useChatStore.getState().currentSession().mask?.plugin || [],
|
||||
// );
|
||||
return stream(
|
||||
chatPath,
|
||||
requestPayload,
|
||||
getHeaders(),
|
||||
tools as any,
|
||||
funcs,
|
||||
controller,
|
||||
// parseSSE
|
||||
(text: string, runTools: ChatMessageTool[]) => {
|
||||
// console.log("parseSSE", text, runTools);
|
||||
const json = JSON.parse(text);
|
||||
const choices = json.choices as Array<{
|
||||
delta: {
|
||||
content: string;
|
||||
tool_calls: ChatMessageTool[];
|
||||
};
|
||||
}>;
|
||||
const tool_calls = choices[0]?.delta?.tool_calls;
|
||||
if (tool_calls?.length > 0) {
|
||||
const index = tool_calls[0]?.index;
|
||||
const id = tool_calls[0]?.id;
|
||||
const args = tool_calls[0]?.function?.arguments;
|
||||
if (id) {
|
||||
runTools.push({
|
||||
id,
|
||||
type: tool_calls[0]?.type,
|
||||
function: {
|
||||
name: tool_calls[0]?.function?.name as string,
|
||||
arguments: args,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
// @ts-ignore
|
||||
runTools[index]["function"]["arguments"] += args;
|
||||
}
|
||||
}
|
||||
return choices[0]?.delta?.content;
|
||||
},
|
||||
// processToolMessage, include tool_calls message and tool call results
|
||||
(
|
||||
requestPayload: RequestPayload,
|
||||
toolCallMessage: any,
|
||||
toolCallResult: any[],
|
||||
) => {
|
||||
// @ts-ignore
|
||||
requestPayload?.messages?.splice(
|
||||
// @ts-ignore
|
||||
requestPayload?.messages?.length,
|
||||
0,
|
||||
toolCallMessage,
|
||||
...toolCallResult,
|
||||
);
|
||||
},
|
||||
options,
|
||||
);
|
||||
} else {
|
||||
const res = await fetch(chatPath, chatPayload);
|
||||
clearTimeout(requestTimeoutId);
|
||||
|
||||
const resJson = await res.json();
|
||||
const message = this.extractMessage(resJson);
|
||||
options.onFinish(message, res);
|
||||
}
|
||||
} catch (e) {
|
||||
console.log("[Request] failed to make a chat request", e);
|
||||
options.onError?.(e as Error);
|
||||
}
|
||||
}
|
||||
async usage() {
|
||||
return {
|
||||
used: 0,
|
||||
total: 0,
|
||||
};
|
||||
}
|
||||
|
||||
async models(): Promise<LLMModel[]> {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
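With the ModelProvider.ChatGLM case added to ClientApi and getClientApi in app/client/api.ts above, the new client is reachable through the existing factory. A minimal usage sketch; the model id and message here are illustrative:

    import { getClientApi } from "../api";
    import { ServiceProvider } from "@/app/constant";

    const api = getClientApi(ServiceProvider.ChatGLM); // -> ClientApi(ModelProvider.ChatGLM)
    await api.llm.chat({
      messages: [{ role: "user", content: "hello" }],
      config: { model: "glm-4", stream: true }, // "glm-4" is an illustrative id
      onUpdate: (full, chunk) => console.log(chunk),
      onFinish: (message, res) => console.log(message, res.status),
    });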
app/client/platforms/google.ts:
@@ -10,27 +10,28 @@ import {
  SpeechOptions,
  TranscriptionOptions,
} from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { GEMINI_BASE_URL } from "@/app/constant";

import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import options from "cheerio/lib/options";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export class GeminiProApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
@@ -48,11 +49,9 @@ export class GeminiProApi implements LLMApi {
      baseUrl = accessStore.googleUrl;
    }

    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp
        ? DEFAULT_API_HOST + `/api/proxy/google?key=${accessStore.googleApiKey}`
        : ApiPath.Google;
      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
@@ -77,6 +76,10 @@ export class GeminiProApi implements LLMApi {
        ""
    );
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions): Promise<void> {
    const apiClient = this;
    let multimodal = false;
@@ -191,120 +194,89 @@ export class GeminiProApi implements LLMApi {
    );

    if (shouldStream) {
      let responseText = "";
      let remainText = "";
      let finished = false;
      const [tools, funcs] = [[], {}];
      // const [tools, funcs] = usePluginStore
      //   .getState()
      //   .getAsTools(
      //     useChatStore.getState().currentSession().mask?.plugin || [],
      //   );
      return stream(
        chatPath,
        requestPayload,
        getHeaders(),
        // @ts-ignore
        tools.length > 0
          ? // @ts-ignore
            [{ functionDeclarations: tools.map((tool) => tool.function) }]
          : [],
        funcs,
        controller,
        // parseSSE
        (text: string, runTools: ChatMessageTool[]) => {
          // console.log("parseSSE", text, runTools);
          const chunkJson = JSON.parse(text);

      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText);
        }
      };

      // animate the response to make it look smooth
      function animateResponseText() {
        if (finished || controller.signal.aborted) {
          responseText += remainText;
          finish();
          return;
        }

        if (remainText.length > 0) {
          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
          const fetchText = remainText.slice(0, fetchCount);
          responseText += fetchText;
          remainText = remainText.slice(fetchCount);
          options.onUpdate?.(responseText, fetchText);
        }

        requestAnimationFrame(animateResponseText);
      }

      // start animation
      animateResponseText();

      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
          const contentType = res.headers.get("content-type");
          console.log(
            "[Gemini] request response content type: ",
            contentType,
          const functionCall = chunkJson?.candidates
            ?.at(0)
            ?.content.parts.at(0)?.functionCall;
          if (functionCall) {
            const { name, args } = functionCall;
            runTools.push({
              id: nanoid(),
              type: "function",
              function: {
                name,
                arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse
              },
            });
          }
          return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
        },
        // processToolMessage, include tool_calls message and tool call results
        (
          requestPayload: RequestPayload,
          toolCallMessage: any,
          toolCallResult: any[],
        ) => {
          // @ts-ignore
          requestPayload?.contents?.splice(
            // @ts-ignore
            requestPayload?.contents?.length,
            0,
            {
              role: "model",
              parts: toolCallMessage.tool_calls.map(
                (tool: ChatMessageTool) => ({
                  functionCall: {
                    name: tool?.function?.name,
                    args: JSON.parse(tool?.function?.arguments as string),
                  },
                }),
              ),
            },
            // @ts-ignore
            ...toolCallResult.map((result) => ({
              role: "function",
              parts: [
                {
                  functionResponse: {
                    name: result.name,
                    response: {
                      name: result.name,
                      content: result.content, // TODO just text content...
                    },
                  },
                },
              ],
            })),
          );

          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
          }

          if (
            !res.ok ||
            !res.headers
              .get("content-type")
              ?.startsWith(EventStreamContentType) ||
            res.status !== 200
          ) {
            const responseTexts = [responseText];
            let extraInfo = await res.clone().text();
            try {
              const resJson = await res.clone().json();
              extraInfo = prettyObject(resJson);
            } catch {}

            if (res.status === 401) {
              responseTexts.push(Locale.Error.Unauthorized);
            }

            if (extraInfo) {
              responseTexts.push(extraInfo);
            }

            responseText = responseTexts.join("\n\n");

            return finish();
          }
        },
        onmessage(msg) {
          if (msg.data === "[DONE]" || finished) {
            return finish();
          }
          const text = msg.data;
          try {
            const json = JSON.parse(text);
            const delta = apiClient.extractMessage(json);

            if (delta) {
              remainText += delta;
            }

            const blockReason = json?.promptFeedback?.blockReason;
            if (blockReason) {
              // being blocked
              console.log(`[Google] [Safety Ratings] result:`, blockReason);
            }
          } catch (e) {
            console.error("[Request] parse error", text, msg);
          }
        },
        onclose() {
          finish();
        },
        onerror(e) {
          options.onError?.(e);
          throw e;
        },
        openWhenHidden: true,
      });
        options,
      );
    } else {
      const res = await fetch(chatPath, chatPayload);
      clearTimeout(requestTimeoutId);

      const resJson = await res.json();

      if (resJson?.promptFeedback?.blockReason) {
        // being blocked
        options.onError?.(
@@ -315,7 +287,7 @@ export class GeminiProApi implements LLMApi {
        );
      }
      const message = apiClient.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);
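For Gemini the header plumbing in app/client/api.ts matters here: getAuthHeader() now resolves to x-goog-api-key, and getBearerToken() is invoked with noBearer = true, so the key is sent without a Bearer prefix. Roughly, inside getHeaders() (key value illustrative):

    // with modelConfig.providerName === ServiceProvider.Google:
    const authHeader = getAuthHeader();               // "x-goog-api-key"
    const bearerToken = getBearerToken(apiKey, true); // "AIza..." without a "Bearer " prefix
    headers[authHeader] = bearerToken;                // { "x-goog-api-key": "AIza..." }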
app/client/platforms/iflytek.ts (new file, 265 lines)
@@ -0,0 +1,265 @@
"use client";
|
||||
import {
|
||||
ApiPath,
|
||||
IFLYTEK_BASE_URL,
|
||||
Iflytek,
|
||||
REQUEST_TIMEOUT_MS,
|
||||
} from "@/app/constant";
|
||||
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
|
||||
|
||||
import {
|
||||
AgentChatOptions,
|
||||
ChatOptions,
|
||||
CreateRAGStoreOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
SpeechOptions,
|
||||
TranscriptionOptions,
|
||||
} from "../api";
|
||||
import Locale from "../../locales";
|
||||
import {
|
||||
EventStreamContentType,
|
||||
fetchEventSource,
|
||||
} from "@fortaine/fetch-event-source";
|
||||
import { prettyObject } from "@/app/utils/format";
|
||||
import { getClientConfig } from "@/app/config/client";
|
||||
import { getMessageTextContent } from "@/app/utils";
|
||||
import { fetch } from "@/app/utils/stream";
|
||||
|
||||
import { RequestPayload } from "./openai";
|
||||
|
||||
export class SparkApi implements LLMApi {
|
||||
transcription(options: TranscriptionOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
toolAgentChat(options: AgentChatOptions): Promise<void> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
private disableListModels = true;
|
||||
|
||||
path(path: string): string {
|
||||
const accessStore = useAccessStore.getState();
|
||||
|
||||
let baseUrl = "";
|
||||
|
||||
if (accessStore.useCustomConfig) {
|
||||
baseUrl = accessStore.iflytekUrl;
|
||||
}
|
||||
|
||||
if (baseUrl.length === 0) {
|
||||
const isApp = !!getClientConfig()?.isApp;
|
||||
const apiPath = ApiPath.Iflytek;
|
||||
baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
|
||||
}
|
||||
|
||||
if (baseUrl.endsWith("/")) {
|
||||
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
|
||||
}
|
||||
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) {
|
||||
baseUrl = "https://" + baseUrl;
|
||||
}
|
||||
|
||||
console.log("[Proxy Endpoint] ", baseUrl, path);
|
||||
|
||||
return [baseUrl, path].join("/");
|
||||
}
|
||||
|
||||
extractMessage(res: any) {
|
||||
return res.choices?.at(0)?.message?.content ?? "";
|
||||
}
|
||||
|
||||
speech(options: SpeechOptions): Promise<ArrayBuffer> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
|
||||
async chat(options: ChatOptions) {
|
||||
const messages: ChatOptions["messages"] = [];
|
||||
for (const v of options.messages) {
|
||||
const content = getMessageTextContent(v);
|
||||
messages.push({ role: v.role, content });
|
||||
}
|
||||
|
||||
const modelConfig = {
|
||||
...useAppConfig.getState().modelConfig,
|
||||
...useChatStore.getState().currentSession().mask.modelConfig,
|
||||
...{
|
||||
model: options.config.model,
|
||||
providerName: options.config.providerName,
|
||||
},
|
||||
};
|
||||
|
||||
const requestPayload: RequestPayload = {
|
||||
messages,
|
||||
stream: options.config.stream,
|
||||
model: modelConfig.model,
|
||||
temperature: modelConfig.temperature,
|
||||
presence_penalty: modelConfig.presence_penalty,
|
||||
frequency_penalty: modelConfig.frequency_penalty,
|
||||
top_p: modelConfig.top_p,
|
||||
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
|
||||
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
|
||||
};
|
||||
|
||||
console.log("[Request] Spark payload: ", requestPayload);
|
||||
|
||||
const shouldStream = !!options.config.stream;
|
||||
const controller = new AbortController();
|
||||
options.onController?.(controller);
|
||||
|
||||
try {
|
||||
const chatPath = this.path(Iflytek.ChatPath);
|
||||
const chatPayload = {
|
||||
method: "POST",
|
||||
body: JSON.stringify(requestPayload),
|
||||
signal: controller.signal,
|
||||
headers: getHeaders(),
|
||||
};
|
||||
|
||||
// Make a fetch request
|
||||
const requestTimeoutId = setTimeout(
|
||||
() => controller.abort(),
|
||||
REQUEST_TIMEOUT_MS,
|
||||
);
|
||||
|
||||
if (shouldStream) {
|
||||
let responseText = "";
|
||||
let remainText = "";
|
||||
let finished = false;
|
||||
let responseRes: Response;
|
||||
|
||||
// Animate response text to make it look smooth
|
||||
function animateResponseText() {
|
||||
if (finished || controller.signal.aborted) {
|
||||
responseText += remainText;
|
||||
console.log("[Response Animation] finished");
|
||||
return;
|
||||
}
|
||||
|
||||
if (remainText.length > 0) {
|
||||
const fetchCount = Math.max(1, Math.round(remainText.length / 60));
|
||||
const fetchText = remainText.slice(0, fetchCount);
|
||||
responseText += fetchText;
|
||||
remainText = remainText.slice(fetchCount);
|
||||
options.onUpdate?.(responseText, fetchText);
|
||||
}
|
||||
|
||||
requestAnimationFrame(animateResponseText);
|
||||
}
|
||||
|
||||
// Start animation
|
||||
animateResponseText();
|
||||
|
||||
const finish = () => {
|
||||
if (!finished) {
|
||||
finished = true;
|
||||
options.onFinish(responseText + remainText, responseRes);
|
||||
}
|
||||
};
|
||||
|
||||
controller.signal.onabort = finish;
|
||||
|
||||
fetchEventSource(chatPath, {
|
||||
fetch: fetch as any,
|
||||
...chatPayload,
|
||||
async onopen(res) {
|
||||
clearTimeout(requestTimeoutId);
|
||||
const contentType = res.headers.get("content-type");
|
||||
console.log("[Spark] request response content type: ", contentType);
|
||||
responseRes = res;
|
||||
if (contentType?.startsWith("text/plain")) {
|
||||
responseText = await res.clone().text();
|
||||
return finish();
|
||||
}
|
||||
|
||||
// Handle different error scenarios
|
||||
if (
|
||||
!res.ok ||
|
||||
!res.headers
|
||||
.get("content-type")
|
||||
?.startsWith(EventStreamContentType) ||
|
||||
res.status !== 200
|
||||
) {
|
||||
let extraInfo = await res.clone().text();
|
||||
try {
|
||||
const resJson = await res.clone().json();
|
||||
extraInfo = prettyObject(resJson);
|
||||
} catch {}
|
||||
|
||||
if (res.status === 401) {
|
||||
extraInfo = Locale.Error.Unauthorized;
|
||||
}
|
||||
|
||||
options.onError?.(
|
||||
new Error(
|
||||
`Request failed with status ${res.status}: ${extraInfo}`,
|
||||
),
|
||||
);
|
||||
return finish();
|
||||
}
|
||||
},
|
||||
onmessage(msg) {
|
||||
if (msg.data === "[DONE]" || finished) {
|
||||
return finish();
|
||||
}
|
||||
const text = msg.data;
|
||||
try {
|
||||
const json = JSON.parse(text);
|
||||
const choices = json.choices as Array<{
|
||||
delta: { content: string };
|
||||
}>;
|
||||
const delta = choices[0]?.delta?.content;
|
||||
|
||||
if (delta) {
|
||||
remainText += delta;
|
||||
}
|
||||
} catch (e) {
|
||||
console.error("[Request] parse error", text);
|
||||
options.onError?.(new Error(`Failed to parse response: ${text}`));
|
||||
}
|
||||
},
|
||||
onclose() {
|
||||
finish();
|
||||
},
|
||||
onerror(e) {
|
||||
options.onError?.(e);
|
||||
throw e;
|
||||
},
|
||||
openWhenHidden: true,
|
||||
});
|
||||
} else {
|
||||
const res = await fetch(chatPath, chatPayload);
|
||||
clearTimeout(requestTimeoutId);
|
||||
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text();
|
||||
options.onError?.(
|
||||
new Error(`Request failed with status ${res.status}: ${errorText}`),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const resJson = await res.json();
|
||||
const message = this.extractMessage(resJson);
|
||||
options.onFinish(message, res);
|
||||
}
|
||||
} catch (e) {
|
||||
console.log("[Request] failed to make a chat request", e);
|
||||
options.onError?.(e as Error);
|
||||
}
|
||||
}
|
||||
|
||||
async usage() {
|
||||
return {
|
||||
used: 0,
|
||||
total: 0,
|
||||
};
|
||||
}
|
||||
|
||||
async models(): Promise<LLMModel[]> {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
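SparkApi's animateResponseText drains the buffered remainText at roughly 1/60th of the backlog per animation frame, with a floor of one character, so typing speed adapts to how far the stream runs ahead:

    // remainText.length = 120 -> fetchCount = Math.max(1, Math.round(120 / 60)) = 2
    // remainText.length = 600 -> fetchCount = 10 characters emitted this frame
    // remainText.length = 3   -> fetchCount = 1 (the floor)
    // at ~60 fps each second consumes about 1 - (1 - 1/60)^60, roughly 63%,
    // of the pending backlog, so long bursts still render smoothly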
app/client/platforms/moonshot.ts (new file, 213 lines)
@@ -0,0 +1,213 @@
"use client";
|
||||
// azure and openai, using same models. so using same LLMApi.
|
||||
import {
|
||||
ApiPath,
|
||||
MOONSHOT_BASE_URL,
|
||||
Moonshot,
|
||||
REQUEST_TIMEOUT_MS,
|
||||
} from "@/app/constant";
|
||||
import {
|
||||
useAccessStore,
|
||||
useAppConfig,
|
||||
useChatStore,
|
||||
ChatMessageTool,
|
||||
usePluginStore,
|
||||
} from "@/app/store";
|
||||
import { stream } from "@/app/utils/chat";
|
||||
import {
|
||||
AgentChatOptions,
|
||||
ChatOptions,
|
||||
CreateRAGStoreOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
SpeechOptions,
|
||||
TranscriptionOptions,
|
||||
} from "../api";
|
||||
import { getClientConfig } from "@/app/config/client";
|
||||
import { getMessageTextContent } from "@/app/utils";
|
||||
import { RequestPayload } from "./openai";
|
||||
import { fetch } from "@/app/utils/stream";
|
||||
|
||||
export class MoonshotApi implements LLMApi {
|
||||
transcription(options: TranscriptionOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
toolAgentChat(options: AgentChatOptions): Promise<void> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
private disableListModels = true;
|
||||
|
||||
path(path: string): string {
|
||||
const accessStore = useAccessStore.getState();
|
||||
|
||||
let baseUrl = "";
|
||||
|
||||
if (accessStore.useCustomConfig) {
|
||||
baseUrl = accessStore.moonshotUrl;
|
||||
}
|
||||
|
||||
if (baseUrl.length === 0) {
|
||||
const isApp = !!getClientConfig()?.isApp;
|
||||
const apiPath = ApiPath.Moonshot;
|
||||
baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
|
||||
}
|
||||
|
||||
if (baseUrl.endsWith("/")) {
|
||||
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
|
||||
}
|
||||
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) {
|
||||
baseUrl = "https://" + baseUrl;
|
||||
}
|
||||
|
||||
console.log("[Proxy Endpoint] ", baseUrl, path);
|
||||
|
||||
return [baseUrl, path].join("/");
|
||||
}
|
||||
|
||||
extractMessage(res: any) {
|
||||
return res.choices?.at(0)?.message?.content ?? "";
|
||||
}
|
||||
|
||||
speech(options: SpeechOptions): Promise<ArrayBuffer> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
|
||||
async chat(options: ChatOptions) {
|
||||
const messages: ChatOptions["messages"] = [];
|
||||
for (const v of options.messages) {
|
||||
const content = getMessageTextContent(v);
|
||||
messages.push({ role: v.role, content });
|
||||
}
|
||||
|
||||
const modelConfig = {
|
||||
...useAppConfig.getState().modelConfig,
|
||||
...useChatStore.getState().currentSession().mask.modelConfig,
|
||||
...{
|
||||
model: options.config.model,
|
||||
providerName: options.config.providerName,
|
||||
},
|
||||
};
|
||||
|
||||
const requestPayload: RequestPayload = {
|
||||
messages,
|
||||
stream: options.config.stream,
|
||||
model: modelConfig.model,
|
||||
temperature: modelConfig.temperature,
|
||||
presence_penalty: modelConfig.presence_penalty,
|
||||
frequency_penalty: modelConfig.frequency_penalty,
|
||||
top_p: modelConfig.top_p,
|
||||
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
|
||||
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
|
||||
};
|
||||
|
||||
console.log("[Request] openai payload: ", requestPayload);
|
||||
|
||||
const shouldStream = !!options.config.stream;
|
||||
const controller = new AbortController();
|
||||
options.onController?.(controller);
|
||||
|
||||
try {
|
||||
const chatPath = this.path(Moonshot.ChatPath);
|
||||
const chatPayload = {
|
||||
method: "POST",
|
||||
body: JSON.stringify(requestPayload),
|
||||
signal: controller.signal,
|
||||
headers: getHeaders(),
|
||||
};
|
||||
|
||||
// make a fetch request
|
||||
const requestTimeoutId = setTimeout(
|
||||
() => controller.abort(),
|
||||
REQUEST_TIMEOUT_MS,
|
||||
);
|
||||
|
||||
if (shouldStream) {
|
||||
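        // Plugin tools are not wired up for Moonshot here; an empty tool list
        // is passed to stream(). The commented-out block below shows how the
        // plugin store would supply tools if this were enabled.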
        const [tools, funcs] = [[], {}];
        // const [tools, funcs] = usePluginStore
        //   .getState()
        //   .getAsTools(
        //     useChatStore.getState().currentSession().mask?.plugin || [],
        //   );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
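            // Streamed tool calls arrive in fragments: the first delta carries
            // the call id and function name, and later deltas append argument
            // text, which is accumulated into the matching runTools entry.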
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: appends the tool_calls message and the tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
@@ -2,12 +2,12 @@
// Azure and OpenAI use the same models, so they share the same LLMApi.
import {
  ApiPath,
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  OpenaiPath,
  Azure,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
  OPENAI_BASE_URL,
} from "@/app/constant";
import {
  ChatMessageTool,
@@ -101,7 +101,7 @@ export class ChatGPTApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
      baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
@@ -383,7 +383,7 @@ export class ChatGPTApi implements LLMApi {

        const resJson = await res.json();
        const message = await this.extractMessage(resJson);
        options.onFinish(message);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
@@ -663,20 +663,26 @@ export class ChatGPTApi implements LLMApi {
    });

    const resJson = (await res.json()) as OpenAIListModelResponse;
    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
    const chatModels = resJson.data?.filter(
      (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
    );
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    // Since OpenAI's disableListModels currently defaults to true, this branch is not reached in practice.
    let seq = 1000; // keep the ordering consistent with Constant.ts
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "openai",
        providerName: "OpenAI",
        providerType: "openai",
        sorted: 1,
      },
    }));
  }
286 app/client/platforms/tencent.ts Normal file
@@ -0,0 +1,286 @@
"use client";
import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {
  AgentChatOptions,
  ChatOptions,
  CreateRAGStoreOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
  TranscriptionOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray";
import isObject from "lodash-es/isObject";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  Messages: {
    Role: "system" | "user" | "assistant";
    Content: string | MultimodalContent[];
  }[];
  Stream?: boolean;
  Model: string;
  Temperature: number;
  TopP: number;
}

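// Recursively rewrites object keys from snake_case to PascalCase
// (e.g. top_p -> TopP), matching the field casing of Tencent's Hunyuan API.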
function capitalizeKeys(obj: any): any {
  if (isArray(obj)) {
    return obj.map(capitalizeKeys);
  } else if (isObject(obj)) {
    return mapValues(
      mapKeys(obj, (value: any, key: string) =>
        key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()),
      ),
      capitalizeKeys,
    );
  } else {
    return obj;
  }
}

export class HunyuanApi implements LLMApi {
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
  toolAgentChat(options: AgentChatOptions): Promise<void> {
    throw new Error("Method not implemented.");
  }
  createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
  path(): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.tencentUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl);
    return baseUrl;
  }

  extractMessage(res: any) {
    return res.Choices?.at(0)?.Message?.Content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v, index) => ({
      // "The system role in Messages must appear at the very beginning of the list"
      role: index !== 0 && v.role === "system" ? "user" : v.role,
      content: visionModel ? v.content : getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const requestPayload: RequestPayload = capitalizeKeys({
      model: modelConfig.model,
      messages,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      stream: options.config.stream,
    });

    console.log("[Request] Tencent payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path();
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let responseRes: Response;

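        // Each animation frame moves roughly 1/60 of the queued text from
        // remainText into responseText, so long chunks render gradually
        // instead of appearing all at once.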
        // animate the response so it renders smoothly
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation loop
        animateResponseText();

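        // finish() flushes any queued text and invokes onFinish exactly once,
        // whether the stream ends normally, errors, or is aborted.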
        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText, responseRes);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Tencent] request response content type: ",
              contentType,
            );
            responseRes = res;
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

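            // Non-SSE or error responses: collect the body (plus a localized
            // hint for 401s) and surface it as the final message text.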
            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.Choices as Array<{
                Delta: { Content: string };
              }>;
              const delta = choices[0]?.Delta?.Content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
206 app/client/platforms/xai.ts Normal file
@@ -0,0 +1,206 @@
"use client";
// Azure and OpenAI use the same models, so they share the same LLMApi.
import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
  AgentChatOptions,
  ChatOptions,
  CreateRAGStoreOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
  TranscriptionOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export class XAIApi implements LLMApi {
  transcription(options: TranscriptionOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
  toolAgentChat(options: AgentChatOptions): Promise<void> {
    throw new Error("Method not implemented.");
  }
  createRAGStore(options: CreateRAGStoreOptions): Promise<string> {
    throw new Error("Method not implemented.");
  }
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.xaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.XAI;
      baseUrl = isApp ? XAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] xai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(XAI.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        const [tools, funcs] = [[], {}];
        // const [tools, funcs] = usePluginStore
        //   .getState()
        //   .getAsTools(
        //     useChatStore.getState().currentSession().mask?.plugin || [],
        //   );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: appends the tool_calls message and the tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}