Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2026-02-11 08:54:29 +08:00)
Merge d7911dc303 into 8ad63a6c25
@@ -18,6 +18,7 @@ import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { StepfunApi } from "./platforms/stepfun";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
@@ -146,6 +147,9 @@ export class ClientApi {
      case ModelProvider.Hunyuan:
        this.llm = new HunyuanApi();
        break;
      case ModelProvider.Stepfun:
        this.llm = new StepfunApi();
        break;
      case ModelProvider.Moonshot:
        this.llm = new MoonshotApi();
        break;
@@ -237,6 +241,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
  const isBaidu = modelConfig.providerName === ServiceProvider.Baidu;
  const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
  const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
  const isStepfun = modelConfig.providerName === ServiceProvider.Stepfun;
  const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
  const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
  const isEnabledAccessControl = accessStore.enabledAccessControl();
@@ -250,6 +255,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
        ? accessStore.bytedanceApiKey
        : isAlibaba
          ? accessStore.alibabaApiKey
          : isStepfun
            ? accessStore.stepfunApiKey
            : isMoonshot
              ? accessStore.moonshotApiKey
              : isIflytek
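Each new provider extends both the boolean list and this ternary chain. For illustration only, a hypothetical table-driven alternative (not part of this diff; it uses only the accessStore key fields visible in the hunk above):

const providerApiKeys: Partial<Record<ServiceProvider, string>> = {
  [ServiceProvider.ByteDance]: accessStore.bytedanceApiKey,
  [ServiceProvider.Alibaba]: accessStore.alibabaApiKey,
  [ServiceProvider.Stepfun]: accessStore.stepfunApiKey,
  [ServiceProvider.Moonshot]: accessStore.moonshotApiKey,
};
const apiKey = providerApiKeys[modelConfig.providerName] ?? "";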
@@ -324,6 +331,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
      return new ClientApi(ModelProvider.Qwen);
    case ServiceProvider.Tencent:
      return new ClientApi(ModelProvider.Hunyuan);
    case ServiceProvider.Stepfun:
      return new ClientApi(ModelProvider.Stepfun);
    case ServiceProvider.Moonshot:
      return new ClientApi(ModelProvider.Moonshot);
    case ServiceProvider.Iflytek:
      return new ClientApi(ModelProvider.Iflytek);
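With both files in place, Stepfun is reachable through the same client factory as every other provider. A minimal usage sketch (the model id "step-1-8k" is an assumption, not taken from this diff):

const api = getClientApi(ServiceProvider.Stepfun); // resolves to StepfunApi via the switch above
await api.llm.chat({
  messages: [{ role: "user", content: "Hello" }],
  config: { model: "step-1-8k", providerName: ServiceProvider.Stepfun, stream: false }, // model id assumed
  onFinish(message) {
    console.log("[Stepfun] reply:", message);
  },
});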
app/client/platforms/stepfun.ts (139 lines, new file)
@@ -0,0 +1,139 @@
"use client";
// Stepfun's API is OpenAI-compatible, so this client mirrors the OpenAI
// LLMApi implementation and reuses its request/response types.
import {
  ApiPath,
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  Stepfun,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import { preProcessImageContent } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";

import { OpenAIListModelResponse, RequestPayload } from "./openai";

export class StepfunApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = accessStore.useCustomConfig ? accessStore.stepfunUrl : "";

    if (!baseUrl) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Stepfun;
      baseUrl = isApp ? `${DEFAULT_API_HOST}/proxy${apiPath}` : apiPath;
    }

    baseUrl = baseUrl.replace(/\/$/, ""); // remove trailing slash
    if (!/^https?:\/\//.test(baseUrl) && !baseUrl.startsWith(ApiPath.Stepfun)) {
      baseUrl = `https://${baseUrl}`;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);
    return `${baseUrl}/${path}`;
  }
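  // Example resolution (assuming ApiPath.Stepfun === "/api/stepfun", following
  // the pattern of the other providers; constant.ts is not part of this diff):
  //   web build: path("v1/chat/completions") -> "/api/stepfun/v1/chat/completions"
  //   app build: path("v1/chat/completions") -> `${DEFAULT_API_HOST}/proxy/api/stepfun/v1/chat/completions`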

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
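  // For example, extractMessage({ choices: [{ message: { content: "Hi" } }] })
  // returns "Hi", and "" when choices are absent.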

  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Requests here are text-only: multimodal messages are flattened to
      // their text content.
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally omitted from the payload.
    };

    console.log("[Request] stepfun payload: ", requestPayload);
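    // A representative serialized payload (model id assumed, not from this diff):
    // {
    //   "messages": [{ "role": "user", "content": "Hello" }],
    //   "stream": true,
    //   "model": "step-1-8k",
    //   "temperature": 0.5,
    //   "presence_penalty": 0,
    //   "frequency_penalty": 0,
    //   "top_p": 1
    // }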

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Stepfun.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request once it exceeds the global timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
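        // The streaming branch was elided in this view. What follows is a
        // hedged sketch modeled on the repo's other fetchEventSource-based
        // platform clients, not the verbatim contents of the new file.
        let responseText = "";
        let finished = false;

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText);
          }
        };

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            if (!res.ok || !contentType?.startsWith(EventStreamContentType)) {
              // Not an SSE stream (e.g. a JSON error body): surface it as text.
              responseText = prettyObject(await res.clone().text());
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            try {
              const json = JSON.parse(msg.data);
              const delta = json.choices?.at(0)?.delta?.content;
              if (delta) {
                responseText += delta;
                options.onUpdate?.(responseText, delta);
              }
            } catch (e) {
              console.error("[Request] failed to parse SSE chunk", msg.data);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e; // rethrow to stop fetch-event-source's automatic retry
          },
          openWhenHidden: true,
        });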
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        if (!res.ok) {
          throw new Error(`Request failed with status ${res.status}`);
        }

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.error("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}