sijinhui
2024-09-19 00:07:37 +08:00
52 changed files with 3763 additions and 2647 deletions

app/client/api.ts (View File)

@@ -1,7 +1,6 @@
import { getClientConfig } from "../config/client";
import {
  ACCESS_CODE_PREFIX,
  Azure,
  // AZURE_MODELS,
  ModelProvider,
  ServiceProvider,
@@ -26,13 +25,8 @@ import { SparkApi } from "./platforms/iflytek";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
export const Models = [
"gpt-3.5-turbo-16k",
"gpt-4-0613",
"gpt-4-32k",
"midjourney",
"emini-pro",
] as const;
export const Models = ["gpt-3.5-turbo", "gpt-4", "midjourney"] as const;
export const TTSModels = ["tts-1", "tts-1-hd"] as const;
export type ChatModel = ModelType;
export interface MultimodalContent {
@@ -61,6 +55,15 @@ export interface LLMConfig {
  style?: DalleRequestPayload["style"];
}
export interface SpeechOptions {
  model: string;
  input: string;
  voice: string;
  response_format?: string;
  speed?: number;
  onController?: (controller: AbortController) => void;
}
export interface ChatOptions {
  messages: RequestMessage[];
  config: LLMConfig;
@@ -96,6 +99,7 @@ export interface LLMModelProvider {
export abstract class LLMApi {
  abstract chat(options: ChatOptions): Promise<void>;
  abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
  abstract usage(): Promise<LLMUsage>;
  abstract models(): Promise<LLMModel[]>;
}
@@ -214,13 +218,16 @@ export function validString(x: string): boolean {
  return x?.length > 0;
}
export function getHeaders() {
export function getHeaders(ignoreHeaders: boolean = false) {
  const accessStore = useAccessStore.getState();
  const chatStore = useChatStore.getState();
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    Accept: "application/json",
  };
  let headers: Record<string, string> = {};
  if (!ignoreHeaders) {
    headers = {
      "Content-Type": "application/json",
      Accept: "application/json",
    };
  }
  const clientConfig = getClientConfig();
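
Taken together, the api.ts hunks wire text-to-speech into the client abstraction: SpeechOptions mirrors the shape of OpenAI's audio/speech request body, speech() joins chat(), usage() and models() on the abstract LLMApi, and getHeaders() gains an ignoreHeaders flag that skips the default JSON headers (presumably for callers that must set their own Content-Type). A minimal usage sketch under those assumptions; "api" stands for any concrete LLMApi instance, and the Web Audio playback is illustrative, not code from this commit:

// Sketch only, not part of the diff.
async function speak(api: LLMApi, text: string): Promise<void> {
  const buffer: ArrayBuffer = await api.speech({
    model: "tts-1", // one of the new TTSModels ("tts-1" | "tts-1-hd")
    input: text,
    voice: "alloy", // an OpenAI voice name, chosen here for illustration
    response_format: "mp3",
    speed: 1.0,
    onController: (c) => {
      // stash the AbortController if the UI needs a cancel action
    },
  });
  // Decode and play the returned audio in the browser.
  const ctx = new AudioContext();
  const source = ctx.createBufferSource();
  source.buffer = await ctx.decodeAudioData(buffer);
  source.connect(ctx.destination);
  source.start();
}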

app/client/platforms/alibaba.ts (View File)

@@ -12,6 +12,7 @@ import {
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
@@ -83,6 +84,10 @@ export class QwenApi implements LLMApi {
    return res?.output?.choices?.at(0)?.message?.content ?? "";
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
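
QwenApi is the first provider in this commit to satisfy the new abstract method with a stub that throws "Method not implemented." (Claude, Ernie, Doubao, Gemini, Spark, Moonshot and Hunyuan below follow the same pattern), so callers have to treat speech() as an optional capability. A hedged sketch of defensive caller code, not part of the diff:

// Assumption: probe for TTS support by catching the stub's error.
async function trySpeech(api: LLMApi, input: string): Promise<ArrayBuffer | null> {
  try {
    return await api.speech({ model: "tts-1", input, voice: "alloy" });
  } catch {
    return null; // this provider has no TTS yet; fall back or disable the feature
  }
}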

app/client/platforms/anthropic.ts (View File)

@@ -1,5 +1,5 @@
import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api";
import { Anthropic, ApiPath } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api";
import {
  useAccessStore,
  useAppConfig,
@@ -9,13 +9,6 @@ import {
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import Locale from "../../locales";
import { prettyObject } from "@/app/utils/format";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
@@ -80,6 +73,10 @@ const ClaudeMapper = {
const keys = ["claude-2", "claude-instant-1"];
export class ClaudeApi implements LLMApi {
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  extractMessage(res: any) {
    console.log("[Response] claude response: ", res);

app/client/platforms/baidu.ts (View File)

@@ -14,6 +14,7 @@ import {
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
@@ -75,6 +76,10 @@ export class ErnieApi implements LLMApi {
    return [baseUrl, path].join("/");
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",

app/client/platforms/bytedance.ts (View File)

@@ -13,6 +13,7 @@ import {
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
@@ -77,6 +78,10 @@ export class DoubaoApi implements LLMApi {
    return res.choices?.at(0)?.message?.content ?? "";
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,

app/client/platforms/google.ts (View File)

@@ -1,5 +1,12 @@
import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  SpeechOptions,
} from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
@@ -56,6 +63,10 @@ export class GeminiProApi implements LLMApi {
""
);
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions): Promise<void> {
const apiClient = this;
let multimodal = false;

app/client/platforms/iflytek.ts (View File)

@@ -7,7 +7,13 @@ import {
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
@@ -17,7 +23,7 @@ import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { OpenAIListModelResponse, RequestPayload } from "./openai";
import { RequestPayload } from "./openai";
export class SparkApi implements LLMApi {
  private disableListModels = true;
@@ -53,6 +59,10 @@ export class SparkApi implements LLMApi {
    return res.choices?.at(0)?.message?.content ?? "";
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {

app/client/platforms/moonshot.ts (View File)

@@ -3,10 +3,8 @@
import {
  ApiPath,
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  Moonshot,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
} from "@/app/constant";
import {
  useAccessStore,
@@ -15,28 +13,17 @@ import {
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { stream } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { OpenAIListModelResponse, RequestPayload } from "./openai";
import { RequestPayload } from "./openai";
export class MoonshotApi implements LLMApi {
  private disableListModels = true;
@@ -72,6 +59,10 @@ export class MoonshotApi implements LLMApi {
    return res.choices?.at(0)?.message?.content ?? "";
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {

app/client/platforms/openai.ts (View File)

@@ -34,17 +34,12 @@ import {
  LLMModel,
  LLMUsage,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
  isDalle3 as _isDalle3,
} from "@/app/utils";
@@ -148,6 +143,44 @@ export class ChatGPTApi implements LLMApi {
    return res.choices?.at(0)?.message?.content ?? res;
  }
  async speech(options: SpeechOptions): Promise<ArrayBuffer> {
    const requestPayload = {
      model: options.model,
      input: options.input,
      voice: options.voice,
      response_format: options.response_format,
      speed: options.speed,
    };
    console.log("[Request] openai speech payload: ", requestPayload);
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      const speechPath = this.path(OpenaiPath.SpeechPath);
      const speechPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      const res = await fetch(speechPath, speechPayload);
      clearTimeout(requestTimeoutId);
      return await res.arrayBuffer();
    } catch (e) {
      console.log("[Request] failed to make a speech request", e);
      throw e;
    }
  }
  async chat(options: ChatOptions) {
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
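
Only ChatGPTApi implements speech() for real, and it follows the same conventions as chat(): log the payload, hand the AbortController to the caller via onController, race the fetch against a REQUEST_TIMEOUT_MS watchdog, and return the raw ArrayBuffer for the caller to decode. A sketch of the cancellation path this enables; the caller code is assumed, not part of the commit:

// Assumed caller code: wiring the exposed controller to a stop button.
let speechController: AbortController | undefined;

const audio = new ChatGPTApi()
  .speech({
    model: "tts-1-hd",
    input: "Hello there",
    voice: "alloy",
    onController: (c) => (speechController = c),
  })
  .catch((e) => console.log("speech request cancelled or failed", e));

// Later, e.g. from a stop button: aborting rejects the pending fetch.
speechController?.abort();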

app/client/platforms/tencent.ts (View File)

@@ -8,6 +8,7 @@ import {
  LLMApi,
  LLMModel,
  MultimodalContent,
  SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
@@ -89,6 +90,10 @@ export class HunyuanApi implements LLMApi {
    return res.Choices?.at(0)?.Message?.Content ?? "";
  }
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages = options.messages.map((v, index) => ({