feat: support global plugin configuration

Author: Hk-Gosuto
Date: 2023-08-16 13:38:15 +08:00
Parent: dd0b451a7c
Commit: 76eb2afd06

11 changed files with 147 additions and 8 deletions

File: app/client/api.ts

@@ -23,6 +23,11 @@ export interface LLMConfig {
   frequency_penalty?: number;
 }
 
+export interface LLMAgentConfig {
+  maxIterations: number;
+  returnIntermediateSteps: boolean;
+}
+
 export interface ChatOptions {
   messages: RequestMessage[];
   config: LLMConfig;
@@ -33,6 +38,17 @@ export interface ChatOptions {
   onController?: (controller: AbortController) => void;
 }
 
+export interface AgentChatOptions {
+  messages: RequestMessage[];
+  config: LLMConfig;
+  agentConfig: LLMAgentConfig;
+  onToolUpdate?: (toolName: string, toolInput: string) => void;
+  onUpdate?: (message: string, chunk: string) => void;
+  onFinish: (message: string) => void;
+  onError?: (err: Error) => void;
+  onController?: (controller: AbortController) => void;
+}
+
 export interface LLMUsage {
   used: number;
   total: number;
@@ -45,7 +61,7 @@ export interface LLMModel {
 
 export abstract class LLMApi {
   abstract chat(options: ChatOptions): Promise<void>;
-  abstract toolAgentChat(options: ChatOptions): Promise<void>;
+  abstract toolAgentChat(options: AgentChatOptions): Promise<void>;
   abstract usage(): Promise<LLMUsage>;
   abstract models(): Promise<LLMModel[]>;
 }
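For context, a minimal sketch of how a caller might drive the new signature (not code from this commit; the `api` instance, the model name, and the handler bodies are illustrative assumptions, and `LLMConfig` is assumed to carry the usual `model`/`stream` fields):

```ts
// Minimal usage sketch (not part of this commit).
// `api` stands in for any concrete LLMApi implementation, e.g. ChatGPTApi.
declare const api: LLMApi;

async function askWithTools() {
  await api.toolAgentChat({
    messages: [{ role: "user", content: "What is 37 * 91? Use a tool." }],
    config: { model: "gpt-3.5-turbo", stream: true },
    agentConfig: {
      maxIterations: 10, // upper bound on the agent's tool-use loop
      returnIntermediateSteps: true, // surface each tool call to the UI
    },
    onToolUpdate: (toolName, toolInput) =>
      console.log(`[tool] ${toolName}(${toolInput})`),
    onUpdate: (message, chunk) => console.log("[partial]", chunk),
    onFinish: (message) => console.log("[final]", message),
    onError: (err) => console.error("[agent error]", err),
  });
}
```

Because `onToolUpdate`, `onUpdate`, `onError`, and `onController` are all optional, existing `chat`-style call sites only need to supply the required `agentConfig` object to migrate.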

File: app/client/platforms/openai.ts

@@ -6,7 +6,14 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 
-import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import {
+  AgentChatOptions,
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+} from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
@@ -188,7 +195,7 @@ export class ChatGPTApi implements LLMApi {
     }
   }
 
-  async toolAgentChat(options: ChatOptions) {
+  async toolAgentChat(options: AgentChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
       content: v.content,
@@ -210,6 +217,8 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
+      maxIterations: options.agentConfig.maxIterations,
+      returnIntermediateSteps: options.agentConfig.returnIntermediateSteps,
     };
 
     console.log("[Request] openai payload: ", requestPayload);
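With the two forwarded fields in place, the body posted for an agent chat roughly takes the shape below (a sketch with example values; `messages`, `model`, `temperature`, and `stream` are assumed from the usual OpenAI-style payload, while the last two fields are the additions from this hunk):

```ts
// Rough shape of the agent request payload after this change
// (example values, not repository defaults).
const examplePayload = {
  messages: [{ role: "user", content: "Summarize the latest news." }],
  model: "gpt-3.5-turbo",
  temperature: 0.5,
  presence_penalty: 0,
  frequency_penalty: 0,
  top_p: 1,
  stream: true,
  // New: agent knobs forwarded from the global plugin config.
  maxIterations: 10,
  returnIntermediateSteps: true,
};
```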