commit 48fe84024f
parent f704e7a271
Author: Hk-Gosuto
Date: 2025-03-07 16:20:06 +08:00

7 changed files with 293 additions and 103 deletions

View File

@@ -31,13 +31,13 @@ async function handle(req: NextRequest) {
   // const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
   const apiKey = getOpenAIApiKey(token);
   const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
-  const embeddings = new OpenAIEmbeddings(
-    {
-      modelName: serverConfig.ragEmbeddingModel ?? "text-embedding-3-large",
-      openAIApiKey: apiKey,
-    },
-    { basePath: baseUrl },
-  );
+  const embeddings = new OpenAIEmbeddings({
+    modelName: serverConfig.ragEmbeddingModel ?? "text-embedding-3-large",
+    openAIApiKey: apiKey,
+    configuration: {
+      baseURL: baseUrl,
+    },
+  });
   // const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
   //   pineconeIndex,
   // });
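The hunk above shows this commit's recurring migration: newer @langchain/openai constructors take a single options object, and client-level settings such as a custom endpoint move into a nested `configuration` object (passed through to the underlying OpenAI SDK client) instead of a second `{ basePath }` argument. A minimal sketch of the new shape, assuming a recent @langchain/openai release; the URL is a placeholder, not a value from this commit:

import { OpenAIEmbeddings } from "@langchain/openai";

// One options object; client options live under `configuration`.
const embeddings = new OpenAIEmbeddings({
  modelName: "text-embedding-3-large",
  openAIApiKey: process.env.OPENAI_API_KEY,
  configuration: {
    baseURL: "https://example.com/v1", // placeholder proxy endpoint
  },
});

// embedQuery returns one embedding vector for a single input string.
const vector = await embeddings.embedQuery("hello world");
console.log(vector.length);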

View File

@@ -94,13 +94,11 @@ async function handle(req: NextRequest) {
       baseUrl: process.env.OLLAMA_BASE_URL,
     });
   } else {
-    embeddings = new OpenAIEmbeddings(
-      {
-        modelName: serverConfig.ragEmbeddingModel,
-        openAIApiKey: apiKey,
-      },
-      { basePath: baseUrl },
-    );
+    embeddings = new OpenAIEmbeddings({
+      modelName: serverConfig.ragEmbeddingModel,
+      openAIApiKey: apiKey,
+      configuration: { baseURL: baseUrl },
+    });
   }
   // https://js.langchain.com/docs/integrations/vectorstores/pinecone
   // https://js.langchain.com/docs/integrations/vectorstores/qdrant
@@ -181,12 +179,10 @@ async function handle(req: NextRequest) {
 }
 function bufferToBlob(buffer: Buffer, mimeType?: string): Blob {
-  const arrayBuffer: ArrayBuffer = buffer.buffer.slice(
-    buffer.byteOffset,
-    buffer.byteOffset + buffer.byteLength,
-  );
+  const arrayBuffer = new Uint8Array(buffer).buffer;
   return new Blob([arrayBuffer], { type: mimeType || "" });
 }

 function getOpenAIApiKey(token: string) {
   const serverConfig = getServerSideConfig();
   const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
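The bufferToBlob rewrite replaces the offset-based slice with a plain copy. A Node Buffer is often a view into a larger shared pool, which is why the old code had to slice `buffer.buffer` by byteOffset; both forms copy the bytes, but `new Uint8Array(buffer).buffer` is shorter and yields a fresh, exactly-sized ArrayBuffer, which also satisfies stricter TypeScript typings that reject ArrayBufferLike. A self-contained sketch (Node 18+, where Blob is a global):

// Copy the Buffer's bytes into a fresh ArrayBuffer of exactly the right
// length, then wrap it in a Blob with an optional MIME type.
function bufferToBlob(buffer: Buffer, mimeType?: string): Blob {
  const arrayBuffer = new Uint8Array(buffer).buffer;
  return new Blob([arrayBuffer], { type: mimeType || "" });
}

const blob = bufferToBlob(Buffer.from("hello"), "text/plain");
console.log(blob.size, blob.type); // 5 "text/plain"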

View File

@@ -39,7 +39,11 @@ import {
   ChatPromptTemplate,
   MessagesPlaceholder,
 } from "@langchain/core/prompts";
-import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import {
+  AzureChatOpenAI,
+  ChatOpenAI,
+  OpenAIEmbeddings,
+} from "@langchain/openai";
 import { ChatAnthropic } from "@langchain/anthropic";
 import {
   BaseMessage,
@@ -74,6 +78,8 @@ export interface RequestBody {
   returnIntermediateSteps: boolean;
   useTools: (undefined | string)[];
   provider: ServiceProvider;
+  max_tokens?: number;
+  max_completion_tokens?: number;
 }

 export class ResponseBody {
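The two new optional fields mirror OpenAI's API split over output caps: conventional chat models take `max_tokens`, while the o1/o3 reasoning models reject that parameter and expect `max_completion_tokens` instead. A sketch of a request carrying both, using the RequestBody interface above; the model name is only illustrative:

const reqBody: Partial<RequestBody> = {
  model: "o3-mini", // illustrative
  max_tokens: 4096, // honored by conventional chat models
  max_completion_tokens: 4096, // used when an o1/o3 model is selected
};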
export class ResponseBody {
@@ -254,14 +260,14 @@
         },
       });
     }
-    return new ChatOpenAI(
-      {
-        temperature: 0,
-        modelName: reqBody.model,
-        openAIApiKey: apiKey,
-      },
-      { basePath: baseUrl },
-    );
+    return new ChatOpenAI({
+      temperature: 0,
+      modelName: reqBody.model,
+      openAIApiKey: apiKey,
+      configuration: {
+        baseURL: baseUrl,
+      },
+    });
   }

   getToolEmbeddings(reqBody: RequestBody, apiKey: string, baseUrl: string) {
@@ -275,19 +281,19 @@
         return null;
       }
     }
-    return new OpenAIEmbeddings(
-      {
-        openAIApiKey: apiKey,
-      },
-      { basePath: baseUrl },
-    );
+    return new OpenAIEmbeddings({
+      openAIApiKey: apiKey,
+      configuration: {
+        baseURL: baseUrl,
+      },
+    });
   }

   getLLM(reqBody: RequestBody, apiKey: string, baseUrl: string) {
     const serverConfig = getServerSideConfig();
     if (reqBody.isAzure || serverConfig.isAzure) {
       console.log("[use Azure ChatOpenAI]");
-      return new ChatOpenAI({
+      return new AzureChatOpenAI({
         temperature: reqBody.temperature,
         streaming: reqBody.stream,
         topP: reqBody.top_p,
@@ -299,22 +305,26 @@
           : serverConfig.azureApiVersion,
         azureOpenAIApiDeploymentName: reqBody.model,
         azureOpenAIBasePath: baseUrl,
+        maxTokens: reqBody.max_tokens,
+        maxCompletionTokens: reqBody.max_completion_tokens,
       });
     }
     if (reqBody.provider === ServiceProvider.OpenAI) {
       console.log("[use ChatOpenAI]");
-      return new ChatOpenAI(
-        {
-          modelName: reqBody.model,
-          openAIApiKey: apiKey,
-          temperature: reqBody.temperature,
-          streaming: reqBody.stream,
-          topP: reqBody.top_p,
-          presencePenalty: reqBody.presence_penalty,
-          frequencyPenalty: reqBody.frequency_penalty,
-        },
-        { basePath: baseUrl },
-      );
+      return new ChatOpenAI({
+        modelName: reqBody.model,
+        openAIApiKey: apiKey,
+        temperature: reqBody.temperature,
+        streaming: reqBody.stream,
+        topP: reqBody.top_p,
+        presencePenalty: reqBody.presence_penalty,
+        frequencyPenalty: reqBody.frequency_penalty,
+        maxTokens: reqBody.max_tokens,
+        maxCompletionTokens: reqBody.max_completion_tokens,
+        configuration: {
+          baseURL: baseUrl,
+        },
+      });
     }
     if (reqBody.provider === ServiceProvider.Anthropic) {
       console.log("[use ChatAnthropic]");
@@ -439,11 +449,16 @@
       });

       const pastMessages = new Array();
+      const isO1OrO3 =
+        reqBody.model.startsWith("o1") || reqBody.model.startsWith("o3");

       reqBody.messages
         .slice(0, reqBody.messages.length - 1)
         .forEach((message) => {
-          if (message.role === "system" && typeof message.content === "string")
+          if (
+            !isO1OrO3 &&
+            message.role === "system" &&
+            typeof message.content === "string"
+          )
             pastMessages.push(new SystemMessage(message.content));
           if (message.role === "user")
             typeof message.content === "string"
@@ -458,6 +473,15 @@
             pastMessages.push(new AIMessage(message.content));
         });

+      reqBody.temperature = !isO1OrO3 ? reqBody.temperature : 1;
+      reqBody.presence_penalty = !isO1OrO3 ? reqBody.presence_penalty : 0;
+      reqBody.frequency_penalty = !isO1OrO3 ? reqBody.frequency_penalty : 0;
+      reqBody.top_p = !isO1OrO3 ? reqBody.top_p : 1;
+      if (isO1OrO3) {
+        reqBody.max_completion_tokens = reqBody.max_tokens;
+      }
+
       let llm = this.getLLM(reqBody, apiKey, baseUrl);

       const MEMORY_KEY = "chat_history";
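The block added here, together with the isO1OrO3 guard in the previous hunk, adapts requests for OpenAI's o1/o3 reasoning models: they accept only the default sampling settings (temperature 1, top_p 1, zero penalties), early o1 releases rejected the system role, and the output cap must travel as max_completion_tokens rather than max_tokens. The same normalization as a standalone sketch; normalizeForReasoningModels is a hypothetical helper, not part of the commit:

// Hypothetical helper mirroring the guards above: pin sampling settings
// to the only values o1/o3 accept and move the output cap over to
// max_completion_tokens.
function normalizeForReasoningModels(reqBody: RequestBody): RequestBody {
  const isO1OrO3 =
    reqBody.model.startsWith("o1") || reqBody.model.startsWith("o3");
  if (!isO1OrO3) return reqBody;
  return {
    ...reqBody,
    temperature: 1,
    top_p: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    max_completion_tokens: reqBody.max_tokens,
  };
}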

View File

@@ -49,14 +49,11 @@ async function handle(req: NextRequest) {
       baseUrl: process.env.OLLAMA_BASE_URL,
     });
   } else {
-    ragEmbeddings = new OpenAIEmbeddings(
-      {
-        modelName:
-          process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
-        openAIApiKey: apiKey,
-      },
-      { basePath: baseUrl },
-    );
+    ragEmbeddings = new OpenAIEmbeddings({
+      modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
+      openAIApiKey: apiKey,
+      configuration: { baseURL: baseUrl },
+    });
   }

   var dalleCallback = async (data: string) => {