commit 5a5c45d44c
Author: GH Action - Upstream Sync
Date:   2023-12-25 18:00:56 +00:00

8 changed files with 108 additions and 64 deletions

View File

@@ -1,7 +1,7 @@
 import { NextRequest } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import md5 from "spark-md5";
-import { ACCESS_CODE_PREFIX } from "../constant";
+import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant";
 
 function getIP(req: NextRequest) {
   let ip = req.ip ?? req.headers.get("x-real-ip");
@@ -16,15 +16,15 @@ function getIP(req: NextRequest) {
 function parseApiKey(bearToken: string) {
   const token = bearToken.trim().replaceAll("Bearer ", "").trim();
 
-  const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX);
+  const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
 
   return {
-    accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
-    apiKey: isOpenAiKey ? token : "",
+    accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
+    apiKey: isApiKey ? token : "",
   };
 }
 
-export function auth(req: NextRequest) {
+export function auth(req: NextRequest, modelProvider: ModelProvider) {
   const authToken = req.headers.get("Authorization") ?? "";
 
   // check if it is openai api key or user token
@@ -49,22 +49,23 @@ export function auth(req: NextRequest) {
   if (serverConfig.hideUserApiKey && !!apiKey) {
     return {
       error: true,
-      msg: "you are not allowed to access openai with your own api key",
+      msg: "you are not allowed to access with your own api key",
     };
   }
 
   // if user does not provide an api key, inject system api key
   if (!apiKey) {
-    const serverApiKey = serverConfig.isAzure
-      ? serverConfig.azureApiKey
-      : serverConfig.apiKey;
-    if (serverApiKey) {
+    const serverConfig = getServerSideConfig();
+
+    const systemApiKey =
+      modelProvider === ModelProvider.GeminiPro
+        ? serverConfig.googleApiKey
+        : serverConfig.isAzure
+        ? serverConfig.azureApiKey
+        : serverConfig.apiKey;
+    if (systemApiKey) {
       console.log("[Auth] use system api key");
-      req.headers.set(
-        "Authorization",
-        `${serverConfig.isAzure ? "" : "Bearer "}${serverApiKey}`,
-      );
+      req.headers.set("Authorization", `Bearer ${systemApiKey}`);
     } else {
       console.log("[Auth] admin did not provide an api key");
     }

View File

@@ -9,8 +9,21 @@ const serverConfig = getServerSideConfig();
 export async function requestOpenai(req: NextRequest) {
   const controller = new AbortController();
 
-  const authValue = req.headers.get("Authorization") ?? "";
-  const authHeaderName = serverConfig.isAzure ? "api-key" : "Authorization";
+  var authValue,
+    authHeaderName = "";
+  if (serverConfig.isAzure) {
+    authValue =
+      req.headers
+        .get("Authorization")
+        ?.trim()
+        .replaceAll("Bearer ", "")
+        .trim() ?? "";
+
+    authHeaderName = "api-key";
+  } else {
+    authValue = req.headers.get("Authorization") ?? "";
+    authHeaderName = "Authorization";
+  }
 
   let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll(
     "/api/openai/",

View File

@@ -1,7 +1,7 @@
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "../../auth";
 import { getServerSideConfig } from "@/app/config/server";
-import { GEMINI_BASE_URL, Google } from "@/app/constant";
+import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant";
 
 async function handle(
   req: NextRequest,
@@ -39,10 +39,18 @@ async function handle(
     10 * 60 * 1000,
   );
 
+  const authResult = auth(req, ModelProvider.GeminiPro);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
   const bearToken = req.headers.get("Authorization") ?? "";
   const token = bearToken.trim().replaceAll("Bearer ", "").trim();
 
   const key = token ? token : serverConfig.googleApiKey;
+
   if (!key) {
     return NextResponse.json(
       {
@@ -56,7 +64,6 @@ async function handle(
   }
 
   const fetchUrl = `${baseUrl}/${path}?key=${key}`;
-
   const fetchOptions: RequestInit = {
     headers: {
       "Content-Type": "application/json",

View File

@@ -1,6 +1,6 @@
 import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
 import { getServerSideConfig } from "@/app/config/server";
-import { OpenaiPath } from "@/app/constant";
+import { ModelProvider, OpenaiPath } from "@/app/constant";
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "../../auth";
@@ -45,7 +45,7 @@ async function handle(
     );
   }
 
-  const authResult = auth(req);
+  const authResult = auth(req, ModelProvider.GPT);
   if (authResult.error) {
     return NextResponse.json(authResult, {
       status: 401,
@@ -75,4 +75,22 @@ export const GET = handle;
 export const POST = handle;
 
 export const runtime = "edge";
-export const preferredRegion = ['arn1', 'bom1', 'cdg1', 'cle1', 'cpt1', 'dub1', 'fra1', 'gru1', 'hnd1', 'iad1', 'icn1', 'kix1', 'lhr1', 'pdx1', 'sfo1', 'sin1', 'syd1'];
+export const preferredRegion = [
+  "arn1",
+  "bom1",
+  "cdg1",
+  "cle1",
+  "cpt1",
+  "dub1",
+  "fra1",
+  "gru1",
+  "hnd1",
+  "iad1",
+  "icn1",
+  "kix1",
+  "lhr1",
+  "pdx1",
+  "sfo1",
+  "sin1",
+  "syd1",
+];

View File

@@ -21,10 +21,24 @@ export class GeminiProApi implements LLMApi {
   }
   async chat(options: ChatOptions): Promise<void> {
     const messages = options.messages.map((v) => ({
-      role: v.role.replace("assistant", "model").replace("system", "model"),
+      role: v.role.replace("assistant", "model").replace("system", "user"),
       parts: [{ text: v.content }],
     }));
 
+    // google requires that role in neighboring messages must not be the same
+    for (let i = 0; i < messages.length - 1; ) {
+      // Check if current and next item both have the role "model"
+      if (messages[i].role === messages[i + 1].role) {
+        // Concatenate the 'parts' of the current and next item
+        messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
+        // Remove the next item
+        messages.splice(i + 1, 1);
+      } else {
+        // Move to the next item
+        i++;
+      }
+    }
+
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -43,14 +57,6 @@ export class GeminiProApi implements LLMApi {
         topP: modelConfig.top_p,
         // "topK": modelConfig.top_k,
       },
-      // stream: options.config.stream,
-      // model: modelConfig.model,
-      // temperature: modelConfig.temperature,
-      // presence_penalty: modelConfig.presence_penalty,
-      // frequency_penalty: modelConfig.frequency_penalty,
-      // top_p: modelConfig.top_p,
-      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
-      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
     console.log("[Request] google payload: ", requestPayload);

View File

@@ -65,6 +65,7 @@ export const getServerSideConfig = () => {
   }
 
   const isAzure = !!process.env.AZURE_URL;
+  const isGoogle = !!process.env.GOOGLE_API_KEY;
 
   const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
   const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
@@ -84,6 +85,7 @@ export const getServerSideConfig = () => {
     azureApiKey: process.env.AZURE_API_KEY,
     azureApiVersion: process.env.AZURE_API_VERSION,
 
+    isGoogle,
     googleApiKey: process.env.GOOGLE_API_KEY,
     googleUrl: process.env.GOOGLE_URL,

View File

@@ -389,24 +389,22 @@ export const useChatStore = createPersistStore(
       const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
 
       var systemPrompts: ChatMessage[] = [];
-      if (modelConfig.model !== "gemini-pro") {
-        systemPrompts = shouldInjectSystemPrompts
-          ? [
-              createMessage({
-                role: "system",
-                content: fillTemplateWith("", {
-                  ...modelConfig,
-                  template: DEFAULT_SYSTEM_TEMPLATE,
-                }),
-              }),
-            ]
-          : [];
-        if (shouldInjectSystemPrompts) {
-          console.log(
-            "[Global System Prompt] ",
-            systemPrompts.at(0)?.content ?? "empty",
-          );
-        }
+      systemPrompts = shouldInjectSystemPrompts
+        ? [
+            createMessage({
+              role: "system",
+              content: fillTemplateWith("", {
+                ...modelConfig,
+                template: DEFAULT_SYSTEM_TEMPLATE,
+              }),
+            }),
+          ]
+        : [];
+      if (shouldInjectSystemPrompts) {
+        console.log(
+          "[Global System Prompt] ",
+          systemPrompts.at(0)?.content ?? "empty",
+        );
       }
 
       // long term memory

View File

@@ -10,24 +10,23 @@ export function collectModelTable(
       available: boolean;
       name: string;
       displayName: string;
-      provider: LLMModel["provider"];
+      provider?: LLMModel["provider"]; // Marked as optional
     }
   > = {};
 
   // default models
-  models.forEach(
-    (m) =>
-      (modelTable[m.name] = {
-        ...m,
-        displayName: m.name,
-      }),
-  );
+  models.forEach((m) => {
+    modelTable[m.name] = {
+      ...m,
+      displayName: m.name, // 'provider' is copied over if it exists
+    };
+  });
 
   // server custom models
   customModels
     .split(",")
     .filter((v) => !!v && v.length > 0)
-    .map((m) => {
+    .forEach((m) => {
       const available = !m.startsWith("-");
       const nameConfig =
         m.startsWith("+") || m.startsWith("-") ? m.slice(1) : m;
@@ -35,15 +34,15 @@ export function collectModelTable(
 
       // enable or disable all models
       if (name === "all") {
-        Object.values(modelTable).forEach((m) => (m.available = available));
+        Object.values(modelTable).forEach((model) => (model.available = available));
+      } else {
+        modelTable[name] = {
+          name,
+          displayName: displayName || name,
+          available,
+          provider: modelTable[name]?.provider, // Use optional chaining
+        };
       }
-      modelTable[name] = {
-        name,
-        displayName: displayName || name,
-        available,
-        provider: modelTable[name].provider,
-      };
     });
 
   return modelTable;
 }
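Besides the cosmetic forEach changes, the new else branch fixes two real issues: "all" no longer falls through and registers a literal model named "all", and unknown custom names no longer crash on the provider lookup. A small sketch of the second fix (hypothetical names; LLMModel's provider shape is simplified away):

    // Sketch: why the optional chaining matters for custom models.
    const modelTable: Record<string, { available: boolean; provider?: unknown }> =
      {};
    const name = "my-custom-model"; // not among the default models
    // Old: modelTable[name].provider threw a TypeError (undefined lookup).
    // New: modelTable[name]?.provider just yields undefined.
    const provider = modelTable[name]?.provider;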