commit b43c0b0109 (parent efdd61595e)
Author: sijinhui
Date:   2023-12-16 23:05:14 +08:00

91 changed files with 3399 additions and 12096 deletions


@@ -1,12 +1,22 @@
import { getClientConfig } from "../config/client";
-import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
+import {
+  ACCESS_CODE_PREFIX,
+  Azure,
+  AZURE_MODELS,
+  ServiceProvider,
+} from "../constant";
import { ChatMessage, ModelType, useAccessStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export const Models = [
"gpt-3.5-turbo-16k",
"gpt-4-0613",
"gpt-4-32k",
"midjourney",
] as const;
export type ChatModel = ModelType;
export interface RequestMessage {
@@ -40,6 +50,7 @@ export interface LLMUsage {
export interface LLMModel {
name: string;
+describe: string;
available: boolean;
}
@@ -125,14 +136,15 @@ export class ClientApi {
export const api = new ClientApi();
-export function getHeaders() {
+export function getHeaders(isAzure?: boolean) {
const accessStore = useAccessStore.getState();
const headers: Record<string, string> = {
"Content-Type": "application/json",
"x-requested-with": "XMLHttpRequest",
};
-const isAzure = accessStore.provider === ServiceProvider.Azure;
+// const isAzure = AZURE_MODELS.includes(jsonBody?.model as string)
+// const isAzure = accessStore.provider === ServiceProvider.Azure;
const authHeader = isAzure ? "api-key" : "Authorization";
const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
@@ -151,5 +163,14 @@ export function getHeaders() {
);
}
+if (validString(accessStore.midjourneyProxyUrl)) {
+  headers["midjourney-proxy-url"] = accessStore.midjourneyProxyUrl;
+}
return headers;
}
+export function useGetMidjourneySelfProxyUrl(url: string) {
+  const accessStore = useAccessStore.getState();
+  console.log("useMjImgSelfProxy", accessStore.useMjImgSelfProxy);
+  return url;
+}


@@ -1,5 +1,6 @@
import {
ApiPath,
+AZURE_MODELS,
DEFAULT_API_HOST,
DEFAULT_MODELS,
OpenaiPath,
@@ -30,10 +31,10 @@ export interface OpenAIListModelResponse {
export class ChatGPTApi implements LLMApi {
private disableListModels = true;
-path(path: string): string {
+path(path: string, isAzure?: boolean, azureModel?: string): string {
const accessStore = useAccessStore.getState();
-const isAzure = accessStore.provider === ServiceProvider.Azure;
+// const isAzure = accessStore.provider === ServiceProvider.Azure;
if (isAzure && !accessStore.isValidAzure()) {
throw Error(
@@ -56,7 +57,7 @@ export class ChatGPTApi implements LLMApi {
}
if (isAzure) {
-path = makeAzurePath(path, accessStore.azureApiVersion);
+path = makeAzurePath(path, accessStore.azureApiVersion, azureModel);
}
return [baseUrl, path].join("/");
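`makeAzurePath` now receives the model so the URL can target a per-model deployment. Its implementation is outside this diff; a plausible sketch, assuming the deployment name matches the model name and the standard Azure OpenAI path layout:

```ts
// app/azure.ts (sketch; the real helper is not shown in this diff)
export function makeAzurePath(
  path: string,
  apiVersion: string,
  azureModel?: string,
) {
  // "v1/chat/completions" -> "openai/deployments/<model>/chat/completions"
  path = path.replaceAll("v1/", `openai/deployments/${azureModel}/`);
  // Azure requires an explicit api-version query parameter.
  path += `${path.includes("?") ? "&" : "?"}api-version=${apiVersion}`;
  return path;
}
```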
@@ -79,7 +80,7 @@ export class ChatGPTApi implements LLMApi {
model: options.config.model,
},
};
+const is_azure = AZURE_MODELS.includes(modelConfig.model);
const requestPayload = {
messages,
stream: options.config.stream,
@@ -92,21 +93,24 @@ export class ChatGPTApi implements LLMApi {
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};
console.log("[Request] openai payload: ", requestPayload);
// console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
-const chatPath = this.path(OpenaiPath.ChatPath);
+let chatPath = this.path(
+  OpenaiPath.ChatPath,
+  is_azure,
+  modelConfig.model,
+);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
-headers: getHeaders(),
+headers: getHeaders(is_azure),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
@@ -154,10 +158,10 @@ export class ChatGPTApi implements LLMApi {
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
-console.log(
-  "[OpenAI] request response content type: ",
-  contentType,
-);
+// console.log(
+//   "[OpenAI] request response content type: ",
+//   contentType,
+// );
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
@@ -172,19 +176,25 @@ export class ChatGPTApi implements LLMApi {
res.status !== 200
) {
const responseTexts = [responseText];
-let extraInfo = await res.clone().text();
+// let extraInfo = await res.clone().text();
try {
const resJson = await res.clone().json();
-extraInfo = prettyObject(resJson);
-} catch {}
-if (res.status === 401) {
-  responseTexts.push(Locale.Error.Unauthorized);
+responseTexts.push(prettyObject(resJson));
+} catch {
+  responseTexts.push(Locale.Error.BACKEND_ERR);
+}
-if (extraInfo) {
-  responseTexts.push(extraInfo);
-}
+// if (res.status === 401) {
+//   responseTexts.push(Locale.Error.Unauthorized);
+// } else if (res.status === 404) {
+//   responseTexts.push(Locale.Error.NOT_FOUND_ERR);
+// }
+// if (res.status > 400) {
+//   responseTexts.push(Locale.Error.BACKEND_ERR);
+// }
+// else if (extraInfo) {
+//   responseTexts.push(extraInfo);
+// }
responseText = responseTexts.join("\n\n");
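The rewritten branch is simpler than what it replaces: any non-200 response is parsed as JSON and pretty-printed via `prettyObject`, with `Locale.Error.BACKEND_ERR` as the fallback when parsing fails, instead of the old per-status messages. `prettyObject` itself is not in this diff; a sketch of the assumed helper:

```ts
// app/utils/format.ts (sketch; actual helper not shown in this diff)
export function prettyObject(msg: unknown): string {
  // Stringify non-strings with two-space indentation so error payloads
  // from the API read cleanly in the chat window.
  return typeof msg === "string" ? msg : JSON.stringify(msg, null, 2);
}
```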
@@ -314,7 +324,7 @@ export class ChatGPTApi implements LLMApi {
const resJson = (await res.json()) as OpenAIListModelResponse;
const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
console.log("[Models]", chatModels);
// console.log("[Models]", chatModels);
if (!chatModels) {
return [];
@@ -323,6 +333,7 @@ export class ChatGPTApi implements LLMApi {
return chatModels.map((m) => ({
name: m.id,
available: true,
describe: "",
}));
}
}
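Taken together, the routing decision for one chat request now works like this. A sketch stitched from the hunks above; the resource URL and api-version are illustrative, and `AZURE_MODELS` membership is the assumption driving both branches:

```ts
// Illustrative flow only; simplified from ChatGPTApi.chat() above.
const model = "gpt-4-0613";
const is_azure = AZURE_MODELS.includes(model); // new per-model switch

// path() defers to makeAzurePath() when is_azure is true, producing e.g.
// https://<resource>.openai.azure.com/openai/deployments/gpt-4-0613/
//   chat/completions?api-version=2023-08-01-preview
const chatPath = new ChatGPTApi().path(OpenaiPath.ChatPath, is_azure, model);

// getHeaders(is_azure) then swaps the auth header to match:
//   Azure:  { "api-key": accessStore.azureApiKey }
//   OpenAI: { Authorization: "Bearer " + accessStore.openaiApiKey }
const headers = getHeaders(is_azure);
```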