mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-11-13 20:53:45 +08:00
Merge remote-tracking branch 'upstream/main'
@@ -16,6 +16,7 @@ import {
   LLMApi,
   LLMModel,
   LLMUsage,
+  MultimodalContent,
   SpeechOptions,
 } from "../api";
 import Locale from "../../locales";
@@ -26,7 +27,11 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { makeAzurePath } from "@/app/azure";
-import axios from "axios";
+import {
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";
 
 export interface OpenAIListModelResponse {
   object: string;
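Dropping the `axios` import works because the fork's only use of it, the base64 image helper removed in the chat() hunk below, can be reproduced with built-ins. A minimal sketch of an equivalent helper using the global fetch API (illustrative only, not part of this commit; assumes Node 18+, where fetch and Buffer are available):

// Fetch an image and return its bytes as a base64 string, using only
// built-ins (global fetch + Buffer, available in Node 18+). Browsers
// would use FileReader/btoa instead of Buffer.
async function getImageBase64Data(url: string): Promise<string> {
  const response = await fetch(url);
  const arrayBuffer = await response.arrayBuffer();
  return Buffer.from(arrayBuffer).toString("base64");
}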
@@ -120,49 +125,11 @@ export class ChatGPTApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages: any[] = [];
-
-    const getImageBase64Data = async (url: string) => {
-      const response = await axios.get(url, { responseType: "arraybuffer" });
-      const base64 = Buffer.from(response.data, "binary").toString("base64");
-      return base64;
-    };
-    if (options.config.model === "gpt-4-vision-preview") {
-      for (const v of options.messages) {
-        let message: {
-          role: string;
-          content: {
-            type: string;
-            text?: string;
-            image_url?: { url: string };
-          }[];
-        } = {
-          role: v.role,
-          content: [],
-        };
-        message.content.push({
-          type: "text",
-          text: v.content,
-        });
-        if (v.image_url) {
-          var base64Data = await getImageBase64Data(v.image_url);
-          message.content.push({
-            type: "image_url",
-            image_url: {
-              url: `data:image/jpeg;base64,${base64Data}`,
-            },
-          });
-        }
-        messages.push(message);
-      }
-    } else {
-      options.messages.map((v) =>
-        messages.push({
-          role: v.role,
-          content: v.content,
-        }),
-      );
-    }
+    const visionModel = isVisionModel(options.config.model);
+    const messages = options.messages.map((v) => ({
+      role: v.role,
+      content: visionModel ? v.content : getMessageTextContent(v),
+    }));
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
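The rewritten chat() no longer hard-codes `gpt-4-vision-preview`; it keys off `isVisionModel` and defers text extraction to `getMessageTextContent`. Those helpers are not shown in this diff; a rough sketch of the shapes they imply, assuming the OpenAI-style content union (illustrative only, the real implementations in `@/app/utils` and `../api` may differ):

// Message content is either a plain string or an array of typed parts.
type MultimodalContent =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

interface RequestMessage {
  role: string;
  content: string | MultimodalContent[];
}

// Return the first text part of a message, or the string content itself.
function getMessageTextContent(message: RequestMessage): string {
  if (typeof message.content === "string") return message.content;
  for (const part of message.content) {
    if (part.type === "text") return part.text;
  }
  return "";
}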
@@ -186,6 +153,16 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
+
+    // add max_tokens to vision model
+    if (visionModel) {
+      Object.defineProperty(requestPayload, "max_tokens", {
+        enumerable: true,
+        configurable: true,
+        writable: true,
+        value: modelConfig.max_tokens,
+      });
+    }
 
     console.log("[Request] openai payload: ", requestPayload);
 
     const shouldStream = !!options.config.stream;
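Since the descriptor sets `enumerable`, `configurable`, and `writable` all to true, the `Object.defineProperty` call in the hunk above creates an ordinary data property, so a plain assignment would behave identically; the cast below only sidesteps the payload's static type, which is presumably why defineProperty was used. An equivalent sketch, not code from this commit:

// Illustrative stand-ins for the values in the hunk above.
declare const visionModel: boolean;
declare const modelConfig: { max_tokens: number };
declare const requestPayload: object;

// Equivalent to the Object.defineProperty call: an ordinary enumerable,
// configurable, writable data property.
if (visionModel) {
  (requestPayload as Record<string, unknown>).max_tokens =
    modelConfig.max_tokens;
}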