Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-11-16 14:03:43 +08:00)
Merge remote-tracking branch 'upstream/main' into dev
@@ -21,7 +21,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import { getMessageTextContent } from "@/app/utils";
 
 export interface OpenAIListModelResponse {
   object: string;
@@ -3,7 +3,6 @@ import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
-import { RequestMessage } from "@/app/typing";
 import {
   EventStreamContentType,
   fetchEventSource,
@@ -12,6 +11,7 @@ import {
 import Locale from "../../locales";
 import { prettyObject } from "@/app/utils/format";
 import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 
 export type MultiBlockContent = {
@@ -93,7 +93,12 @@ export class ClaudeApi implements LLMApi {
       },
     };
 
-    const messages = [...options.messages];
+    // try get base64image from local cache image_url
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const keys = ["system", "user"];
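Every provider hunk in this commit calls preProcessImageContent from @/app/utils/chat, but the helper itself is not shown in this diff. The following is only a rough sketch of what such a helper could look like, assuming (per the in-diff comment "try get base64image from local cache image_url") that it swaps cached image URLs for base64 data URLs before the request is sent; the part types, the in-memory imageCache, and the Sketch suffix are placeholders, not code from this repository.

// A sketch only, not the repository's implementation.
// Assumes multimodal content is a list of text parts and image_url parts,
// and that a local cache maps an image URL to a base64 data URL.
type TextPart = { type: "text"; text: string };
type ImagePart = { type: "image_url"; image_url: { url: string } };
type MessageContent = string | (TextPart | ImagePart)[];

// Placeholder for whatever cache the app actually uses.
const imageCache = new Map<string, string>(); // url -> base64 data URL

async function preProcessImageContentSketch(
  content: MessageContent,
): Promise<MessageContent> {
  // Plain-text messages pass through untouched.
  if (typeof content === "string") return content;

  // For multimodal messages, replace cached image URLs with inline base64
  // data URLs so the provider receives image data rather than a local URL.
  return Promise.all(
    content.map(async (part) => {
      if (part.type === "image_url") {
        // A real cache lookup (e.g. IndexedDB) would be awaited here.
        const cached = imageCache.get(part.image_url.url);
        if (cached) return { ...part, image_url: { url: cached } };
      }
      return part;
    }),
  );
}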
@@ -14,6 +14,7 @@ import {
   getMessageImages,
   isVisionModel,
 } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 
 export class GeminiProApi implements LLMApi {
   path(path: string): string {
@@ -56,7 +57,14 @@ export class GeminiProApi implements LLMApi {
   async chat(options: ChatOptions): Promise<void> {
     const apiClient = this;
     let multimodal = false;
-    const messages = options.messages.map((v) => {
+
+    // try get base64image from local cache image_url
+    const _messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      _messages.push({ role: v.role, content });
+    }
+    const messages = _messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
       if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
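One detail worth noting in the GeminiProApi hunk: preProcessImageContent is awaited, so the previous synchronous options.messages.map((v) => ...) could not simply call it; an async map callback would produce an array of Promises. The diff therefore collects the preprocessed messages into _messages with a for...of loop first. An equivalent formulation using Promise.all is sketched below purely for illustration; the message shape is simplified and the helper is passed in as a parameter, so nothing here is the repository's code.

// A sketch only: the same preprocessing step expressed with Promise.all
// instead of a for...of loop. The message shape is deliberately simplified.
interface SimpleMessage {
  role: "system" | "user" | "assistant";
  content: string; // stand-in; the real content may be multimodal
}

async function preprocessMessages(
  messages: SimpleMessage[],
  preProcess: (content: string) => Promise<string>,
): Promise<SimpleMessage[]> {
  // map produces an array of Promises; Promise.all resolves them in order.
  return Promise.all(
    messages.map(async (v) => ({
      role: v.role,
      content: await preProcess(v.content),
    })),
  );
}

// Example usage with an identity preprocessor:
// const processed = await preprocessMessages(msgs, async (c) => c);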
@@ -12,6 +12,7 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { collectModelsWithDefaultModel } from "@/app/utils/model";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 
 import {
@@ -106,10 +107,13 @@ export class ChatGPTApi implements LLMApi {
 
   async chat(options: ChatOptions) {
     const visionModel = isVisionModel(options.config.model);
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: visionModel ? v.content : getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = visionModel
+        ? await preProcessImageContent(v.content)
+        : getMessageTextContent(v);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
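For the ChatGPTApi hunk, the branch means non-vision models still get their content flattened to plain text via getMessageTextContent, while vision models keep the multimodal parts after preprocessing. Below is a small self-contained illustration of that branch; the Part/Message shapes and the two helpers are stand-ins inferred from the imports in this diff, not the repository's definitions.

// Assumed shapes, loosely mirroring the MultimodalContent import in the diff.
type Part =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };
type Content = string | Part[];
interface Message {
  role: "system" | "user" | "assistant";
  content: Content;
}

// Stand-in helpers; the real ones live in @/app/utils and @/app/utils/chat.
const getMessageTextContent = (m: Message): string =>
  typeof m.content === "string"
    ? m.content
    : m.content
        .filter((p): p is Extract<Part, { type: "text" }> => p.type === "text")
        .map((p) => p.text)
        .join("\n");
const preProcessImageContent = async (c: Content): Promise<Content> => c;

// Same branching as the hunk: vision models keep preprocessed multimodal
// content, non-vision models are reduced to plain text.
async function buildMessages(
  input: Message[],
  visionModel: boolean,
): Promise<Message[]> {
  const messages: Message[] = [];
  for (const v of input) {
    const content = visionModel
      ? await preProcessImageContent(v.content)
      : getMessageTextContent(v);
    messages.push({ role: v.role, content });
  }
  return messages;
}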