fix: Bedrock image processing and Edge browser routing - fixed image prompts by bypassing the service-worker cache during upload, added Bedrock models with vision detection, converted data and HTTP image URLs to Bedrock's base64 format, fixed Edge routing to the Bedrock client, and added error handling and debug logging

AC 2025-06-11 15:21:01 +08:00
parent 3aae552167
commit f682b1f4de
6 changed files with 642 additions and 42 deletions
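
Background for the diffs below: the chat UI emits OpenAI-style image_url content parts, while Claude on Bedrock expects Anthropic-style base64 image blocks, so the API route has to convert between the two. A minimal sketch of both shapes as they appear in this commit (the interface names are illustrative, not from the codebase):

// OpenAI-style content part as produced by the chat UI (name illustrative).
interface OpenAIImagePart {
  type: "image_url";
  image_url: { url: string }; // either a data: URL or an http(s) URL
}

// Anthropic/Bedrock content block the route must emit (name illustrative).
interface BedrockImageBlock {
  type: "image";
  source: {
    type: "base64";
    media_type: string; // e.g. "image/jpeg"
    data: string; // raw base64, without the "data:...;base64," prefix
  };
}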

View File

@@ -76,9 +76,37 @@ export async function handle(
"Stream:",
body.stream,
"Messages count:",
body.messages.length,
body.messages?.length || 0,
);
// Add detailed logging for debugging
if (body.messages && body.messages.length > 0) {
body.messages.forEach((msg: any, index: number) => {
console.log(`[Bedrock] Message ${index}:`, {
role: msg.role,
contentType: typeof msg.content,
isArray: Array.isArray(msg.content),
contentLength: Array.isArray(msg.content)
? msg.content.length
: typeof msg.content === "string"
? msg.content.length
: "unknown",
});
if (Array.isArray(msg.content)) {
msg.content.forEach((item: any, itemIndex: number) => {
console.log(`[Bedrock] Message ${index}, Item ${itemIndex}:`, {
type: item.type,
hasImageUrl: !!item.image_url?.url,
urlPreview: item.image_url?.url
? item.image_url.url.substring(0, 50) + "..."
: null,
});
});
}
});
}
const {
messages,
model,
@@ -87,6 +115,27 @@ export async function handle(
max_tokens,
} = body;
// --- Input Validation ---
if (!model || typeof model !== "string") {
return NextResponse.json(
{
error: true,
msg: "Model parameter is required and must be a string",
},
{ status: 400 },
);
}
if (!Array.isArray(messages) || messages.length === 0) {
return NextResponse.json(
{
error: true,
msg: "Messages parameter is required and must be a non-empty array",
},
{ status: 400 },
);
}
// --- Payload formatting for Claude on Bedrock ---
const isClaudeModel = model.includes("anthropic.claude");
if (!isClaudeModel) {
@@ -101,23 +150,298 @@ export async function handle(
(msg: any) => msg.role !== "system",
);
// Validate we have non-system messages
if (userAssistantMessages.length === 0) {
return NextResponse.json(
{
error: true,
msg: "At least one user or assistant message is required",
},
{ status: 400 },
);
}
// Process messages and handle image fetching
const processedMessages = await Promise.all(
userAssistantMessages.map(async (msg: any) => {
let content;
if (Array.isArray(msg.content)) {
const processedContent = await Promise.all(
msg.content.map(async (item: any) => {
if (item.type === "image_url") {
console.log("[Bedrock] Processing image_url item:", item);
// Adapt from OpenAI format to Bedrock's format
const url = item.image_url?.url;
if (!url) {
console.warn(
"[Bedrock] Image URL is missing in content item",
);
return null;
}
// Check if it's a data URL or regular URL
const dataUrlMatch = url.match(
/^data:(image\/[^;]+);base64,(.+)$/,
);
if (dataUrlMatch) {
// Handle data URL (base64)
const mediaType = dataUrlMatch[1];
const base64Data = dataUrlMatch[2];
if (!base64Data) {
console.warn("[Bedrock] Empty base64 data in image URL");
return null;
}
const bedrockImageItem = {
type: "image",
source: {
type: "base64",
media_type: mediaType,
data: base64Data,
},
};
console.log(
"[Bedrock] Successfully converted data URL to Bedrock format:",
{
mediaType,
dataLength: base64Data.length,
},
);
return bedrockImageItem;
} else if (
url.startsWith("http://") ||
url.startsWith("https://")
) {
// Handle HTTP URL - fetch directly and convert to base64
console.log(
"[Bedrock] HTTP URL detected, fetching directly:",
url.substring(0, 50) + "...",
);
try {
const response = await fetch(url);
console.log(
"[Bedrock] Fetch response status:",
response.status,
response.statusText,
);
if (!response.ok) {
console.error(
"[Bedrock] Failed to fetch image:",
response.status,
response.statusText,
);
return null;
}
const blob = await response.blob();
console.log("[Bedrock] Blob info:", {
size: blob.size,
type: blob.type,
});
if (blob.size === 0) {
console.error(
"[Bedrock] Fetched blob is empty - cache endpoint may not be working",
);
console.log(
"[Bedrock] This might be a service worker cache issue - image was uploaded but cache retrieval failed",
);
return null;
}
const arrayBuffer = await blob.arrayBuffer();
console.log(
"[Bedrock] ArrayBuffer size:",
arrayBuffer.byteLength,
);
if (arrayBuffer.byteLength === 0) {
console.error("[Bedrock] ArrayBuffer is empty");
return null;
}
const base64Data =
Buffer.from(arrayBuffer).toString("base64");
console.log("[Bedrock] Base64 conversion:", {
originalSize: arrayBuffer.byteLength,
base64Length: base64Data.length,
isEmpty: !base64Data || base64Data.length === 0,
firstChars: base64Data.substring(0, 20),
});
if (!base64Data || base64Data.length === 0) {
console.error(
"[Bedrock] Base64 data is empty after conversion",
);
return null;
}
const mediaType = blob.type || "image/jpeg";
const bedrockImageItem = {
type: "image",
source: {
type: "base64",
media_type: mediaType,
data: base64Data,
},
};
console.log(
"[Bedrock] Successfully converted HTTP URL to Bedrock format:",
{
url: url.substring(0, 50) + "...",
mediaType,
dataLength: base64Data.length,
hasValidData: !!base64Data && base64Data.length > 0,
},
);
return bedrockImageItem;
} catch (error) {
console.error("[Bedrock] Error fetching image:", error);
return null;
}
} else {
console.warn(
"[Bedrock] Invalid URL format:",
url.substring(0, 50) + "...",
);
return null;
}
} else {
// Handle text content
return item;
}
}),
);
// Filter out nulls and ensure we have content
content = processedContent.filter(Boolean);
// Additional validation: ensure no image objects have empty data
content = content.filter((item: any) => {
if (item.type === "image") {
const hasValidData =
item.source?.data && item.source.data.length > 0;
if (!hasValidData) {
console.error(
"[Bedrock] Filtering out image with empty data:",
{
hasSource: !!item.source,
hasData: !!item.source?.data,
dataLength: item.source?.data?.length || 0,
},
);
return false;
}
}
return true;
});
if (content.length === 0) {
console.warn(
"[Bedrock] All content items were filtered out, adding empty text",
);
content = [{ type: "text", text: "" }];
}
console.log(
"[Bedrock] Processed content for message:",
content.length,
"items",
);
} else if (typeof msg.content === "string") {
content = [{ type: "text", text: msg.content }];
} else {
console.warn("[Bedrock] Unknown content type:", typeof msg.content);
content = [{ type: "text", text: "" }];
}
return {
role: msg.role,
content: content,
};
}),
);
const payload = {
anthropic_version: "bedrock-2023-05-31",
max_tokens: max_tokens || 4096,
temperature: temperature,
messages: userAssistantMessages.map((msg: any) => ({
role: msg.role, // 'user' or 'assistant'
content:
typeof msg.content === "string"
? [{ type: "text", text: msg.content }]
: msg.content, // Assuming MultimodalContent format is compatible
})),
max_tokens:
typeof max_tokens === "number" && max_tokens > 0 ? max_tokens : 4096,
temperature:
typeof temperature === "number" && temperature >= 0 && temperature <= 1
? temperature
: 0.7, // Bedrock Claude accepts 0-1 range
messages: processedMessages,
...(systemPrompts.length > 0 && {
system: systemPrompts.map((msg: any) => msg.content).join("\n"),
system: systemPrompts
.map((msg: any) => {
if (typeof msg.content === "string") {
return msg.content;
} else if (Array.isArray(msg.content)) {
// Handle multimodal system prompts by extracting text
return msg.content
.filter((item: any) => item.type === "text")
.map((item: any) => item.text)
.join(" ");
}
return String(msg.content); // Fallback conversion
})
.filter(Boolean)
.join("\n"),
}),
};
// --- End Payload Formatting ---
// Log the final payload structure (without base64 data to avoid huge logs)
console.log("[Bedrock] Final payload structure:", {
anthropic_version: payload.anthropic_version,
max_tokens: payload.max_tokens,
temperature: payload.temperature,
messageCount: payload.messages.length,
messages: payload.messages.map((msg: any, index: number) => ({
index,
role: msg.role,
contentItems: msg.content.map((item: any) => ({
type: item.type,
hasData: item.type === "image" ? !!item.source?.data : !!item.text,
mediaType: item.source?.media_type || null,
textLength: item.text?.length || null,
dataLength: item.source?.data?.length || null,
})),
})),
hasSystem: !!(payload as any).system,
});
// Final validation: check for any empty images
const hasEmptyImages = payload.messages.some((msg: any) =>
msg.content.some(
(item: any) =>
item.type === "image" &&
(!item.source?.data || item.source.data.length === 0),
),
);
if (hasEmptyImages) {
console.error(
"[Bedrock] Payload contains empty images, this will cause Bedrock to fail",
);
return NextResponse.json(
{
error: true,
msg: "Image processing failed: empty image data detected",
},
{ status: 400 },
);
}
if (stream) {
const command = new InvokeModelWithResponseStreamCommand({
modelId: model,
@@ -139,13 +463,23 @@ export async function handle(
try {
for await (const event of responseBody) {
if (event.chunk?.bytes) {
const chunkData = JSON.parse(decoder.decode(event.chunk.bytes));
let chunkData;
try {
chunkData = JSON.parse(decoder.decode(event.chunk.bytes));
} catch (parseError) {
console.error(
"[Bedrock] Failed to parse chunk JSON:",
parseError,
);
continue; // Skip malformed chunks
}
let responseText = "";
let finishReason: string | null = null;
if (
chunkData.type === "content_block_delta" &&
chunkData.delta.type === "text_delta"
chunkData.delta?.type === "text_delta"
) {
responseText = chunkData.delta.text || "";
} else if (chunkData.type === "message_stop") {
@@ -156,35 +490,68 @@ export async function handle(
: "length"; // Example logic
}
// Format as OpenAI SSE chunk
const sseData = {
id: `chatcmpl-${nanoid()}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: model,
choices: [
{
index: 0,
delta: { content: responseText },
finish_reason: finishReason,
},
],
};
controller.enqueue(
encoder.encode(`data: ${JSON.stringify(sseData)}\n\n`),
);
// Only send non-empty responses or finish signals
if (responseText || finishReason) {
// Format as OpenAI SSE chunk
const sseData = {
id: `chatcmpl-${nanoid()}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: model,
choices: [
{
index: 0,
delta: { content: responseText },
finish_reason: finishReason,
},
],
};
try {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify(sseData)}\n\n`),
);
} catch (enqueueError) {
console.error(
"[Bedrock] Failed to enqueue data:",
enqueueError,
);
break; // Stop processing if client disconnected
}
}
if (finishReason) {
controller.enqueue(encoder.encode("data: [DONE]\n\n"));
try {
controller.enqueue(encoder.encode("data: [DONE]\n\n"));
} catch (enqueueError) {
console.error(
"[Bedrock] Failed to enqueue [DONE]:",
enqueueError,
);
}
break; // Exit loop after stop message
}
}
}
} catch (error) {
console.error("[Bedrock] Streaming error:", error);
controller.error(error);
try {
controller.error(error);
} catch (controllerError) {
console.error(
"[Bedrock] Failed to signal controller error:",
controllerError,
);
}
} finally {
controller.close();
try {
controller.close();
} catch (closeError) {
console.error(
"[Bedrock] Failed to close controller:",
closeError,
);
}
}
},
});
@@ -205,7 +572,28 @@ export async function handle(
body: JSON.stringify(payload),
});
const response = await client.send(command);
const responseBody = JSON.parse(new TextDecoder().decode(response.body));
if (!response.body) {
throw new Error("Empty response body from Bedrock");
}
let responseBody;
try {
responseBody = JSON.parse(new TextDecoder().decode(response.body));
} catch (parseError) {
console.error("[Bedrock] Failed to parse response JSON:", parseError);
throw new Error("Invalid JSON response from Bedrock");
}
// Validate response structure
if (
!responseBody.content ||
!Array.isArray(responseBody.content) ||
responseBody.content.length === 0
) {
console.error("[Bedrock] Invalid response structure:", responseBody);
throw new Error("Invalid response structure from Bedrock");
}
// Format response to match OpenAI
const formattedResponse = {

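The route above inlines this conversion inside the message mapper; condensed into a standalone helper, the same logic looks roughly like the sketch below (the helper name is hypothetical, and it reuses the BedrockImageBlock shape sketched at the top of this page; Buffer is available because the route runs server-side):

// Hypothetical helper mirroring the image conversion in the route above.
async function toBedrockImage(url: string): Promise<BedrockImageBlock | null> {
  // Case 1: data URL - split the media type from the base64 payload.
  const dataUrlMatch = url.match(/^data:(image\/[^;]+);base64,(.+)$/);
  if (dataUrlMatch) {
    const [, media_type, data] = dataUrlMatch;
    return data
      ? { type: "image", source: { type: "base64", media_type, data } }
      : null;
  }
  // Case 2: http(s) URL - fetch the bytes server-side and base64-encode them.
  if (url.startsWith("http://") || url.startsWith("https://")) {
    const response = await fetch(url);
    if (!response.ok) return null;
    const blob = await response.blob();
    if (blob.size === 0) return null; // empty blobs make Bedrock reject the payload
    const data = Buffer.from(await blob.arrayBuffer()).toString("base64");
    return {
      type: "image",
      source: { type: "base64", media_type: blob.type || "image/jpeg", data },
    };
  }
  return null; // unsupported scheme; callers filter out nulls
}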
View File

@@ -366,31 +366,57 @@ export function getClientApi(provider: ServiceProvider | string): ClientApi {
provider,
"| Type:",
typeof provider,
"| Browser:",
navigator.userAgent.includes("Edg") // "Edg" matches both legacy "Edge/" and Chromium Edge's "Edg/" tokens
? "Edge"
: navigator.userAgent.includes("Safari")
? "Safari"
: "Other",
);
// Standardize the provider name to match Enum case (TitleCase)
let standardizedProvider: ServiceProvider | string;
if (typeof provider === "string") {
console.log(
"[getClientApi] Provider is string, attempting to standardize:",
provider,
);
// Convert known lowercase versions to their Enum equivalent
switch (provider.toLowerCase()) {
case "bedrock":
standardizedProvider = ServiceProvider.Bedrock;
console.log(
"[getClientApi] Converted 'bedrock' string to ServiceProvider.Bedrock",
);
break;
case "openai":
standardizedProvider = ServiceProvider.OpenAI;
console.log(
"[getClientApi] Converted 'openai' string to ServiceProvider.OpenAI",
);
break;
case "google":
standardizedProvider = ServiceProvider.Google;
break;
// Add other potential lowercase strings if needed
default:
console.log(
"[getClientApi] Unknown string provider, keeping as-is:",
provider,
);
standardizedProvider = provider; // Keep unknown strings as is
}
} else {
console.log("[getClientApi] Provider is already enum value:", provider);
standardizedProvider = provider; // Already an Enum value
}
console.log("[getClientApi] Standardized Provider:", standardizedProvider);
console.log(
"[getClientApi] Final Standardized Provider:",
standardizedProvider,
"| Enum check:",
standardizedProvider === ServiceProvider.Bedrock,
);
switch (standardizedProvider) {
case ServiceProvider.Google:
@@ -431,6 +457,18 @@ export function getClientApi(provider: ServiceProvider | string): ClientApi {
console.log(
`[getClientApi] Provider '${provider}' (Standardized: '${standardizedProvider}') not matched, returning default GPT.`,
);
// Edge browser fallback: check if this is a Bedrock model by name
if (
typeof provider === "string" &&
provider.includes("anthropic.claude")
) {
console.log(
"[getClientApi] Edge fallback: Detected Bedrock model by name, routing to Bedrock",
);
return new ClientApi(ModelProvider.Bedrock);
}
return new ClientApi(ModelProvider.GPT);
}
}
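
The standardization switch plus the model-name fallback above amount to the following resolution rule (condensed sketch; the function name is hypothetical):

// Hypothetical condensation of the provider standardization + Edge fallback.
function resolveProvider(
  provider: ServiceProvider | string,
): ServiceProvider | string {
  if (typeof provider !== "string") return provider; // already an enum value
  switch (provider.toLowerCase()) {
    case "bedrock":
      return ServiceProvider.Bedrock;
    case "openai":
      return ServiceProvider.OpenAI;
    case "google":
      return ServiceProvider.Google;
    default:
      // Edge fallback: Bedrock model ids carry an "anthropic.claude" prefix.
      return provider.includes("anthropic.claude")
        ? ServiceProvider.Bedrock
        : provider;
  }
}

With a rule like this, getClientApi can route on the result regardless of whether callers pass an enum value or a raw string.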

View File

@@ -31,7 +31,7 @@ export class BedrockApi implements LLMApi {
messages,
temperature: modelConfig.temperature,
stream: !!modelConfig.stream,
max_tokens: 4096, // Example: You might want to make this configurable
max_tokens: (modelConfig as any).max_tokens || 4096, // Cast to access max_tokens from ModelConfig
}),
signal: controller.signal,
headers: getHeaders(), // getHeaders should handle Bedrock (no auth needed)

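The `as any` cast above works but discards type checking; a small helper that mirrors the server-side clamp in the route would avoid it (name hypothetical):

// Sketch: read an optional max_tokens without widening the config to any,
// matching the route's "positive number or 4096" validation.
function resolveMaxTokens(config: { max_tokens?: unknown }): number {
  const value = config.max_tokens;
  return typeof value === "number" && value > 0 ? value : 4096;
}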
View File

@@ -77,8 +77,6 @@ import {
showPlugins,
} from "../utils";
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
@@ -1153,6 +1151,15 @@ function _Chat() {
const doSubmit = (userInput: string) => {
if (userInput.trim() === "" && isEmpty(attachImages)) return;
console.log("[doSubmit] Called with:", {
userInput: userInput?.substring(0, 50) + "...",
hasAttachImages: !!attachImages,
attachImagesCount: attachImages?.length || 0,
attachImagesPreview:
attachImages?.map((img) => img?.substring(0, 50) + "...") || [],
});
const matchCommand = chatCommands.match(userInput);
if (matchCommand.matched) {
setUserInput("");
@@ -1576,13 +1583,27 @@ function _Chat() {
...(await new Promise<string[]>((res, rej) => {
setUploading(true);
const imagesData: string[] = [];
uploadImageRemote(file)
// Use compressImage directly to bypass cache issues
import("@/app/utils/chat")
.then(({ compressImage }) => compressImage(file, 256 * 1024))
.then((dataUrl) => {
console.log("[uploadImage] Compressed image:", {
fileSize: file.size,
fileName: file.name,
dataUrlLength: dataUrl.length,
isDataUrl: dataUrl.startsWith("data:"),
});
imagesData.push(dataUrl);
setUploading(false);
res(imagesData);
if (
imagesData.length === 3 ||
imagesData.length === 1 // Only one file in this context
) {
setUploading(false);
res(imagesData);
}
})
.catch((e) => {
console.error("[uploadImage] Compression failed:", e);
setUploading(false);
rej(e);
});
@@ -1618,8 +1639,16 @@
const imagesData: string[] = [];
for (let i = 0; i < files.length; i++) {
const file = event.target.files[i];
uploadImageRemote(file)
// Use compressImage directly to bypass cache issues
import("@/app/utils/chat")
.then(({ compressImage }) => compressImage(file, 256 * 1024))
.then((dataUrl) => {
console.log("[uploadImage] Compressed image:", {
fileSize: file.size,
fileName: file.name,
dataUrlLength: dataUrl.length,
isDataUrl: dataUrl.startsWith("data:"),
});
imagesData.push(dataUrl);
if (
imagesData.length === 3 ||
@@ -1630,6 +1659,7 @@
}
})
.catch((e) => {
console.error("[uploadImage] Compression failed:", e);
setUploading(false);
rej(e);
});
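
Both upload paths now call compressImage(file, 256 * 1024) from @/app/utils/chat, whose body is not part of this diff. For orientation only, a canvas-based compressor with that signature might look like the sketch below; the real implementation may differ.

// Sketch of a compressImage(file, maxSize) compatible with the calls above.
function compressImage(file: Blob, maxSize: number): Promise<string> {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.onload = () => {
      let width = img.width;
      let height = img.height;
      let quality = 0.9;
      let dataUrl = "";
      const canvas = document.createElement("canvas");
      const ctx = canvas.getContext("2d")!;
      // Re-encode at decreasing quality, then decreasing size, until it fits.
      do {
        canvas.width = width;
        canvas.height = height;
        ctx.clearRect(0, 0, width, height);
        ctx.drawImage(img, 0, 0, width, height);
        dataUrl = canvas.toDataURL("image/jpeg", quality);
        if (quality > 0.5) {
          quality -= 0.1;
        } else {
          width = Math.floor(width * 0.9);
          height = Math.floor(height * 0.9);
        }
      } while (dataUrl.length > maxSize && width > 16);
      URL.revokeObjectURL(img.src);
      resolve(dataUrl); // a data: URL, which the Bedrock route accepts directly
    };
    img.onerror = reject;
    img.src = URL.createObjectURL(file);
  });
}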

View File

@@ -466,6 +466,7 @@ export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/claude-3/,
/anthropic\.claude-3/,
/gemini-1\.5/,
/gemini-exp/,
/gemini-2\.0/,
@@ -657,6 +658,24 @@
"Pro/deepseek-ai/DeepSeek-V3",
];
const bedrockModels = [
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v1:0",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-instant-v1",
"amazon.titan-text-express-v1",
"amazon.titan-text-lite-v1",
"cohere.command-text-v14",
"cohere.command-light-text-v14",
"ai21.j2-ultra-v1",
"ai21.j2-mid-v1",
"meta.llama2-13b-chat-v1",
"meta.llama2-70b-chat-v1",
];
let seq = 1000; // built-in model sequence generator starts at 1000
export const DEFAULT_MODELS = [
...openaiModels.map((name) => ({
@@ -813,6 +832,17 @@ export const DEFAULT_MODELS = [
sorted: 14,
},
})),
...bedrockModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "bedrock",
providerName: "Bedrock",
providerType: "bedrock",
sorted: 15,
},
})),
] as const;
export const CHAT_PAGE_SIZE = 15;
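
With anthropic\.claude-3 added to VISION_MODEL_REGEXES, the fully qualified Bedrock model ids above pass a regex-based vision check. A minimal sketch of such a check (the app's actual helper may differ):

// Illustrative regex-based vision detection over the list above.
function isVisionModel(model: string): boolean {
  return VISION_MODEL_REGEXES.some((regex) => regex.test(model));
}

// e.g. isVisionModel("anthropic.claude-3-5-sonnet-20241022-v1:0") => true
//      isVisionModel("amazon.titan-text-express-v1") => false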

View File

@@ -412,12 +412,24 @@ export const useChatStore = createPersistStore(
const session = get().currentSession();
const modelConfig = session.mask.modelConfig;
console.log("[onUserInput] Starting with:", {
content: content?.substring(0, 50) + "...",
hasAttachImages: !!attachImages,
attachImagesCount: attachImages?.length || 0,
isMcpResponse,
});
// MCP Response no need to fill template
let mContent: string | MultimodalContent[] = isMcpResponse
? content
: fillTemplateWith(content, modelConfig);
if (!isMcpResponse && attachImages && attachImages.length > 0) {
console.log("[onUserInput] Processing attached images:", {
imageCount: attachImages.length,
firstImagePreview: attachImages[0]?.substring(0, 50) + "...",
});
mContent = [
...(content ? [{ type: "text" as const, text: content }] : []),
...attachImages.map((url) => ({
@@ -425,6 +437,14 @@ export const useChatStore = createPersistStore(
image_url: { url },
})),
];
console.log("[onUserInput] Created multimodal content:", {
isArray: Array.isArray(mContent),
contentLength: Array.isArray(mContent) ? mContent.length : "N/A",
contentTypes: Array.isArray(mContent)
? mContent.map((item) => item.type)
: "N/A",
});
}
let userMessage: ChatMessage = createMessage({
@@ -470,12 +490,106 @@ export const useChatStore = createPersistStore(
"| Model:",
modelConfig.model,
);
// Add detailed browser and session logging
console.log("[onUserInput] Browser:", navigator.userAgent);
console.log(
"[onUserInput] Full modelConfig:",
JSON.stringify(modelConfig, null, 2),
);
console.log(
"[onUserInput] Session mask:",
JSON.stringify(session.mask, null, 2),
);
// --- End of logging ---
// Use the providerName from the config, and provide a default value
const api: ClientApi = getClientApi(
providerNameFromConfig ?? ServiceProvider.OpenAI,
);
// Edge browser workaround: if we're using a Bedrock model but got wrong API, force Bedrock
if (
modelConfig.model?.includes("anthropic.claude") &&
!api.llm.constructor.name.includes("Bedrock")
) {
console.warn(
"[onUserInput] Edge workaround: Detected Bedrock model but wrong API class:",
api.llm.constructor.name,
"- forcing Bedrock",
);
const bedrockApi = getClientApi(ServiceProvider.Bedrock);
bedrockApi.llm.chat({
messages: sendMessages,
config: { ...modelConfig, stream: true },
onUpdate(message) {
botMessage.streaming = true;
if (message) {
botMessage.content = message;
}
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},
async onFinish(message) {
botMessage.streaming = false;
if (message) {
botMessage.content = message;
botMessage.date = new Date().toLocaleString();
get().onNewMessage(botMessage, session);
}
ChatControllerPool.remove(session.id, botMessage.id);
},
onBeforeTool(tool: ChatMessageTool) {
(botMessage.tools = botMessage?.tools || []).push(tool);
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},
onAfterTool(tool: ChatMessageTool) {
botMessage?.tools?.forEach((t, i, tools) => {
if (tool.id == t.id) {
tools[i] = { ...tool };
}
});
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},
onError(error) {
const isAborted = error.message?.includes?.("aborted");
botMessage.content +=
"\n\n" +
prettyObject({
error: true,
message: error.message,
});
botMessage.streaming = false;
userMessage.isError = !isAborted;
botMessage.isError = !isAborted;
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
ChatControllerPool.remove(
session.id,
botMessage.id ?? messageIndex,
);
console.error("[Chat] failed ", error);
},
onController(controller) {
// collect controller for stop/retry
ChatControllerPool.addController(
session.id,
botMessage.id ?? messageIndex,
controller,
);
},
});
return;
}
// make request
api.llm.chat({
messages: sendMessages,
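
One caveat on the workaround above: it duplicates every chat callback for the Bedrock path, and the constructor.name check can break under minification. A sketch of how both call sites could share a single options object instead (assuming the ChatOptions shape implied by the callbacks above):

// Sketch: pick the API first, then make a single chat call with shared options.
function routeChat(
  api: ClientApi,
  model: string | undefined,
  options: any, // ChatOptions as passed to api.llm.chat above
) {
  const needsBedrock =
    !!model?.includes("anthropic.claude") &&
    !api.llm.constructor.name.includes("Bedrock"); // fragile under minification
  const target = needsBedrock ? getClientApi(ServiceProvider.Bedrock) : api;
  target.llm.chat(options);
}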