feat: openai realtime merge

Hk-Gosuto
2024-12-23 15:48:21 +08:00
parent c6156a8d8a
commit 21bf685d12
30 changed files with 2418 additions and 833 deletions

View File

@@ -110,6 +110,13 @@
display: flex;
flex-wrap: wrap;
justify-content: space-between;
gap: 5px;
&-end {
display: flex;
margin-left: auto;
gap: 5px;
}
.chat-input-action {
display: inline-flex;
@@ -127,10 +134,6 @@
width: var(--icon-width);
overflow: hidden;
&:not(:last-child) {
margin-right: 5px;
}
.text {
white-space: nowrap;
padding-left: 5px;
@@ -413,6 +416,12 @@
flex-wrap: nowrap;
}
}
.chat-model-name {
font-size: 12px;
color: var(--black);
margin-left: 6px;
}
}
.chat-message-container {
@@ -467,37 +476,6 @@
}
}
.chat-message-checkmark {
display: inline-block;
margin-right: 5px;
height: 12px;
width: 12px;
color: #13a10e;
fill: #13a10e;
user-select: none;
backface-visibility: hidden;
transform: translateZ(0px);
}
.chat-message-tools-status {
display: flex;
justify-content: center;
align-items: center;
font-size: 12px;
margin-top: 5px;
line-height: 1.5;
}
.chat-message-tools-name {
color: #aaa;
}
.chat-message-tools-details {
margin-left: 5px;
font-weight: bold;
color: #999;
}
.chat-message-status {
font-size: 12px;
color: #aaa;
@@ -505,6 +483,21 @@
margin-top: 5px;
}
.chat-message-tools {
font-size: 12px;
color: #aaa;
line-height: 1.5;
margin-top: 5px;
.chat-message-tool {
display: flex;
align-items: end;
svg {
margin-left: 5px;
margin-right: 5px;
}
}
}
.chat-message-item {
box-sizing: border-box;
max-width: 100%;
@@ -520,15 +513,23 @@
transition: all ease 0.3s;
}
.chat-message-item-files {
display: grid;
grid-template-columns: repeat(var(--file-count), auto);
grid-gap: 5px;
}
.chat-message-item-file {
text-decoration: none;
color: #aaa;
.chat-message-audio {
display: flex;
align-items: center;
justify-content: space-between;
border-radius: 10px;
background-color: rgba(0, 0, 0, 0.05);
border: var(--border-in-light);
position: relative;
transition: all ease 0.3s;
margin-top: 10px;
font-size: 14px;
user-select: text;
word-break: break-word;
box-sizing: border-box;
audio {
height: 30px; /* adjust height */
}
}
.chat-message-item-image {
@@ -739,3 +740,78 @@
bottom: 30px;
}
}
.shortcut-key-container {
padding: 10px;
overflow-y: auto;
display: flex;
flex-direction: column;
}
.shortcut-key-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
gap: 16px;
}
.shortcut-key-item {
display: flex;
justify-content: space-between;
align-items: center;
overflow: hidden;
padding: 10px;
background-color: var(--white);
}
.shortcut-key-title {
font-size: 14px;
color: var(--black);
}
.shortcut-key-keys {
display: flex;
gap: 8px;
}
.shortcut-key {
display: flex;
align-items: center;
justify-content: center;
border: var(--border-in-light);
border-radius: 8px;
padding: 4px;
background-color: var(--gray);
min-width: 32px;
}
.shortcut-key span {
font-size: 12px;
color: var(--black);
}
.chat-main {
display: flex;
height: 100%;
width: 100%;
position: relative;
overflow: hidden;
.chat-body-container {
height: 100%;
display: flex;
flex-direction: column;
flex: 1;
width: 100%;
}
.chat-side-panel {
position: absolute;
inset: 0;
background: var(--white);
overflow: hidden;
z-index: 10;
transform: translateX(100%);
transition: all ease 0.3s;
&-show {
transform: translateX(0);
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1 @@
export * from "./realtime-chat";

View File

@@ -0,0 +1,74 @@
.realtime-chat {
width: 100%;
justify-content: center;
align-items: center;
position: relative;
display: flex;
flex-direction: column;
height: 100%;
padding: 20px;
box-sizing: border-box;
.circle-mic {
width: 150px;
height: 150px;
border-radius: 50%;
background: linear-gradient(to bottom right, #a0d8ef, #f0f8ff);
display: flex;
justify-content: center;
align-items: center;
}
.icon-center {
font-size: 24px;
}
.bottom-icons {
display: flex;
justify-content: space-between;
align-items: center;
width: 100%;
position: absolute;
bottom: 20px;
box-sizing: border-box;
padding: 0 20px;
}
.icon-left,
.icon-right {
width: 46px;
height: 46px;
font-size: 36px;
background: var(--second);
border-radius: 50%;
padding: 2px;
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
&:hover {
opacity: 0.8;
}
}
&.mobile {
display: none;
}
}
.pulse {
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% {
transform: scale(1);
opacity: 0.7;
}
50% {
transform: scale(1.1);
opacity: 1;
}
100% {
transform: scale(1);
opacity: 0.7;
}
}

View File

@@ -0,0 +1,359 @@
import VoiceIcon from "@/app/icons/voice.svg";
import VoiceOffIcon from "@/app/icons/voice-off.svg";
import PowerIcon from "@/app/icons/power.svg";
import styles from "./realtime-chat.module.scss";
import clsx from "clsx";
import { useState, useRef, useEffect } from "react";
import { useChatStore, createMessage, useAppConfig } from "@/app/store";
import { IconButton } from "@/app/components/button";
import {
Modality,
RTClient,
RTInputAudioItem,
RTResponse,
TurnDetection,
} from "rt-client";
import { AudioHandler } from "@/app/lib/audio";
import { uploadImage } from "@/app/utils/chat";
import { VoicePrint } from "@/app/components/voice-print";
interface RealtimeChatProps {
onClose?: () => void;
onStartVoice?: () => void;
onPausedVoice?: () => void;
}
export function RealtimeChat({
onClose,
onStartVoice,
onPausedVoice,
}: RealtimeChatProps) {
const chatStore = useChatStore();
const session = chatStore.currentSession();
const config = useAppConfig();
const [status, setStatus] = useState("");
const [isRecording, setIsRecording] = useState(false);
const [isConnected, setIsConnected] = useState(false);
const [isConnecting, setIsConnecting] = useState(false);
const [modality, setModality] = useState("audio");
const [useVAD, setUseVAD] = useState(true);
const [frequencies, setFrequencies] = useState<Uint8Array | undefined>();
const clientRef = useRef<RTClient | null>(null);
const audioHandlerRef = useRef<AudioHandler | null>(null);
const initRef = useRef(false);
const temperature = config.realtimeConfig.temperature;
const apiKey = config.realtimeConfig.apiKey;
const model = config.realtimeConfig.model;
const azure = config.realtimeConfig.provider === "Azure";
const azureEndpoint = config.realtimeConfig.azure.endpoint;
const azureDeployment = config.realtimeConfig.azure.deployment;
const voice = config.realtimeConfig.voice;
const handleConnect = async () => {
if (isConnecting) return;
if (!isConnected) {
try {
setIsConnecting(true);
clientRef.current = azure
? new RTClient(
new URL(azureEndpoint),
{ key: apiKey },
{ deployment: azureDeployment },
)
: new RTClient({ key: apiKey }, { model });
const modalities: Modality[] =
modality === "audio" ? ["text", "audio"] : ["text"];
const turnDetection: TurnDetection = useVAD
? { type: "server_vad" }
: null;
await clientRef.current.configure({
instructions: "",
voice,
input_audio_transcription: { model: "whisper-1" },
turn_detection: turnDetection,
tools: [],
temperature,
modalities,
});
startResponseListener();
setIsConnected(true);
// TODO
// try {
// const recentMessages = chatStore.getMessagesWithMemory();
// for (const message of recentMessages) {
// const { role, content } = message;
// if (typeof content === "string") {
// await clientRef.current.sendItem({
// type: "message",
// role: role as any,
// content: [
// {
// type: (role === "assistant" ? "text" : "input_text") as any,
// text: content as string,
// },
// ],
// });
// }
// }
// // await clientRef.current.generateResponse();
// } catch (error) {
// console.error("Set message failed:", error);
// }
} catch (error) {
console.error("Connection failed:", error);
setStatus("Connection failed");
} finally {
setIsConnecting(false);
}
} else {
await disconnect();
}
};
const disconnect = async () => {
if (clientRef.current) {
try {
await clientRef.current.close();
clientRef.current = null;
setIsConnected(false);
} catch (error) {
console.error("Disconnect failed:", error);
}
}
};
const startResponseListener = async () => {
if (!clientRef.current) return;
try {
for await (const serverEvent of clientRef.current.events()) {
if (serverEvent.type === "response") {
await handleResponse(serverEvent);
} else if (serverEvent.type === "input_audio") {
await handleInputAudio(serverEvent);
}
}
} catch (error) {
if (clientRef.current) {
console.error("Response iteration error:", error);
}
}
};
const handleResponse = async (response: RTResponse) => {
for await (const item of response) {
if (item.type === "message" && item.role === "assistant") {
const botMessage = createMessage({
role: item.role,
content: "",
});
// add bot message first
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat([botMessage]);
});
let hasAudio = false;
for await (const content of item) {
if (content.type === "text") {
for await (const text of content.textChunks()) {
botMessage.content += text;
}
} else if (content.type === "audio") {
const textTask = async () => {
for await (const text of content.transcriptChunks()) {
botMessage.content += text;
}
};
const audioTask = async () => {
audioHandlerRef.current?.startStreamingPlayback();
for await (const audio of content.audioChunks()) {
hasAudio = true;
audioHandlerRef.current?.playChunk(audio);
}
};
await Promise.all([textTask(), audioTask()]);
}
// flush the streamed content (concat() clones the array to trigger a re-render)
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
}
if (hasAudio) {
// upload the audio to get an audio_url
const blob = audioHandlerRef.current?.savePlayFile();
uploadImage(blob!).then((audio_url) => {
botMessage.audio_url = audio_url;
// update text and audio_url
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
});
}
}
}
};
const handleInputAudio = async (item: RTInputAudioItem) => {
await item.waitForCompletion();
if (item.transcription) {
const userMessage = createMessage({
role: "user",
content: item.transcription,
});
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat([userMessage]);
});
// save the input audio_url and update the session
const { audioStartMillis, audioEndMillis } = item;
// upload the audio to get an audio_url
const blob = audioHandlerRef.current?.saveRecordFile(
audioStartMillis,
audioEndMillis,
);
uploadImage(blob!).then((audio_url) => {
userMessage.audio_url = audio_url;
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
});
}
// stop streaming playback once input audio has been received.
audioHandlerRef.current?.stopStreamingPlayback();
};
const toggleRecording = async () => {
if (!isRecording && clientRef.current) {
try {
if (!audioHandlerRef.current) {
audioHandlerRef.current = new AudioHandler();
await audioHandlerRef.current.initialize();
}
await audioHandlerRef.current.startRecording(async (chunk) => {
await clientRef.current?.sendAudio(chunk);
});
setIsRecording(true);
} catch (error) {
console.error("Failed to start recording:", error);
}
} else if (audioHandlerRef.current) {
try {
audioHandlerRef.current.stopRecording();
if (!useVAD) {
const inputAudio = await clientRef.current?.commitAudio();
await handleInputAudio(inputAudio!);
await clientRef.current?.generateResponse();
}
setIsRecording(false);
} catch (error) {
console.error("Failed to stop recording:", error);
}
}
};
useEffect(() => {
// prevent duplicate initialization
if (initRef.current) return;
initRef.current = true;
const initAudioHandler = async () => {
const handler = new AudioHandler();
await handler.initialize();
audioHandlerRef.current = handler;
await handleConnect();
await toggleRecording();
};
initAudioHandler().catch((error) => {
setStatus(String(error)); // status is rendered as text, so stringify the error
console.error(error);
});
return () => {
if (isRecording) {
toggleRecording();
}
audioHandlerRef.current?.close().catch(console.error);
disconnect();
};
}, []);
useEffect(() => {
let animationFrameId: number;
if (isConnected && isRecording) {
const animationFrame = () => {
if (audioHandlerRef.current) {
const freqData = audioHandlerRef.current.getByteFrequencyData();
setFrequencies(freqData);
}
animationFrameId = requestAnimationFrame(animationFrame);
};
animationFrameId = requestAnimationFrame(animationFrame);
} else {
setFrequencies(undefined);
}
return () => {
if (animationFrameId) {
cancelAnimationFrame(animationFrameId);
}
};
}, [isConnected, isRecording]);
// update session params
useEffect(() => {
clientRef.current?.configure({ voice });
}, [voice]);
useEffect(() => {
clientRef.current?.configure({ temperature });
}, [temperature]);
const handleClose = async () => {
onClose?.();
if (isRecording) {
await toggleRecording();
}
disconnect().catch(console.error);
};
return (
<div className={styles["realtime-chat"]}>
<div
className={clsx(styles["circle-mic"], {
[styles["pulse"]]: isRecording,
})}
>
<VoicePrint frequencies={frequencies} isActive={isRecording} />
</div>
<div className={styles["bottom-icons"]}>
<div>
<IconButton
icon={isRecording ? <VoiceIcon /> : <VoiceOffIcon />}
onClick={toggleRecording}
disabled={!isConnected}
shadow
bordered
/>
</div>
<div className={styles["icon-center"]}>{status}</div>
<div>
<IconButton
icon={<PowerIcon />}
onClick={handleClose}
shadow
bordered
/>
</div>
</div>
</div>
);
}
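
A note on the dependency whose diff is suppressed above: this component leans on an AudioHandler class from @/app/lib/audio. The sketch below reconstructs the surface RealtimeChat actually calls, inferred purely from the call sites in this file — the names and signatures here are assumptions, not the real implementation.

// Sketch of the AudioHandler surface used by RealtimeChat, inferred from
// its call sites; the real class lives in @/app/lib/audio (diff suppressed).
export interface AudioHandlerLike {
  initialize(): Promise<void>;
  // capture microphone audio; the callback receives each encoded chunk
  startRecording(onChunk: (chunk: Uint8Array) => Promise<void>): Promise<void>;
  stopRecording(): void;
  startStreamingPlayback(): void;
  playChunk(audio: Uint8Array): void;
  stopStreamingPlayback(): void;
  // assembled playback audio, as a Blob suitable for upload
  savePlayFile(): Blob;
  // recorded slice between the two timestamps, as a Blob
  saveRecordFile(startMillis?: number, endMillis?: number): Blob;
  // frequency-domain data for the visualizer
  getByteFrequencyData(): Uint8Array;
  close(): Promise<void>;
}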

View File

@@ -0,0 +1,173 @@
import { RealtimeConfig } from "@/app/store";
import Locale from "@/app/locales";
import { ListItem, Select, PasswordInput } from "@/app/components/ui-lib";
import { InputRange } from "@/app/components/input-range";
import { Voice } from "rt-client";
import { ServiceProvider } from "@/app/constant";
const providers = [ServiceProvider.OpenAI, ServiceProvider.Azure];
const models = ["gpt-4o-realtime-preview-2024-10-01"];
const voices = ["alloy", "shimmer", "echo"];
export function RealtimeConfigList(props: {
realtimeConfig: RealtimeConfig;
updateConfig: (updater: (config: RealtimeConfig) => void) => void;
}) {
const azureConfigComponent = props.realtimeConfig.provider ===
ServiceProvider.Azure && (
<>
<ListItem
title={Locale.Settings.Realtime.Azure.Endpoint.Title}
subTitle={Locale.Settings.Realtime.Azure.Endpoint.SubTitle}
>
<input
value={props.realtimeConfig?.azure?.endpoint}
type="text"
placeholder={Locale.Settings.Realtime.Azure.Endpoint.Title}
onChange={(e) => {
props.updateConfig(
(config) => (config.azure.endpoint = e.currentTarget.value),
);
}}
/>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Azure.Deployment.Title}
subTitle={Locale.Settings.Realtime.Azure.Deployment.SubTitle}
>
<input
value={props.realtimeConfig?.azure?.deployment}
type="text"
placeholder={Locale.Settings.Realtime.Azure.Deployment.Title}
onChange={(e) => {
props.updateConfig(
(config) => (config.azure.deployment = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
return (
<>
<ListItem
title={Locale.Settings.Realtime.Enable.Title}
subTitle={Locale.Settings.Realtime.Enable.SubTitle}
>
<input
type="checkbox"
checked={props.realtimeConfig.enable}
onChange={(e) =>
props.updateConfig(
(config) => (config.enable = e.currentTarget.checked),
)
}
></input>
</ListItem>
{props.realtimeConfig.enable && (
<>
<ListItem
title={Locale.Settings.Realtime.Provider.Title}
subTitle={Locale.Settings.Realtime.Provider.SubTitle}
>
<Select
aria-label={Locale.Settings.Realtime.Provider.Title}
value={props.realtimeConfig.provider}
onChange={(e) => {
props.updateConfig(
(config) =>
(config.provider = e.target.value as ServiceProvider),
);
}}
>
{providers.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Model.Title}
subTitle={Locale.Settings.Realtime.Model.SubTitle}
>
<Select
aria-label={Locale.Settings.Realtime.Model.Title}
value={props.realtimeConfig.model}
onChange={(e) => {
props.updateConfig((config) => (config.model = e.target.value));
}}
>
{models.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.ApiKey.Title}
subTitle={Locale.Settings.Realtime.ApiKey.SubTitle}
>
<PasswordInput
aria={Locale.Settings.ShowPassword}
aria-label={Locale.Settings.Realtime.ApiKey.Title}
value={props.realtimeConfig.apiKey}
type="text"
placeholder={Locale.Settings.Realtime.ApiKey.Placeholder}
onChange={(e) => {
props.updateConfig(
(config) => (config.apiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
{azureConfigComponent}
<ListItem
title={Locale.Settings.TTS.Voice.Title}
subTitle={Locale.Settings.TTS.Voice.SubTitle}
>
<Select
value={props.realtimeConfig.voice}
onChange={(e) => {
props.updateConfig(
(config) => (config.voice = e.currentTarget.value as Voice),
);
}}
>
{voices.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Temperature.Title}
subTitle={Locale.Settings.Realtime.Temperature.SubTitle}
>
<InputRange
aria={Locale.Settings.Realtime.Temperature.Title}
value={props.realtimeConfig?.temperature?.toFixed(1)}
min="0.6"
max="1"
step="0.1"
onChange={(e) => {
props.updateConfig(
(config) =>
(config.temperature = e.currentTarget.valueAsNumber),
);
}}
></InputRange>
</ListItem>
</>
)}
</>
);
}
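
For orientation, the RealtimeConfig shape this panel reads and writes can be reconstructed from the fields it touches. A sketch under that assumption — the canonical type is defined in @/app/store and may carry more fields:

import { Voice } from "rt-client";
import { ServiceProvider } from "@/app/constant";

// Reconstructed from the fields accessed above; illustrative only.
interface RealtimeConfigSketch {
  enable: boolean;
  provider: ServiceProvider; // OpenAI or Azure
  model: string; // e.g. "gpt-4o-realtime-preview-2024-10-01"
  apiKey: string;
  azure: { endpoint: string; deployment: string };
  voice: Voice; // "alloy" | "shimmer" | "echo"
  temperature: number; // the slider clamps this to 0.6–1.0
}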

View File

@@ -9,6 +9,7 @@ import CopyIcon from "../icons/copy.svg";
import ClearIcon from "../icons/clear.svg";
import LoadingIcon from "../icons/three-dots.svg";
import EditIcon from "../icons/edit.svg";
import FireIcon from "../icons/fire.svg";
import EyeIcon from "../icons/eye.svg";
import DownloadIcon from "../icons/download.svg";
import UploadIcon from "../icons/upload.svg";
@@ -18,7 +19,7 @@ import ConfirmIcon from "../icons/confirm.svg";
import ConnectionIcon from "../icons/connection.svg";
import CloudSuccessIcon from "../icons/cloud-success.svg";
import CloudFailIcon from "../icons/cloud-fail.svg";
import { trackSettingsPageGuideToCPaymentClick } from "../utils/auth-settings-events";
import {
Input,
List,
@@ -84,6 +85,7 @@ import { PluginConfigList } from "./plugin-config";
import { useMaskStore } from "../store/mask";
import { ProviderType } from "../utils/cloud";
import { TTSConfigList } from "./tts-config";
import { RealtimeConfigList } from "./realtime-chat/realtime-config";
import { STTConfigList } from "./stt-config";
function EditPromptModal(props: { id: string; onClose: () => void }) {
@@ -1748,9 +1750,11 @@ export function Settings() {
<ListItem
title={Locale.Settings.Access.CustomModel.Title}
subTitle={Locale.Settings.Access.CustomModel.SubTitle}
vertical={true}
>
<input
aria-label={Locale.Settings.Access.CustomModel.Title}
style={{ width: "100%", maxWidth: "unset", textAlign: "left" }}
type="text"
value={config.customModels}
placeholder="model1,model2,model3"
@@ -1777,7 +1781,18 @@ export function Settings() {
{shouldShowPromptModal && (
<UserPromptModal onClose={() => setShowPromptModal(false)} />
)}
<List>
<RealtimeConfigList
realtimeConfig={config.realtimeConfig}
updateConfig={(updater) => {
const realtimeConfig = { ...config.realtimeConfig };
updater(realtimeConfig);
config.update(
(config) => (config.realtimeConfig = realtimeConfig),
);
}}
/>
</List>
<List>
<PluginConfigList
pluginConfig={config.pluginConfig}
@@ -1788,7 +1803,6 @@ export function Settings() {
}}
/>
</List>
<List>
<TTSConfigList
ttsConfig={config.ttsConfig}

View File

@@ -0,0 +1 @@
export * from "./voice-print";

View File

@@ -0,0 +1,11 @@
.voice-print {
width: 100%;
height: 60px;
margin: 20px 0;
canvas {
width: 100%;
height: 100%;
filter: brightness(1.2); // increase overall brightness
}
}

View File

@@ -0,0 +1,180 @@
import { useEffect, useRef, useCallback } from "react";
import styles from "./voice-print.module.scss";
interface VoicePrintProps {
frequencies?: Uint8Array;
isActive?: boolean;
}
export function VoicePrint({ frequencies, isActive }: VoicePrintProps) {
// canvas ref used to obtain the drawing context
const canvasRef = useRef<HTMLCanvasElement>(null);
// historical frequency data, kept for smoothing
const historyRef = useRef<number[][]>([]);
// number of history frames to retain; controls smoothness
const historyLengthRef = useRef(10);
// animation frame id, stored for cleanup
const animationFrameRef = useRef<number>();
/**
 * Update the frequency history.
 * Maintains a fixed-length record using a FIFO queue.
 */
const updateHistory = useCallback((freqArray: number[]) => {
historyRef.current.push(freqArray);
if (historyRef.current.length > historyLengthRef.current) {
historyRef.current.shift();
}
}, []);
useEffect(() => {
const canvas = canvasRef.current;
if (!canvas) return;
const ctx = canvas.getContext("2d");
if (!ctx) return;
/**
 * Handle high-DPI displays:
 * scale the canvas's actual rendering resolution by the device pixel ratio.
 */
const dpr = window.devicePixelRatio || 1;
canvas.width = canvas.offsetWidth * dpr;
canvas.height = canvas.offsetHeight * dpr;
ctx.scale(dpr, dpr);
/**
 * Main drawing function.
 * Uses requestAnimationFrame for smooth animation.
 * Steps:
 * 1. clear the canvas
 * 2. update the history
 * 3. compute the waveform points
 * 4. draw the vertically symmetric voiceprint
 */
const draw = () => {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
if (!frequencies || !isActive) {
historyRef.current = [];
return;
}
const freqArray = Array.from(frequencies);
updateHistory(freqArray);
// draw the voiceprint
const points: [number, number][] = [];
const centerY = canvas.height / 2;
const width = canvas.width;
const sliceWidth = width / (frequencies.length - 1);
// draw the main waveform
ctx.beginPath();
ctx.moveTo(0, centerY);
/**
 * Voiceprint drawing algorithm:
 * 1. average against historical data for smooth transitions
 * 2. add natural oscillation via a sine function
 * 3. connect points with Bézier curves for a smoother line
 * 4. draw the mirrored half to complete the voiceprint
 */
for (let i = 0; i < frequencies.length; i++) {
const x = i * sliceWidth;
let avgFrequency = frequencies[i];
/**
 * Waveform smoothing:
 * 1. collect the frequency values at this index from the history
 * 2. average the current value with the historical values
 * 3. derive the rendered height from that average
 */
if (historyRef.current.length > 0) {
const historicalValues = historyRef.current.map((h) => h[i] || 0);
avgFrequency =
(avgFrequency + historicalValues.reduce((a, b) => a + b, 0)) /
(historyRef.current.length + 1);
}
/**
 * Waveform transform:
 * 1. normalize the frequency value to the 0-1 range
 * 2. apply a time-dependent sine modulation
 * 3. connect points smoothly with Bézier curves
 */
const normalized = avgFrequency / 255.0;
const height = normalized * (canvas.height / 2);
const y = centerY + height * Math.sin(i * 0.2 + Date.now() * 0.002);
points.push([x, y]);
if (i === 0) {
ctx.moveTo(x, y);
} else {
// use a Bézier curve to smooth the waveform
const prevPoint = points[i - 1];
const midX = (prevPoint[0] + x) / 2;
ctx.quadraticCurveTo(
prevPoint[0],
prevPoint[1],
midX,
(prevPoint[1] + y) / 2,
);
}
}
// draw the symmetric lower half
for (let i = points.length - 1; i >= 0; i--) {
const [x, y] = points[i];
const symmetricY = centerY - (y - centerY);
if (i === points.length - 1) {
ctx.lineTo(x, symmetricY);
} else {
const nextPoint = points[i + 1];
const midX = (nextPoint[0] + x) / 2;
ctx.quadraticCurveTo(
nextPoint[0],
centerY - (nextPoint[1] - centerY),
midX,
centerY - ((nextPoint[1] + y) / 2 - centerY),
);
}
}
ctx.closePath();
/**
 * Gradient fill:
 * a three-stop left-to-right gradient with transparency,
 * using a blue palette for the visual effect.
 */
const gradient = ctx.createLinearGradient(0, 0, canvas.width, 0);
gradient.addColorStop(0, "rgba(100, 180, 255, 0.95)");
gradient.addColorStop(0.5, "rgba(140, 200, 255, 0.9)");
gradient.addColorStop(1, "rgba(180, 220, 255, 0.95)");
ctx.fillStyle = gradient;
ctx.fill();
animationFrameRef.current = requestAnimationFrame(draw);
};
// start the animation loop
draw();
// cleanup: cancel the animation when the component unmounts
return () => {
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, [frequencies, isActive, updateHistory]);
return (
<div className={styles["voice-print"]}>
<canvas ref={canvasRef} />
</div>
);
}
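
A minimal usage sketch for VoicePrint: drive frequencies from a Web Audio AnalyserNode, mirroring what AudioHandler.getByteFrequencyData() does for the realtime chat above. The hook and component below are illustrative, not part of this commit.

import { useEffect, useState } from "react";
import { VoicePrint } from "@/app/components/voice-print";

// Illustrative hook: samples an AnalyserNode once per animation frame
// and passes the byte frequency data down to <VoicePrint />.
function useFrequencies(analyser: AnalyserNode, active: boolean) {
  const [frequencies, setFrequencies] = useState<Uint8Array>();
  useEffect(() => {
    if (!active) {
      setFrequencies(undefined);
      return;
    }
    const data = new Uint8Array(analyser.frequencyBinCount);
    let id = requestAnimationFrame(function tick() {
      analyser.getByteFrequencyData(data);
      setFrequencies(new Uint8Array(data)); // copy so React sees a new value
      id = requestAnimationFrame(tick);
    });
    return () => cancelAnimationFrame(id);
  }, [analyser, active]);
  return frequencies;
}

export function MicVisualizer(props: { analyser: AnalyserNode }) {
  const frequencies = useFrequencies(props.analyser, true);
  return <VoicePrint frequencies={frequencies} isActive />;
}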