Nan
2025-04-30 14:06:10 +08:00
parent e9fa30b0fe
commit 0db6b6be56
19 changed files with 2183 additions and 275 deletions

View File: TensorFlow.module.scss

@@ -0,0 +1,179 @@
.voiceRecognitionContainer {
display: flex;
flex-direction: column;
width: 100%;
max-width: 800px;
margin: 0 auto;
padding: 20px;
background-color: #1e1e1e;
border-radius: 10px;
box-shadow: 0 5px 20px rgba(0, 0, 0, 0.3);
color: #ffffff;
}
.title {
text-align: center;
margin-bottom: 20px;
font-size: 24px;
}
.statusContainer {
display: flex;
flex-direction: column;
align-items: center;
margin-bottom: 20px;
}
.statusIndicator {
display: flex;
align-items: center;
background-color: rgba(0, 0, 0, 0.5);
padding: 8px 16px;
border-radius: 20px;
margin-bottom: 10px;
}
.statusDot {
width: 12px;
height: 12px;
border-radius: 50%;
margin-right: 10px;
&.idle {
background-color: #888888;
}
&.recording {
background-color: #ff9800;
animation: pulse 1.5s infinite;
}
&.training {
background-color: #2196f3;
animation: pulse 1.5s infinite;
}
&.recognizing {
background-color: #9c27b0;
animation: pulse 1.5s infinite;
}
&.trained {
background-color: #4caf50;
}
&.matched {
background-color: #4caf50;
}
&.not_matched {
background-color: #f44336;
}
&.error {
background-color: #f44336;
}
}
.statusText {
font-size: 14px;
}
.message {
font-size: 16px;
text-align: center;
margin: 0;
}
.visualizerContainer {
width: 100%;
height: 150px;
margin-bottom: 20px;
border: 1px solid rgba(255, 255, 255, 0.2);
border-radius: 5px;
overflow: hidden;
}
.controlsContainer {
display: flex;
justify-content: space-between;
margin-bottom: 20px;
@media (max-width: 600px) {
flex-direction: column;
}
}
.trainingControls,
.recognitionControls {
display: flex;
flex-direction: column;
width: 48%;
@media (max-width: 600px) {
width: 100%;
margin-bottom: 20px;
}
h3 {
margin-bottom: 10px;
font-size: 18px;
}
}
.button {
padding: 10px 15px;
margin-bottom: 10px;
border: none;
border-radius: 5px;
background-color: #2196f3;
color: white;
font-size: 14px;
cursor: pointer;
transition: background-color 0.3s;
&:hover:not(:disabled) {
background-color: #1976d2;
}
&:disabled {
background-color: #cccccc;
color: #666666;
cursor: not-allowed;
}
}
.resultContainer {
margin-top: 20px;
padding: 15px;
background-color: rgba(0, 0, 0, 0.3);
border-radius: 5px;
}
.scoreBar {
width: 100%;
height: 20px;
background-color: rgba(255, 255, 255, 0.1);
border-radius: 10px;
overflow: hidden;
margin-bottom: 10px;
}
.scoreIndicator {
height: 100%;
background: linear-gradient(to right, #f44336, #ffeb3b, #4caf50);
border-radius: 10px;
transition: width 0.5s ease-in-out;
}
.scoreValue {
text-align: center;
font-size: 16px;
font-weight: bold;
}
@keyframes pulse {
0% { box-shadow: 0 0 0 0 rgba(255, 255, 255, 0.7); }
70% { box-shadow: 0 0 0 10px rgba(255, 255, 255, 0); }
100% { box-shadow: 0 0 0 0 rgba(255, 255, 255, 0); }
}

View File: TensorFlow.tsx

@@ -0,0 +1,506 @@
import React, { useState, useEffect, useRef } from "react";
import * as tf from "@tensorflow/tfjs";
import { VoicePrint } from "./voice-print/voice-print";
import styles from "./TensorFlow.module.scss";
// Voiceprint recognition status. The string values double as UI labels and,
// lowercased, as the CSS class keys in TensorFlow.module.scss (e.g. .idle).
enum VoiceRecognitionStatus {
IDLE = "Idle",
RECORDING = "Recording",
TRAINING = "Training",
RECOGNIZING = "Recognizing",
TRAINED = "Trained",
MATCHED = "Matched",
NOT_MATCHED = "Not_Matched",
ERROR = "Error",
}
// Voiceprint feature-extraction parameters
const SAMPLE_RATE = 16000; // sample rate (Hz)
const FFT_SIZE = 1024; // FFT size
const MEL_BINS = 40; // number of Mel filter banks
const FRAME_LENGTH = 25; // frame length (ms)
const FRAME_STEP = 10; // frame step (ms)
const FEATURE_LENGTH = 100; // feature sequence length (frames)
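// Worked example (editorial note): at 16 kHz a 25 ms frame is
// 16000 * 25 / 1000 = 400 samples and a 10 ms step is 160 samples, so
// FEATURE_LENGTH = 100 frames covers about 25 + 99 * 10 = 1015 ms of audio.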
const TensorFlow: React.FC = () => {
// State
const [status, setStatus] = useState<VoiceRecognitionStatus>(
VoiceRecognitionStatus.IDLE,
);
const [message, setMessage] = useState<string>("");
const [isRecording, setIsRecording] = useState<boolean>(false);
const [isTrained, setIsTrained] = useState<boolean>(false);
const [matchScore, setMatchScore] = useState<number>(0);
const [frequencies, setFrequencies] = useState<Uint8Array | undefined>(
undefined,
);
// Refs
const audioContextRef = useRef<AudioContext | null>(null);
const analyserRef = useRef<AnalyserNode | null>(null);
const mediaStreamRef = useRef<MediaStream | null>(null);
const recordedChunksRef = useRef<Float32Array[]>([]);
const modelRef = useRef<tf.LayersModel | null>(null);
const voiceprintRef = useRef<Float32Array | null>(null);
const animationFrameRef = useRef<number | null>(null);
// Initialization
useEffect(() => {
// Check for a previously saved voiceprint
const savedVoiceprint = localStorage.getItem("userVoiceprint");
if (savedVoiceprint) {
try {
voiceprintRef.current = new Float32Array(JSON.parse(savedVoiceprint));
setIsTrained(true);
setStatus(VoiceRecognitionStatus.TRAINED);
setMessage("已加载保存的声纹模型");
} catch (error) {
console.error("加载保存的声纹模型失败:", error);
}
}
// Load the TensorFlow model
loadModel();
return () => {
stopRecording();
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, []);
// Build the voiceprint recognition model
const loadModel = async () => {
try {
// Create a simple convolutional embedding model
const model = tf.sequential();
// Convolutional layers over the audio features
model.add(
tf.layers.conv1d({
inputShape: [FEATURE_LENGTH, MEL_BINS],
filters: 32,
kernelSize: 3,
activation: "relu",
}),
);
model.add(tf.layers.maxPooling1d({ poolSize: 2 }));
model.add(
tf.layers.conv1d({
filters: 64,
kernelSize: 3,
activation: "relu",
}),
);
model.add(tf.layers.maxPooling1d({ poolSize: 2 }));
model.add(tf.layers.flatten());
// Fully connected layers
model.add(tf.layers.dense({ units: 128, activation: "relu" }));
model.add(tf.layers.dropout({ rate: 0.5 }));
// Output layer: the voiceprint embedding vector
model.add(tf.layers.dense({ units: 64, activation: "linear" }));
// Compile the model
model.compile({
optimizer: "adam",
loss: "meanSquaredError",
});
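// Note (editorial): the commit never calls model.fit(), so "training" below
// is a single forward pass through this randomly initialized network; the
// stored voiceprint is a fixed random projection of the features rather
// than a learned speaker embedding.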
modelRef.current = model;
console.log("声纹识别模型已加载");
} catch (error) {
console.error("加载模型失败:", error);
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("加载模型失败");
}
};
// Start recording
const startRecording = async (isTraining: boolean = false) => {
try {
if (isRecording) return;
// Reset recorded data
recordedChunksRef.current = [];
// Request microphone access
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaStreamRef.current = stream;
// Create the audio context
const audioContext = new (window.AudioContext ||
(window as any).webkitAudioContext)();
audioContextRef.current = audioContext;
// Create an analyser node for visualization
const analyser = audioContext.createAnalyser();
analyser.fftSize = FFT_SIZE;
analyserRef.current = analyser;
// Create the audio source
const source = audioContext.createMediaStreamSource(stream);
source.connect(analyser);
// Create a processor node (ScriptProcessorNode is deprecated; AudioWorklet is the modern replacement)
const processor = audioContext.createScriptProcessor(4096, 1, 1);
// Capture audio data
processor.onaudioprocess = (e) => {
const inputData = e.inputBuffer.getChannelData(0);
recordedChunksRef.current.push(new Float32Array(inputData));
};
// Connect the nodes
analyser.connect(processor);
processor.connect(audioContext.destination);
// Update state
setIsRecording(true);
setStatus(
isTraining
? VoiceRecognitionStatus.RECORDING
: VoiceRecognitionStatus.RECOGNIZING,
);
setMessage(
isTraining ? "请说话3-5秒钟用于训练..." : "请说话进行声纹识别...",
);
// Start the spectrum visualization
startVisualization();
// Auto-stop: in training mode, stop after 5 seconds and train
if (isTraining) {
setTimeout(() => {
stopRecording();
trainVoiceprint();
}, 5000);
}
} catch (error) {
console.error("Failed to start recording:", error);
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("Cannot access the microphone; please check permissions");
}
};
// Stop recording
const stopRecording = () => {
// Guard on the refs rather than `isRecording`: this function can run inside
// the training setTimeout, whose closure captured a stale `isRecording`.
if (!mediaStreamRef.current && !audioContextRef.current) return;
// Stop all audio tracks
if (mediaStreamRef.current) {
mediaStreamRef.current.getTracks().forEach((track) => track.stop());
mediaStreamRef.current = null;
}
// Close the audio context
if (audioContextRef.current) {
audioContextRef.current.close();
audioContextRef.current = null;
}
// Stop the visualization
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
animationFrameRef.current = null;
}
setIsRecording(false);
setFrequencies(undefined);
};
// Start spectrum visualization
const startVisualization = () => {
const analyser = analyserRef.current;
if (!analyser) return;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const updateVisualization = () => {
if (!analyser) return;
analyser.getByteFrequencyData(dataArray);
setFrequencies(dataArray);
animationFrameRef.current = requestAnimationFrame(updateVisualization);
};
updateVisualization();
};
// Extract audio features
const extractFeatures = async (
audioData: Float32Array[],
): Promise<tf.Tensor | null> => {
try {
// Merge all recorded chunks
const mergedData = new Float32Array(
audioData.reduce((acc, chunk) => acc + chunk.length, 0),
);
let offset = 0;
for (const chunk of audioData) {
mergedData.set(chunk, offset);
offset += chunk.length;
}
// Compute a (simplified) Mel spectrogram. A real application would use
// proper signal processing here, e.g. MFCCs (Mel-frequency cepstral
// coefficients) from a dedicated DSP library.
const frameLength = Math.round((SAMPLE_RATE * FRAME_LENGTH) / 1000);
const frameStep = Math.round((SAMPLE_RATE * FRAME_STEP) / 1000);
// Frame the signal for short-time analysis (simplified)
const frames = [];
for (let i = 0; i + frameLength <= mergedData.length; i += frameStep) {
const frame = mergedData.slice(i, i + frameLength);
frames.push(Array.from(frame));
}
// Cap the number of frames
const limitedFrames = frames.slice(0, FEATURE_LENGTH);
// Zero-pad if there are too few frames
while (limitedFrames.length < FEATURE_LENGTH) {
limitedFrames.push(new Array(frameLength).fill(0));
}
// Build the feature tensor
const featureTensor = tf.tensor(limitedFrames);
// Simplified Mel-spectrum computation; a real implementation would be
// more accurate.
const melSpectrogram = tf.tidy(() => {
// "FFT" magnitude (simplified: plain absolute values, no real FFT)
const fftMag = featureTensor.abs();
// Reduce to MEL_BINS columns
const reshaped = fftMag.reshape([FEATURE_LENGTH, -1]);
const melFeatures = reshaped.slice([0, 0], [FEATURE_LENGTH, MEL_BINS]);
// Normalize
const normalized = melFeatures.div(tf.scalar(255.0));
return normalized.expandDims(0); // add a batch dimension
});
featureTensor.dispose(); // created outside tf.tidy, so release it manually
return melSpectrogram;
} catch (error) {
console.error("Feature extraction failed:", error);
return null;
}
};
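// Editorial sketch (not part of the original commit): a more faithful
// front end would window each frame before the FFT to reduce spectral
// leakage. A minimal Hann-window helper, assuming plain Float32Array frames:
const applyHannWindow = (frame: Float32Array): Float32Array => {
const windowed = new Float32Array(frame.length);
for (let i = 0; i < frame.length; i++) {
// w[i] = 0.5 * (1 - cos(2 * pi * i / (N - 1)))
windowed[i] =
frame[i] * 0.5 * (1 - Math.cos((2 * Math.PI * i) / (frame.length - 1)));
}
return windowed;
};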
// Train the voiceprint model
const trainVoiceprint = async () => {
if (recordedChunksRef.current.length === 0 || !modelRef.current) {
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("没有录音数据或模型未加载");
return;
}
setStatus(VoiceRecognitionStatus.TRAINING);
setMessage("正在训练声纹模型...");
try {
// Extract features
const features = await extractFeatures(recordedChunksRef.current);
if (!features) throw new Error("Feature extraction failed");
// Run the model to get the voiceprint embedding
const voiceprint = tf.tidy(() => {
// Forward pass
const prediction = modelRef.current!.predict(features) as tf.Tensor;
// L2-normalize the embedding
return tf.div(prediction, tf.norm(prediction));
});
// Save the voiceprint
const voiceprintData = await voiceprint.data();
voiceprintRef.current = new Float32Array(voiceprintData);
// Persist to localStorage
localStorage.setItem(
"userVoiceprint",
JSON.stringify(Array.from(voiceprintData)),
);
setIsTrained(true);
setStatus(VoiceRecognitionStatus.TRAINED);
setMessage("声纹模型训练完成并已保存");
// 清理
voiceprint.dispose();
features.dispose();
} catch (error) {
console.error("训练失败:", error);
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("声纹训练失败");
}
};
// Recognize the voice
const recognizeVoice = async () => {
if (!isTrained || !voiceprintRef.current) {
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("请先训练声纹模型");
return;
}
if (recordedChunksRef.current.length === 0 || !modelRef.current) {
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("没有录音数据或模型未加载");
return;
}
try {
// Extract features
const features = await extractFeatures(recordedChunksRef.current);
if (!features) throw new Error("Feature extraction failed");
// Run the model to get the current voiceprint embedding
const currentVoiceprint = tf.tidy(() => {
// Forward pass
const prediction = modelRef.current!.predict(features) as tf.Tensor;
// L2-normalize the embedding
return tf.div(prediction, tf.norm(prediction));
});
});
// Cosine similarity against the saved voiceprint
const similarity = tf.tidy(() => {
const savedVoiceprint = tf.tensor1d(voiceprintRef.current!);
// Dot product (both vectors are unit-length, so this equals the cosine)
const dotProduct = tf.sum(
tf.mul(currentVoiceprint.reshape([-1]), savedVoiceprint),
);
return dotProduct;
});
// Similarity score in [-1, 1]; closer to 1 means more similar
const similarityScore = await similarity.data();
const score = similarityScore[0];
setMatchScore(score);
// Same-speaker decision (the threshold is tunable)
const threshold = 0.7;
const isMatch = score > threshold;
setStatus(
isMatch
? VoiceRecognitionStatus.MATCHED
: VoiceRecognitionStatus.NOT_MATCHED,
);
setMessage(
isMatch
? `Voiceprint matched! Similarity: ${(score * 100).toFixed(2)}%`
: `Voiceprint did not match. Similarity: ${(score * 100).toFixed(2)}%`,
);
// Clean up
currentVoiceprint.dispose();
features.dispose();
similarity.dispose();
} catch (error) {
console.error("Recognition failed:", error);
setStatus(VoiceRecognitionStatus.ERROR);
setMessage("Voiceprint recognition failed");
}
};
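// Editorial sketch (not part of the original commit): because both
// embeddings are L2-normalized, the tensor dot product above already equals
// the cosine similarity. A plain TypeScript equivalent, handy for unit
// testing the threshold logic without TensorFlow:
const cosineSimilarity = (a: Float32Array, b: Float32Array): number => {
let dot = 0;
let normA = 0;
let normB = 0;
for (let i = 0; i < a.length; i++) {
dot += a[i] * b[i];
normA += a[i] * a[i];
normB += b[i] * b[i];
}
const denom = Math.sqrt(normA) * Math.sqrt(normB);
return denom === 0 ? 0 : dot / denom; // guard against zero vectors
};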
// Clear trained data
const clearTrainedData = () => {
localStorage.removeItem("userVoiceprint");
voiceprintRef.current = null;
setIsTrained(false);
setStatus(VoiceRecognitionStatus.IDLE);
setMessage("声纹数据已清除");
};
return (
<div className={styles.voiceRecognitionContainer}>
<h2 className={styles.title}></h2>
<div className={styles.statusContainer}>
<div className={styles.statusIndicator}>
<div
className={`${styles.statusDot} ${styles[status.toLowerCase()]}`}
></div>
<span className={styles.statusText}>{status}</span>
</div>
<p className={styles.message}>{message}</p>
</div>
<div className={styles.visualizerContainer}>
<VoicePrint frequencies={frequencies} isActive={isRecording} />
</div>
<div className={styles.controlsContainer}>
<div className={styles.trainingControls}>
<h3></h3>
<button
className={styles.button}
onClick={() => startRecording(true)}
disabled={isRecording}
>
</button>
<button
className={styles.button}
onClick={clearTrainedData}
disabled={!isTrained}
>
</button>
</div>
<div className={styles.recognitionControls}>
<h3></h3>
<button
className={styles.button}
onClick={() => startRecording(false)}
disabled={isRecording || !isTrained}
>
</button>
<button
className={styles.button}
onClick={() => {
stopRecording();
recognizeVoice();
}}
disabled={!isRecording}
>
</button>
</div>
</div>
{status === VoiceRecognitionStatus.MATCHED ||
status === VoiceRecognitionStatus.NOT_MATCHED ? (
<div className={styles.resultContainer}>
<div className={styles.scoreBar}>
<div
className={styles.scoreIndicator}
style={{ width: `${Math.max(0, matchScore * 100)}%` }}
></div>
</div>
<div className={styles.scoreValue}>
Similarity: {(matchScore * 100).toFixed(2)}%
</div>
</div>
) : null}
</div>
);
};
export default TensorFlow;

View File: WechatAuthor.module.scss

@@ -0,0 +1,64 @@
.container {
position: relative;
padding: 15px;
border-bottom: 1px solid var(--gray);
}
.avatarContainer {
display: flex;
align-items: center;
cursor: pointer;
}
.avatar {
width: 40px;
height: 40px;
border-radius: 50%;
object-fit: cover;
margin-right: 10px;
border: 2px solid var(--primary);
}
.userInfo {
flex: 1;
overflow: hidden;
}
.nickname {
font-size: 16px;
font-weight: bold;
color: var(--black);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.userId {
font-size: 12px;
color: var(--black-50);
margin-top: 2px;
}
.menu {
position: absolute;
top: 100%;
left: 0;
width: 120px;
background-color: var(--white);
border-radius: 4px;
box-shadow: var(--card-shadow);
z-index: 1000;
overflow: hidden;
}
.menuItem {
padding: 10px 15px;
font-size: 14px;
color: var(--black);
cursor: pointer;
transition: background-color 0.3s;
&:hover {
background-color: var(--gray);
}
}

View File: WechatAuthor.tsx

@@ -0,0 +1,113 @@
import React, { useState, useRef, useEffect } from "react";
import { useNavigate } from "react-router-dom";
import styles from "./WechatAuthor.module.scss";
import { Path } from "../constant";
import { useAccessStore } from "../store";
import { safeLocalStorage } from "../utils";
import { showConfirm } from "./ui-lib";
interface WechatUserInfo {
id: string;
nickname: string;
avatar: string;
accessToken: string;
}
export function WechatAuthor() {
const navigate = useNavigate();
const accessStore = useAccessStore();
const storage = safeLocalStorage();
const [userInfo, setUserInfo] = useState<WechatUserInfo | null>(null);
const [showMenu, setShowMenu] = useState(false);
const menuRef = useRef<HTMLDivElement>(null);
// Load user info
useEffect(() => {
const userInfoStr = storage.getItem("wechat_user_info");
if (userInfoStr) {
try {
const parsedInfo = JSON.parse(userInfoStr);
setUserInfo(parsedInfo);
} catch (e) {
console.error("Failed to parse user info", e);
}
}
}, []);
// Close the menu when clicking outside
useEffect(() => {
const handleClickOutside = (event: MouseEvent) => {
if (menuRef.current && !menuRef.current.contains(event.target as Node)) {
setShowMenu(false);
}
};
document.addEventListener("mousedown", handleClickOutside);
return () => {
document.removeEventListener("mousedown", handleClickOutside);
};
}, []);
// Handle logout
const handleLogout = async () => {
const confirmed = await showConfirm("Are you sure you want to log out?");
if (confirmed) {
// Clear login info
storage.removeItem("wechat_user_info");
// Update access state
accessStore.update((access) => {
access.accessToken = "";
access.wechatLoggedIn = false;
});
// Redirect to the home page
navigate(Path.Home);
}
setShowMenu(false);
};
// If not logged in, show a login prompt
if (!accessStore.wechatLoggedIn) {
return (
<div className={styles.container}>
<div
className={styles.loginPrompt}
onClick={() => navigate(Path.Login)}
>
</div>
</div>
);
}
return (
<div className={styles.container}>
<div
className={styles.avatarContainer}
onClick={() => setShowMenu(true)}
onContextMenu={(e) => {
e.preventDefault();
setShowMenu(true);
}}
>
<img
src={userInfo?.avatar}
alt={userInfo?.nickname}
className={styles.avatar}
/>
<div className={styles.userInfo}>
<div className={styles.nickname}>{userInfo?.nickname}</div>
<div className={styles.userId}>ID: {userInfo?.id}</div>
</div>
</div>
{showMenu && (
<div className={styles.menu} ref={menuRef}>
<div className={styles.menuItem} onClick={handleLogout}>
Log Out
</div>
</div>
)}
</div>
);
}

View File: WechatLogin.module.scss

@@ -0,0 +1,195 @@
.container {
display: flex;
justify-content: center;
align-items: center;
min-height: 100vh;
background-color: var(--gray);
}
.loginCard {
width: 400px;
background-color: var(--white);
border-radius: 10px;
box-shadow: var(--card-shadow);
padding: 30px;
display: flex;
flex-direction: column;
align-items: center;
}
.header {
text-align: center;
margin-bottom: 30px;
h2 {
font-size: 24px;
margin-bottom: 10px;
color: var(--primary);
}
.subtitle {
font-size: 14px;
color: var(--black-50);
}
}
.qrcodeContainer {
width: 240px;
height: 240px;
position: relative;
margin-bottom: 20px;
display: flex;
justify-content: center;
align-items: center;
}
.loadingWrapper {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100%;
width: 100%;
p {
margin-top: 15px;
color: var(--black-50);
font-size: 14px;
}
}
.loadingIcon {
width: 40px;
height: 40px;
animation: spin 1.5s linear infinite;
}
.qrcodeWrapper {
position: relative;
width: 100%;
height: 100%;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
cursor: pointer;
&:hover .qrcodeOverlay {
opacity: 1;
}
}
.qrcode {
width: 200px;
height: 200px;
padding: 10px;
background-color: white;
border: 1px solid var(--gray);
border-radius: 8px;
}
.qrcodeOverlay {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(0, 0, 0, 0.7);
display: flex;
justify-content: center;
align-items: center;
opacity: 0;
transition: opacity 0.3s;
border-radius: 8px;
p {
color: white;
font-size: 16px;
}
}
.qrcodeHint {
margin-top: 15px;
color: var(--black-50);
font-size: 14px;
text-align: center;
}
.statusWrapper {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100%;
width: 100%;
}
.statusIcon {
width: 60px;
height: 60px;
display: flex;
justify-content: center;
align-items: center;
margin-bottom: 20px;
}
.successIcon {
width: 60px;
height: 60px;
color: var(--success);
}
.errorIcon {
width: 60px;
height: 60px;
color: var(--error);
}
.statusText {
font-size: 16px;
text-align: center;
color: var(--black);
margin-bottom: 20px;
}
.footer {
width: 100%;
display: flex;
flex-direction: column;
align-items: center;
}
.expireHint {
font-size: 12px;
color: var(--black-50);
margin-bottom: 15px;
}
.refreshButton {
padding: 8px 20px;
background-color: var(--primary);
color: white;
border: none;
border-radius: 4px;
cursor: pointer;
font-size: 14px;
transition: background-color 0.3s;
&:hover {
background-color: var(--primary-dark);
}
&:disabled {
background-color: var(--gray);
cursor: not-allowed;
}
}
@keyframes spin {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}

View File: WechatLogin.tsx

@@ -0,0 +1,174 @@
import React, { useState, useEffect } from "react";
import { useNavigate } from "react-router-dom";
import { Path } from "../constant";
import styles from "./WechatLogin.module.scss";
import LoadingIcon from "../icons/loading.svg";
// import QRCodeImage from "../icons/wechat-qrcode-mock.svg"; // a mock QR-code SVG
import SuccessIcon from "../icons/confirm.svg";
import ErrorIcon from "../icons/close.svg";
import Locale from "../locales";
import { useAccessStore } from "../store";
import { safeLocalStorage } from "../utils";
// Login status enum
enum LoginStatus {
LOADING = "loading",
READY = "ready",
SCANNED = "scanned",
CONFIRMED = "confirmed",
SUCCESS = "success",
ERROR = "error",
}
export function WechatLogin() {
const navigate = useNavigate();
const [status, setStatus] = useState<LoginStatus>(LoginStatus.LOADING);
const [errorMessage, setErrorMessage] = useState<string>("");
const accessStore = useAccessStore();
const storage = safeLocalStorage();
// Simulated login flow
useEffect(() => {
// Initial load
const timer1 = setTimeout(() => {
setStatus(LoginStatus.READY);
}, 1000);
return () => {
clearTimeout(timer1);
};
}, []);
// Simulate the QR-code scan-and-confirm flow
const simulateLogin = () => {
// Simulate scanning
setStatus(LoginStatus.SCANNED);
// Simulate confirmation
setTimeout(() => {
setStatus(LoginStatus.CONFIRMED);
// Simulate a successful login
setTimeout(() => {
setStatus(LoginStatus.SUCCESS);
// Store login info
const mockUserInfo = {
id: "wx_" + Math.floor(Math.random() * 1000000),
nickname: "微信用户",
avatar: "https://placekitten.com/100/100", // 模拟头像
accessToken: "mock_token_" + Date.now(),
};
storage.setItem("wechat_user_info", JSON.stringify(mockUserInfo));
// Update access state
accessStore.update((access) => {
access.accessToken = mockUserInfo.accessToken;
access.wechatLoggedIn = true;
});
// Navigate after a successful login
setTimeout(() => {
navigate(Path.Chat);
}, 2000);
}, 1000);
}, 2000);
};
// Refresh the QR code
const refreshQRCode = () => {
setStatus(LoginStatus.LOADING);
setTimeout(() => {
setStatus(LoginStatus.READY);
}, 1000);
};
// Handle login errors
const handleLoginError = () => {
setStatus(LoginStatus.ERROR);
setErrorMessage("登录失败,请稍后重试");
};
return (
<div className={styles.container}>
<div className={styles.loginCard}>
<div className={styles.header}>
<h2>{Locale.Auth.Title}</h2>
<p className={styles.subtitle}>使</p>
</div>
<div className={styles.qrcodeContainer}>
{status === LoginStatus.LOADING && (
<div className={styles.loadingWrapper}>
<LoadingIcon className={styles.loadingIcon} />
<p>...</p>
</div>
)}
{status === LoginStatus.READY && (
<div className={styles.qrcodeWrapper} onClick={simulateLogin}>
{/* <QRCodeImage className={styles.qrcode} /> */}
<div className={styles.qrcodeOverlay}>
<p></p>
</div>
<p className={styles.qrcodeHint}>使</p>
</div>
)}
{status === LoginStatus.SCANNED && (
<div className={styles.statusWrapper}>
<div className={styles.statusIcon}>
<LoadingIcon className={styles.loadingIcon} />
</div>
<p className={styles.statusText}></p>
</div>
)}
{status === LoginStatus.CONFIRMED && (
<div className={styles.statusWrapper}>
<div className={styles.statusIcon}>
<LoadingIcon className={styles.loadingIcon} />
</div>
<p className={styles.statusText}>...</p>
</div>
)}
{status === LoginStatus.SUCCESS && (
<div className={styles.statusWrapper}>
<div className={styles.statusIcon}>
<SuccessIcon className={styles.successIcon} />
</div>
<p className={styles.statusText}>...</p>
</div>
)}
{status === LoginStatus.ERROR && (
<div className={styles.statusWrapper}>
<div className={styles.statusIcon}>
<ErrorIcon className={styles.errorIcon} />
</div>
<p className={styles.statusText}>{errorMessage}</p>
<button className={styles.refreshButton} onClick={refreshQRCode}>
</button>
</div>
)}
</div>
{(status === LoginStatus.READY || status === LoginStatus.LOADING) && (
<div className={styles.footer}>
<p className={styles.expireHint}>2</p>
<button
className={styles.refreshButton}
onClick={refreshQRCode}
disabled={status === LoginStatus.LOADING}
>
</button>
</div>
)}
</div>
</div>
);
}

View File: AuthWrapper.tsx

@@ -0,0 +1,31 @@
import React, { useEffect } from "react";
import { useNavigate, useLocation } from "react-router-dom";
import { Path } from "../constant";
import { useAccessStore } from "../store";
import { safeLocalStorage } from "../utils";
// Paths that can be visited without logging in
const PUBLIC_PATHS = [Path.Home, Path.Login];
export function AuthWrapper({ children }: { children: React.ReactNode }) {
const navigate = useNavigate();
const location = useLocation();
const accessStore = useAccessStore();
const storage = safeLocalStorage();
useEffect(() => {
// Does the current path require login?
const isPublicPath = PUBLIC_PATHS.includes(location.pathname as Path);
// Is the user logged in?
const userInfoStr = storage.getItem("wechat_user_info");
const isLoggedIn = userInfoStr && accessStore.wechatLoggedIn;
// If login is required but the user is not logged in, redirect to the login page
if (!isPublicPath && !isLoggedIn) {
navigate(Path.Login);
}
}, [location.pathname, navigate, accessStore.wechatLoggedIn]);
return <>{children}</>;
}
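// Editorial sketch (not part of the commit): AuthWrapper is meant to wrap
// the app's <Routes> so every navigation passes the login check, e.g.:
//
//   <AuthWrapper>
//     <Routes>
//       <Route path={Path.Login} element={<LoginPage />} />
//       <Route path={Path.Chat} element={<Chat />} />
//     </Routes>
//   </AuthWrapper>
//
// The commented-out reference in home.tsx later in this commit suggests the
// wiring was planned but not yet enabled.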

View File

@@ -620,6 +620,7 @@
flex-direction: column;
border-top: var(--border-in-light);
box-shadow: var(--card-shadow);
// height: 15vh; // fixed height at 15% of the viewport (left disabled)
.chat-input-actions {
.chat-input-action {

View File: home.tsx

@@ -30,6 +30,7 @@ import { type ClientApi, getClientApi } from "../client/api";
import { useAccessStore } from "../store";
import clsx from "clsx";
import { initializeMcpSystem, isMcpEnabled } from "../mcp/actions";
import LoginPage from "../pages/login";
export function Loading(props: { noLogo?: boolean }) {
return (
@@ -198,6 +199,7 @@ function Screen() {
})}
/>
<WindowContent>
{/* <AuthWrapper></AuthWrapper> 只有登录时才可以路由到其他页面,相当于拦截器*/}
<Routes>
<Route path={Path.Home} element={<Chat />} />
<Route path={Path.NewChat} element={<NewChat />} />
@@ -207,6 +209,7 @@ function Screen() {
<Route path={Path.Chat} element={<Chat />} />
<Route path={Path.Settings} element={<Settings />} />
<Route path={Path.McpMarket} element={<McpMarketPage />} />
<Route path={Path.Login} element={<LoginPage />} />
{/* <Route path={Path.Interview} element={<InterviewPage/>}/> */}
</Routes>
</WindowContent>

View File: interview-overlay.scss

@@ -0,0 +1,163 @@
.interview-overlay {
position: fixed;
top: 5px;
right: 5px;
width: 33vw;
height: 85vh;
background-color: #1e1e1e;
border: 1px solid rgba(255, 255, 255, 0.2);
border-radius: 10px;
box-shadow: 0 5px 20px rgba(0, 0, 0, 0.3);
display: flex;
flex-direction: column;
align-items: flex-start;
justify-content: flex-start;
color: #ffffff;
z-index: 1000;
padding: 20px;
overflow-y: auto;
&.dragging {
cursor: col-resize;
}
.drag-handle {
position: absolute;
left: 0;
top: 0;
width: 5px;
height: 100%;
cursor: col-resize;
background-color: transparent;
&:hover {
background-color: rgba(255, 255, 255, 0.2);
}
}
.content-container {
display: flex;
flex-direction: column;
align-items: flex-start;
justify-content: flex-start;
width: 100%;
}
.status-indicator {
display: flex;
align-items: center;
justify-content: flex-start;
margin-bottom: 1rem;
background-color: rgba(0, 0, 0, 0.5);
padding: 0.5rem 1rem;
border-radius: 1rem;
width: fit-content;
.indicator-dot {
width: 10px;
height: 10px;
border-radius: 50%;
margin-right: 10px;
&.listening {
background-color: #4caf50;
box-shadow: 0 0 10px #4caf50;
animation: pulse 1.5s infinite;
}
&.not-listening {
background-color: #ff6b6b;
}
}
.status-text {
font-size: 0.9rem;
}
}
.error-message {
color: #ff6b6b;
margin-bottom: 1rem;
background-color: rgba(0, 0, 0, 0.5);
padding: 0.75rem 1rem;
border-radius: 0.5rem;
width: 100%;
text-align: center;
}
.transcript-display {
width: 100%;
margin-bottom: 1rem;
padding: 1rem;
background-color: rgba(0, 0, 0, 0.5);
border-radius: 0.5rem;
max-height: 120px;
overflow-y: auto;
text-align: left;
font-size: 0.9rem;
line-height: 1.5;
border: 1px solid rgba(0, 0, 0, 0.5);
}
.button-container {
display: flex;
justify-content: space-between;
gap: 0.5rem;
margin-top: 1rem;
width: 100%;
.button {
display: flex;
align-items: center;
justify-content: center;
gap: 0.5rem;
border-radius: 0.5rem;
padding: 0.5rem 1rem;
font-size: 0.9rem;
cursor: pointer;
transition: all 0.2s ease;
flex: 1;
color: white;
border: none;
&.pause-button {
background-color: #ff9800;
&:hover {
background-color: #f57c00;
}
&.paused {
background-color: #4caf50;
&:hover {
background-color: #45a049;
}
}
}
&.stop-button {
background-color: rgba(0, 0, 0, 0.5);
&:hover {
background-color: #000000;
}
}
&.clear-button {
background-color: transparent;
border: 1px solid rgba(0, 0, 0, 0.5);
&:hover {
background-color: rgba(0, 0, 0, 0.5);
}
}
}
}
}
@keyframes pulse {
0% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0.7); }
70% { box-shadow: 0 0 0 10px rgba(76, 175, 80, 0); }
100% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0); }
}

View File: interview-overlay.tsx

@@ -3,6 +3,7 @@ import StopIcon from "../icons/pause.svg";
import SpeechRecognition, {
useSpeechRecognition,
} from "react-speech-recognition";
import "./interview-overlay.scss";
interface InterviewOverlayProps {
onClose: () => void;
@@ -16,11 +17,17 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
submitMessage,
}) => {
const [visible, setVisible] = useState(true);
const [countdown, setCountdown] = useState(20);
const countdownRef = useRef(countdown);
const intervalIdRef = useRef<NodeJS.Timeout | null>(null);
// const [countdown, setCountdown] = useState(20);
// const countdownRef = useRef(countdown);
// const intervalIdRef = useRef<NodeJS.Timeout | null>(null);
// Pause state
const [isPaused, setIsPaused] = useState(false);
// Width and drag state
const [width, setWidth] = useState("33vw");
const [isDragging, setIsDragging] = useState(false);
const isDraggingRef = useRef(isDragging);
const dragStartXRef = useRef(0);
const initialWidthRef = useRef(0);
// react-speech-recognition hook
const {
@@ -37,12 +44,6 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
useEffect(() => {
transcriptRef.current = transcript;
onTextUpdate(transcript);
// Reset the countdown whenever a new recognition result arrives
if (transcript) {
setCountdown(20);
countdownRef.current = 20;
}
}, [transcript, onTextUpdate]);
// Check whether the browser supports speech recognition
@@ -62,25 +63,9 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
continuous: true,
language: "zh-CN",
});
// Set up the countdown
intervalIdRef.current = setInterval(() => {
setCountdown((prev) => {
const newCount = prev - 1;
countdownRef.current = newCount;
if (newCount <= 0) {
stopRecognition();
}
return newCount;
});
}, 1000);
}
return () => {
if (intervalIdRef.current) {
clearInterval(intervalIdRef.current);
}
SpeechRecognition.stopListening();
};
}, [visible, isPaused]);
@@ -88,17 +73,10 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
const stopRecognition = () => {
try {
SpeechRecognition.stopListening();
// Submit the final result
if (transcriptRef.current) {
submitMessage(transcriptRef.current);
}
// Clear the countdown
if (intervalIdRef.current) {
clearInterval(intervalIdRef.current);
}
// Close the overlay
setVisible(false);
onClose();
@@ -110,150 +88,108 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
// Pause/resume
const togglePause = () => {
if (!isPaused) {
// Pause
SpeechRecognition.stopListening();
if (intervalIdRef.current) {
clearInterval(intervalIdRef.current);
}
// Abort listening outright (more forceful interruption)
SpeechRecognition.abortListening();
// Then call the normal stop as well to make sure listening has fully ended
setTimeout(() => {
SpeechRecognition.stopListening();
}, 0);
// Submit the current transcript
if (transcriptRef.current) {
submitMessage(transcriptRef.current);
resetTranscript();
if (transcriptRef.current && transcriptRef.current.trim() !== "") {
// Defer submission to the next event-loop turn so it does not block the UI update
setTimeout(() => {
submitMessage(transcriptRef.current);
resetTranscript();
}, 0);
}
} else {
// Resume
console.log("resuming listening");
// Make sure any existing listener is stopped first
SpeechRecognition.abortListening();
// Restart listening after a short delay
setTimeout(() => {
SpeechRecognition.startListening({
continuous: true,
language: "zh-CN",
});
// Reset the transcript
resetTranscript();
}, 100);
// Restart the countdown
intervalIdRef.current = setInterval(() => {
setCountdown((prev) => {
const newCount = prev - 1;
countdownRef.current = newCount;
if (newCount <= 0) {
stopRecognition();
}
return newCount;
});
}, 1000);
}, 0);
}
setIsPaused(!isPaused);
};
// Drag handlers
const handleDragStart = (e: React.MouseEvent) => {
setIsDragging(() => {
isDraggingRef.current = true;
return true;
});
dragStartXRef.current = e.clientX;
initialWidthRef.current = parseInt(width);
document.addEventListener("mousemove", handleDragMove);
document.addEventListener("mouseup", handleDragEnd);
};
const handleDragMove = (e: MouseEvent) => {
if (isDraggingRef.current) {
const deltaX = e.clientX - dragStartXRef.current;
const newWidth = Math.max(
15,
Math.min(
80,
initialWidthRef.current - (deltaX / window.innerWidth) * 100,
),
);
console.log(`drag: new width ${newWidth}vw`);
setWidth(`${newWidth}vw`);
}
};
const handleDragEnd = () => {
setIsDragging(() => {
isDraggingRef.current = false;
return false;
});
document.removeEventListener("mousemove", handleDragMove);
document.removeEventListener("mouseup", handleDragEnd);
};
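// Editorial note: the handle sits on the overlay's left edge, so dragging
// left (negative deltaX) widens the panel. The new width is the initial
// width in vw minus the mouse delta as a percentage of window.innerWidth,
// clamped to the [15vw, 80vw] range.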
// Clean up event listeners on unmount
useEffect(() => {
return () => {
document.removeEventListener("mousemove", handleDragMove);
document.removeEventListener("mouseup", handleDragEnd);
};
}, []);
if (!visible) {
return null;
}
return (
<div
style={{
position: "fixed",
top: "20px",
right: "20px",
width: "33vw",
height: "100vh",
// maxHeight: "80vh",
backgroundColor: "#1e1e1e", // 替换 var(--gray)
border: "1px solid rgba(255, 255, 255, 0.2)", // 替换 var(--border-in-light)
borderRadius: "10px",
boxShadow: "0 5px 20px rgba(0, 0, 0, 0.3)", // 替换 var(--shadow)
display: "flex",
flexDirection: "column",
alignItems: "flex-start",
justifyContent: "flex-start",
color: "#ffffff", // 替换 C 为白色
zIndex: 1000,
padding: "20px",
overflowY: "auto",
}}
className={`interview-overlay ${isDragging ? "dragging" : ""}`}
style={{ width }}
>
<div
style={{
display: "flex",
flexDirection: "column",
alignItems: "flex-start",
justifyContent: "flex-start",
width: "100%",
}}
>
<h2
style={{
fontSize: "1.5rem",
fontWeight: "500",
marginBottom: "1rem",
textAlign: "left",
color: "#ffffff", // 替换 var(--white)
}}
>
{" "}
<span
style={{
color: countdown <= 5 ? "#ff6b6b" : "#4caf50",
fontWeight: "bold",
}}
>
{countdown}
</span>{" "}
</h2>
{/* Left-edge drag handle */}
<div className="drag-handle" onMouseDown={handleDragStart} />
<div className="content-container">
{/* Speech-recognition status indicator */}
<div
style={{
display: "flex",
alignItems: "center",
justifyContent: "flex-start",
marginBottom: "1rem",
backgroundColor: "rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
padding: "0.5rem 1rem",
borderRadius: "1rem",
width: "fit-content",
}}
>
<div className="status-indicator">
<div
style={{
width: "10px",
height: "10px",
borderRadius: "50%",
backgroundColor: listening ? "#4caf50" : "#ff6b6b",
marginRight: "10px",
boxShadow: listening ? "0 0 10px #4caf50" : "none",
animation: listening ? "pulse 1.5s infinite" : "none",
}}
className={`indicator-dot ${
listening ? "listening" : "not-listening"
}`}
/>
<span style={{ fontSize: "0.9rem" }}>
<span className="status-text">
{listening ? "正在监听..." : isPaused ? "已暂停" : "未监听"}
</span>
</div>
{/* Error message */}
{(!browserSupportsSpeechRecognition || !isMicrophoneAvailable) && (
<div
style={{
color: "#ff6b6b",
marginBottom: "1rem",
backgroundColor: "rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
padding: "0.75rem 1rem",
borderRadius: "0.5rem",
width: "100%",
textAlign: "center",
}}
>
<div className="error-message">
{!browserSupportsSpeechRecognition
? "Your browser does not support speech recognition; please use Chrome"
: "Cannot access the microphone; please check its permissions"}
@@ -261,137 +197,28 @@ export const InterviewOverlay: React.FC<InterviewOverlayProps> = ({
)}
{/* Transcript display */}
{transcript && (
<div
style={{
width: "100%",
marginBottom: "1rem",
padding: "1rem",
backgroundColor: "rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
borderRadius: "0.5rem",
maxHeight: "120px",
overflowY: "auto",
textAlign: "left",
fontSize: "0.9rem",
lineHeight: "1.5",
border: "1px solid rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
}}
>
{transcript}
</div>
)}
{transcript && <div className="transcript-display">{transcript}</div>}
{/* Buttons */}
<div
style={{
display: "flex",
justifyContent: "space-between",
gap: "0.5rem",
marginTop: "1rem",
width: "100%",
}}
>
<div className="button-container">
{/* Pause/resume button */}
<button
onClick={togglePause}
style={{
display: "flex",
alignItems: "center",
justifyContent: "center",
gap: "0.5rem",
backgroundColor: isPaused ? "#4caf50" : "#ff9800",
color: "white",
border: "none",
borderRadius: "0.5rem",
padding: "0.5rem 1rem",
fontSize: "0.9rem",
cursor: "pointer",
transition: "all 0.2s ease",
flex: "1",
}}
onMouseOver={(e) =>
(e.currentTarget.style.backgroundColor = isPaused
? "#45a049"
: "#f57c00")
}
onMouseOut={(e) =>
(e.currentTarget.style.backgroundColor = isPaused
? "#4caf50"
: "#ff9800")
}
className={`button pause-button ${isPaused ? "paused" : ""}`}
>
<span>{isPaused ? "▶️ 恢复监听" : "⏸️ 暂停并发送"}</span>
</button>
<button
onClick={stopRecognition}
style={{
display: "flex",
alignItems: "center",
justifyContent: "center",
gap: "0.5rem",
backgroundColor: "rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
color: "white",
border: "none",
borderRadius: "0.5rem",
padding: "0.5rem 1rem",
fontSize: "0.9rem",
cursor: "pointer",
transition: "all 0.2s ease",
flex: "1",
}}
onMouseOver={
(e) => (e.currentTarget.style.backgroundColor = "#000000") // replaces var(--black)
}
onMouseOut={
(e) =>
(e.currentTarget.style.backgroundColor = "rgba(0, 0, 0, 0.5)") // replaces var(--black-50)
}
>
<button onClick={stopRecognition} className="button stop-button">
<StopIcon />
<span></span>
<span></span>
</button>
<button
onClick={resetTranscript}
style={{
display: "flex",
alignItems: "center",
justifyContent: "center",
gap: "0.5rem",
backgroundColor: "transparent",
color: "white",
border: "1px solid rgba(0, 0, 0, 0.5)", // 替换 var(--black-50)
borderRadius: "0.5rem",
padding: "0.5rem 1rem",
fontSize: "0.9rem",
cursor: "pointer",
transition: "all 0.2s ease",
flex: "1",
}}
onMouseOver={
(e) =>
(e.currentTarget.style.backgroundColor = "rgba(0, 0, 0, 0.5)") // replaces var(--black-50)
}
onMouseOut={(e) =>
(e.currentTarget.style.backgroundColor = "transparent")
}
>
<button onClick={resetTranscript} className="button clear-button">
<span>🗑 </span>
</button>
</div>
</div>
{/* Pulse animation */}
<style>
{`
@keyframes pulse {
0% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0.7); }
70% { box-shadow: 0 0 0 10px rgba(76, 175, 80, 0); }
100% { box-shadow: 0 0 0 0 rgba(76, 175, 80, 0); }
}
`}
</style>
</div>
);
};

View File: sidebar.tsx

@@ -32,6 +32,8 @@ import { Selector, showConfirm } from "./ui-lib";
import clsx from "clsx";
import { isMcpEnabled } from "../mcp/actions";
import { WechatAuthor } from "./WechatAuthor";
const DISCOVERY = [
{ name: Locale.Plugin.Name, path: Path.Plugins },
{ name: "Stable Diffusion", path: Path.Sd },
@@ -223,6 +225,7 @@ export function SideBarTail(props: {
);
}
// Render WechatAuthor inside the sidebar component
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
@@ -248,6 +251,7 @@ export function SideBar(props: { className?: string }) {
shouldNarrow={shouldNarrow}
{...props}
>
<WechatAuthor /> {/* 添加到最顶部 */}
<SideBarHeader
title="NextChat"
subTitle="Build your own AI assistant."