调整语音为点按结束 (change voice input so recognition ends on a second tap)
commit ac6b02f678 (parent cce6bcbbc8)
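The diff below replaces the one-shot recognizeOnceAsync call with continuous recognition (startContinuousRecognitionAsync) and makes a second tap on the microphone button stop it by closing the recognizer. As orientation before reading the diff, here is a minimal sketch of that tap-to-toggle pattern with microsoft-cognitiveservices-speech-sdk (imported as ms_audio_sdk, as in this file). The authToken, region, recognizerRef, and recording names and the console.log handlers are illustrative stand-ins, not code from this commit.

import * as ms_audio_sdk from "microsoft-cognitiveservices-speech-sdk";

// Illustrative module-level state; the real component keeps these in React refs/state.
let recognizerRef: ms_audio_sdk.SpeechRecognizer | undefined;
let recording = false;

// First tap starts continuous recognition; second tap closes the recognizer,
// mirroring the voiceInputLoading branch in startRecognition below.
function toggleRecognition(authToken: string, region: string) {
  if (recording) {
    recognizerRef?.close();
    recognizerRef = undefined;
    recording = false;
    return;
  }
  const speechConfig = ms_audio_sdk.SpeechConfig.fromAuthorizationToken(
    authToken,
    region,
  );
  speechConfig.speechRecognitionLanguage = "zh-CN";
  const audioConfig = ms_audio_sdk.AudioConfig.fromDefaultMicrophoneInput();
  recognizerRef = new ms_audio_sdk.SpeechRecognizer(speechConfig, audioConfig);
  // Interim hypotheses stream through `recognizing`; finalized phrases through `recognized`.
  recognizerRef.recognizing = (_sender, event) =>
    console.log("partial:", event.result.text);
  recognizerRef.recognized = (_sender, event) =>
    console.log("final:", event.result.text);
  recognizerRef.startContinuousRecognitionAsync();
  recording = true;
}

The component itself tears recognition down with recognizer.current?.close() and clears the ref; the SDK also exposes stopContinuousRecognitionAsync() as a gentler alternative to close(), which the sketch does not use.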
@@ -1,3 +1,5 @@
+"use client";
+
 import { Button, Input, Space } from "antd";
 import {
   Dispatch,
@@ -55,58 +57,65 @@ export default function VoiceInput({
   }, [accessToken]);

   useEffect(() => {
+    // console.log('77777777777', userInput)
     if (!userInput || userInput.trim() === "") {
       setTempUserInput("");
       setVoiceInputText("");
+    } else {
+      if (!/\[\.\.\.\]$/.test(userInput)) {
+        setTempUserInput(userInput);
+      }
     }
   }, [userInput]);
+  useEffect(() => {}, [tempUserInput]);
+
+  useEffect(() => {
+    if (voiceInputText.trim() !== "") {
+      setUserInput(tempUserInput + voiceInputText);
+    }
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [voiceInputText]);
+
   function onRecognizedResult(result: SpeechRecognitionResult) {
-    // setVoiceInputText("");
-    setVoiceInputText(`${result.text ?? ""}`);
+    let temp_rec_result = `${result.text ?? ""}`;
     let intentJson = result.properties.getProperty(
       ms_audio_sdk.PropertyId.LanguageUnderstandingServiceResponse_JsonResult,
     );
     if (intentJson) {
-      setVoiceInputText(voiceInputText + `${intentJson}`);
+      temp_rec_result += `${intentJson}`;
     }
+    setVoiceInputText(temp_rec_result);
   }
   function onCanceled(
     sender: Recognizer,
     event: SpeechRecognitionCanceledEventArgs,
   ) {
-    console.log("[onCanceled] ", event);
+    // console.log("[onCanceled] ", event);
     // 如果有异常就尝试重新获取
     setAccessToken("");
-    // 展示取消事件
-    // statusDiv.innerHTML += "(cancel) Reason: " + ms_audio_sdk.CancellationReason[event.reason];
-    // if (event.reason === ms_audio_sdk.CancellationReason.Error) {
-    //   statusDiv.innerHTML += ": " + event.errorDetails;
-    // }
-    // statusDiv.innerHTML += "\r\n";
   }

   function onRecognizing(
     sender: Recognizer,
     event: SpeechRecognitionEventArgs,
   ) {
     let result = event.result;
-    setUserInput(
-      tempUserInput +
-        voiceInputText.replace(/(.*)(^|[\r\n]+).*\[\.\.\.][\r\n]+/, "$1$2") +
-        `${result.text ?? ""} [...]`,
-    );
     setVoiceInputText(
       voiceInputText.replace(/(.*)(^|[\r\n]+).*\[\.\.\.][\r\n]+/, "$1$2") +
         `${result.text ?? ""} [...]`,
     );
   }
+  function onRecognized(sender: Recognizer, event: SpeechRecognitionEventArgs) {
+    var result = event.result;
+    onRecognizedResult(event.result);
+  }
+
   const startRecognition = () => {
     if (voiceInputLoading) {
       recognizer.current?.close();
+      recognizer.current = undefined;
       setVoiceInputLoading(false);
-      // setVoiceInputText("");
-      // setUserInput(tempUserInput);
       return;
     }

@@ -124,44 +133,37 @@ export default function VoiceInput({
       );
       const audioConfig = ms_audio_sdk.AudioConfig.fromDefaultMicrophoneInput();
       speechConfig.speechRecognitionLanguage = "zh-CN";
-      speechConfig.setProperty(
-        ms_audio_sdk.PropertyId.SpeechServiceConnection_EndSilenceTimeoutMs,
-        "3500",
-      );
       recognizer.current = new ms_audio_sdk.SpeechRecognizer(
         speechConfig,
         audioConfig,
       );
       recognizer.current.recognizing = onRecognizing; // 自定义分段显示
+      recognizer.current.recognized = onRecognized;
       recognizer.current.canceled = onCanceled; // 自定义中断
-      recognizer.current.recognizeOnceAsync(
-        (result) => {
-          onRecognizedResult(result);
-          setUserInput(
-            tempUserInput + (voiceInputText ?? "") + `${result.text ?? ""}`,
-          );
-          setTempUserInput("");
-          setVoiceInputText("");
-          setVoiceInputLoading(false);
-        },
-        (err) => {
-          console.error("Recognition error: ", err); // 错误处理
-          setVoiceInputLoading(false);
-        },
-      );
+      recognizer.current.startContinuousRecognitionAsync();
     });
   };

   const icon = useMemo(() => {
     if (voiceInputLoading) {
-      return (
-        <LoadingOutlined
-          style={{
-            fontSize: 16,
-            color: "rgb(234, 149, 24)",
-          }}
-        />
-      );
+      if (accessToken === "unknown") {
+        return (
+          <LoadingOutlined
+            style={{
+              fontSize: 16,
+            }}
+          />
+        );
+      } else {
+        return (
+          <LoadingOutlined
+            style={{
+              fontSize: 16,
+              color: "rgb(234, 149, 24)",
+            }}
+          />
+        );
+      }
     }
     return (
       <AudioOutlined
@@ -171,12 +173,11 @@ export default function VoiceInput({
         }}
       />
     );
-  }, [voiceInputLoading]);
+  }, [voiceInputLoading, accessToken]);

   return (
     <div>
       <Space.Compact>
-        {/*<Input value={voiceInputText} />*/}
         <Button type="text" onClick={startRecognition} icon={icon} />
       </Space.Compact>
     </div>