From 7ffabb77f9f42c65dc051dc5fe206cf62eacd505 Mon Sep 17 00:00:00 2001
From: sijinhui
Date: Wed, 27 Mar 2024 20:03:13 +0800
Subject: [PATCH] test

---
 app/app/(admin)/admin/t/page.tsx | 16 +++++
 app/components/chat.tsx          |  6 ++
 app/components/voice-input.tsx   | 86 ++++++++++++++++++++++++++++++++
 3 files changed, 108 insertions(+)
 create mode 100644 app/app/(admin)/admin/t/page.tsx
 create mode 100644 app/components/voice-input.tsx

diff --git a/app/app/(admin)/admin/t/page.tsx b/app/app/(admin)/admin/t/page.tsx
new file mode 100644
index 000000000..1efac7b9f
--- /dev/null
+++ b/app/app/(admin)/admin/t/page.tsx
@@ -0,0 +1,16 @@
+import { Flex } from "antd";
+import VoiceInput from "@/app/components/voice-input";
+
+export default async function UsersPage() {
+  // const users: User[] = await getData();
+
+  // console.log("data", data);
+
+  return (
+    <>
+      <Flex>
+        <VoiceInput />
+      </Flex>
+    </>
+  );
+}
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 09fa8df19..ea32eceff 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -1688,6 +1688,12 @@ function _Chat() {
           })}
         </div>
       )}
+      <IconButton
+        icon={<SendWhiteIcon />}
+        text={Locale.Chat.Send}
+        type="primary"
+        onClick={() => doSubmit(userInput)}
+      />
       <IconButton
         icon={<SendWhiteIcon />}
         text={Locale.Chat.Send}
diff --git a/app/components/voice-input.tsx b/app/components/voice-input.tsx
new file mode 100644
index 000000000..ab82dfdcf
--- /dev/null
+++ b/app/components/voice-input.tsx
@@ -0,0 +1,86 @@
+"use client";
+import { Button, Input, Space } from "antd";
+import { useEffect, useMemo, useRef, useState } from "react";
+import { AudioOutlined, LoadingOutlined } from "@ant-design/icons";
+
+// Voice-to-text input: records speech via the browser's webkitSpeechRecognition
+// and appends recognized transcripts to a text input.
+export default function VoiceInput() {
+  const [userInput, setUserInput] = useState("");
+  const [loading, setLoading] = useState(false);
+  // Recognition instance; stays null when the browser lacks support.
+  const recognition = useRef<any>(null);
+  // Count of results already consumed; the "result" event replays earlier ones.
+  const lastLength = useRef(0);
+
+  useEffect(() => {
+    if ("webkitSpeechRecognition" in window) {
+      if (recognition.current === null) {
+        recognition.current = new window.webkitSpeechRecognition();
+      }
+    } else {
+      console.error("此浏览器不支持webkitSpeechRecognition。");
+      return;
+    }
+    if (!recognition.current) return;
+    // Recognition language.
+    recognition.current.lang = "zh";
+    // Keep listening across pauses instead of stopping after one phrase.
+    recognition.current.continuous = true;
+    // Also deliver interim (partial) results.
+    recognition.current.interimResults = true;
+
+    function onresult(event: any) {
+      // The event carries every result so far; only the last one is new.
+      const length = event.results.length;
+      // The event can also fire with no new result; ignore those.
+      if (lastLength.current === length) {
+        return;
+      }
+      lastLength.current = length;
+      // Append the newest transcript to the text.
+      const transcript = event.results[length - 1]?.[0]?.transcript;
+      if (transcript) {
+        setUserInput((prev) => prev + transcript);
+      }
+    }
+
+    recognition.current.addEventListener("result", onresult);
+    return () => {
+      if (recognition.current) {
+        recognition.current.removeEventListener("result", onresult);
+      }
+    };
+  }, []);
+
+  // Toggle recording. Guards the unsupported-browser case where the
+  // recognition instance was never created.
+  function click() {
+    if (!recognition.current) return;
+    if (loading) {
+      recognition.current.stop();
+      setLoading(false);
+      return;
+    }
+    setLoading(true);
+    lastLength.current = 0;
+    recognition.current.start();
+  }
+
+  const icon = useMemo(() => {
+    if (loading) {
+      return <LoadingOutlined />;
+    }
+    return <AudioOutlined />;
+  }, [loading]);
+
+  return (
+    <Space.Compact style={{ width: "100%" }}>
+      <Input
+        value={userInput}
+        onChange={(e) => setUserInput(e.target.value)}
+      />
+      <Button type="primary" icon={icon} onClick={click} />
+    </Space.Compact>
+  );
+}