Merge 16c3255e99 into 995bef73de
commit 462d68750b
.yarnrc.yml (new file, +1)
@@ -0,0 +1 @@
nodeLinker: node-modules
@@ -107,7 +107,8 @@ export interface LLMModelProvider {
export abstract class LLMApi {
abstract chat(options: ChatOptions): Promise<void>;
abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
abstract speech(options: SpeechOptions): Promise<ArrayBuffer | AudioBuffer>;
abstract streamSpeech?(options: SpeechOptions): AsyncGenerator<AudioBuffer>;
abstract usage(): Promise<LLMUsage>;
abstract models(): Promise<LLMModel[]>;
}
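A minimal sketch of how a caller might consume this widened speech contract: streamSpeech is optional, so consumers feature-detect it and fall back to the one-shot speech() call. The speakWith helper and its arguments are illustrative, not part of this change.

// Hypothetical consumer of the LLMApi speech methods above (assumes a TTSPlayer as added below).
async function speakWith(llm: LLMApi, options: SpeechOptions, player: TTSPlayer) {
  if (llm.streamSpeech) {
    // Streaming path: enqueue each decoded AudioBuffer as it arrives.
    player.startStreamPlay(() => console.log("stream playback finished"));
    for await (const chunk of llm.streamSpeech(options)) {
      player.addToQueue(chunk);
    }
    player.finishStreamPlay();
  } else {
    // One-shot path: a single ArrayBuffer (or AudioBuffer) is played in one go.
    const audio = await llm.speech(options);
    await player.play(audio, () => console.log("playback finished"));
  }
}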
@@ -6,6 +6,7 @@ import {
useChatStore,
ChatMessageTool,
usePluginStore,
FunctionToolItem,
} from "@/app/store";
import {
preProcessImageContentForAlibabaDashScope,

@@ -51,6 +52,8 @@ interface RequestParam {
repetition_penalty?: number;
top_p: number;
max_tokens?: number;
tools?: FunctionToolItem[];
enable_search?: boolean;
}
interface RequestPayload {
model: string;

@@ -59,6 +62,7 @@ interface RequestPayload {
}

export class QwenApi implements LLMApi {
private static audioContext: AudioContext | null = null;
path(path: string): string {
const accessStore = useAccessStore.getState();
@@ -89,10 +93,83 @@ export class QwenApi implements LLMApi {
return res?.output?.choices?.at(0)?.message?.content ?? "";
}

speech(options: SpeechOptions): Promise<ArrayBuffer> {
async speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}

async *streamSpeech(options: SpeechOptions): AsyncGenerator<AudioBuffer> {
if (!options.input || !options.model) {
throw new Error("Missing required parameters: input and model");
}
const requestPayload = {
model: options.model,
input: {
text: options.input,
voice: options.voice,
},
speed: options.speed,
response_format: options.response_format,
};
const controller = new AbortController();
options.onController?.(controller);
try {
const speechPath = this.path(Alibaba.SpeechPath);
const speechPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: {
...getHeaders(),
"X-DashScope-SSE": "enable",
},
};

// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.model),
);

const res = await fetch(speechPath, speechPayload);
clearTimeout(requestTimeoutId); // Clear timeout on successful connection

const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = "";
while (true) {
const { done, value } = await reader.read();
if (done) {
break;
}
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() || "";

for (const line of lines) {
const data = line.slice(5);
try {
if (line.startsWith("data:")) {
const json = JSON.parse(data);
if (json.output?.audio?.data) {
yield this.PCMBase64ToAudioBuffer(json.output.audio.data);
}
}
} catch (parseError) {
console.warn(
"[StreamSpeech] Failed to parse SSE data:",
parseError,
);
continue;
}
}
}
reader.releaseLock();
} catch (e) {
console.log("[Request] failed to make a speech request", e);
throw e;
}
}
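For context, the loop above assumes DashScope emits SSE lines of roughly the shape sketched below; only the output.audio.data field is actually read, and the surrounding payload is illustrative.

// Assumed shape of a single SSE line consumed by the parser above (illustrative):
//   data:{"output":{"audio":{"data":"<base64-encoded PCM16 chunk>"}}}
// line.slice(5) strips the leading "data:" prefix before JSON.parse,
// and each decoded chunk is yielded as an AudioBuffer via PCMBase64ToAudioBuffer.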
async chat(options: ChatOptions) {
const modelConfig = {
...useAppConfig.getState().modelConfig,

@@ -129,6 +206,7 @@ export class QwenApi implements LLMApi {
temperature: modelConfig.temperature,
// max_tokens: modelConfig.max_tokens,
top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // qwen top_p should be < 1
enable_search: modelConfig.enableNetWork,
},
};

@@ -161,11 +239,16 @@ export class QwenApi implements LLMApi {
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
// console.log("getAsTools", tools, funcs);
const _tools = tools as unknown as FunctionToolItem[];
if (_tools && _tools.length > 0) {
requestPayload.parameters.tools = _tools;
}
return streamWithThink(
chatPath,
requestPayload,
headers,
tools as any,
[],
funcs,
controller,
// parseSSE

@@ -198,7 +281,7 @@ export class QwenApi implements LLMApi {
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
runTools[index]["function"]["arguments"] += args || "";
}
}

@@ -273,5 +356,79 @@ export class QwenApi implements LLMApi {
async models(): Promise<LLMModel[]> {
return [];
}
// Decode base64-encoded PCM data for playback
private async PCMBase64ToAudioBuffer(base64Data: string) {
try {
// Decode base64
const binaryString = atob(base64Data);
const bytes = new Uint8Array(binaryString.length);
for (let i = 0; i < binaryString.length; i++) {
bytes[i] = binaryString.charCodeAt(i);
}

// Convert to AudioBuffer
const audioBuffer = await this.convertToAudioBuffer(bytes);

return audioBuffer;
} catch (error) {
console.error("Failed to decode PCM data:", error);
throw error;
}
}
private static getAudioContext(): AudioContext {
if (!QwenApi.audioContext) {
QwenApi.audioContext = new (window.AudioContext ||
window.webkitAudioContext)();
}
return QwenApi.audioContext;
}
// Convert raw PCM bytes to an AudioBuffer
private convertToAudioBuffer(pcmData: Uint8Array) {
const audioContext = QwenApi.getAudioContext();
const channels = 1;
const sampleRate = 24000;
return new Promise<AudioBuffer>((resolve, reject) => {
try {
let float32Array;
// Convert 16-bit PCM to 32-bit float samples
float32Array = this.pcm16ToFloat32(pcmData);

// Create the AudioBuffer
const audioBuffer = audioContext.createBuffer(
channels,
float32Array.length / channels,
sampleRate,
);

// Copy the samples into the AudioBuffer
for (let channel = 0; channel < channels; channel++) {
const channelData = audioBuffer.getChannelData(channel);
for (let i = 0; i < channelData.length; i++) {
channelData[i] = float32Array[i * channels + channel];
}
}

resolve(audioBuffer);
} catch (error) {
reject(error);
}
});
}
// Convert 16-bit PCM to 32-bit float
private pcm16ToFloat32(pcmData: Uint8Array) {
const length = pcmData.length / 2;
const float32Array = new Float32Array(length);

for (let i = 0; i < length; i++) {
const int16 = (pcmData[i * 2 + 1] << 8) | pcmData[i * 2];
const int16Signed = int16 > 32767 ? int16 - 65536 : int16;
float32Array[i] = int16Signed / 32768;
}

return float32Array;
}
}
export { Alibaba };
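A quick worked example for the little-endian PCM16 decode in pcm16ToFloat32 above; the byte values are chosen for illustration.

// pcmData[i * 2] is the low byte, pcmData[i * 2 + 1] the high byte of each sample.
const sample = new Uint8Array([0x00, 0x80]);    // one 16-bit sample, little-endian
const raw = (sample[1] << 8) | sample[0];       // 0x8000 = 32768
const signed = raw > 32767 ? raw - 65536 : raw; // -32768 (two's complement)
console.log(signed / 32768);                    // -1, i.e. the most negative float sample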
@@ -48,6 +48,7 @@ import PluginIcon from "../icons/plugin.svg";
import ShortcutkeyIcon from "../icons/shortcutkey.svg";
import McpToolIcon from "../icons/tool.svg";
import HeadphoneIcon from "../icons/headphone.svg";
import NetWorkIcon from "../icons/network.svg";
import {
BOT_HELLO,
ChatMessage,

@@ -75,6 +76,7 @@ import {
useMobileScreen,
selectOrCopy,
showPlugins,
canUseNetWork,
} from "../utils";

import { uploadImage as uploadImageRemote } from "@/app/utils/chat";

@@ -101,8 +103,6 @@ import {
import { useNavigate } from "react-router-dom";
import {
CHAT_PAGE_SIZE,
DEFAULT_TTS_ENGINE,
ModelProvider,
Path,
REQUEST_TIMEOUT_MS,
ServiceProvider,

@@ -512,6 +512,7 @@ export function ChatActions(props: {

// switch themes
const theme = config.theme;
const enableNetWork = session.mask.modelConfig.enableNetWork || false;

function nextTheme() {
const themes = [Theme.Auto, Theme.Light, Theme.Dark];

@@ -521,6 +522,13 @@ export function ChatActions(props: {
config.update((config) => (config.theme = nextTheme));
}

function nextNetWork() {
chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.enableNetWork =
!session.mask.modelConfig.enableNetWork;
});
}

// stop all responses
const couldStop = ChatControllerPool.hasPending();
const stopAll = () => ChatControllerPool.stopAll();

@@ -699,6 +707,9 @@ export function ChatActions(props: {
session.mask.modelConfig.providerName =
providerName as ServiceProvider;
session.mask.syncGlobalConfig = false;
session.mask.modelConfig.enableNetWork = canUseNetWork(model)
? session.mask.modelConfig.enableNetWork
: false;
});
if (providerName == "ByteDance") {
const selectedModel = models.find(

@@ -833,6 +844,16 @@ export function ChatActions(props: {
/>
)}
{!isMobileScreen && <MCPAction />}

{canUseNetWork(currentModel) && (
<ChatAction
onClick={nextNetWork}
text={
Locale.Chat.InputActions.NetWork[enableNetWork ? "on" : "off"]
}
icon={<NetWorkIcon />}
/>
)}
</>
<div className={styles["chat-input-actions-end"]}>
{config.realtimeConfig.enable && (
@@ -1286,6 +1307,7 @@ function _Chat() {
const accessStore = useAccessStore();
const [speechStatus, setSpeechStatus] = useState(false);
const [speechLoading, setSpeechLoading] = useState(false);
const [speechCooldown, setSpeechCooldown] = useState(false);

async function openaiSpeech(text: string) {
if (speechStatus) {

@@ -1293,14 +1315,14 @@ function _Chat() {
setSpeechStatus(false);
} else {
var api: ClientApi;
api = new ClientApi(ModelProvider.GPT);
const config = useAppConfig.getState();
api = new ClientApi(config.ttsConfig.modelProvider);
setSpeechLoading(true);
ttsPlayer.init();
let audioBuffer: ArrayBuffer;
let audioBuffer: ArrayBuffer | AudioBuffer;
const { markdownToTxt } = require("markdown-to-txt");
const textContent = markdownToTxt(text);
if (config.ttsConfig.engine !== DEFAULT_TTS_ENGINE) {
if (config.ttsConfig.engine === "Edge") {
const edgeVoiceName = accessStore.edgeVoiceName();
const tts = new MsEdgeTTS();
await tts.setMetadata(

@@ -1308,28 +1330,60 @@ function _Chat() {
OUTPUT_FORMAT.AUDIO_24KHZ_96KBITRATE_MONO_MP3,
);
audioBuffer = await tts.toArrayBuffer(textContent);
playSpeech(audioBuffer);
} else {
audioBuffer = await api.llm.speech({
model: config.ttsConfig.model,
input: textContent,
voice: config.ttsConfig.voice,
speed: config.ttsConfig.speed,
});
if (api.llm.streamSpeech) {
// Stream playback: play chunks as they arrive
setSpeechStatus(true);
ttsPlayer.startStreamPlay(() => {
setSpeechStatus(false);
});

try {
for await (const chunk of api.llm.streamSpeech({
model: config.ttsConfig.model,
input: textContent,
voice: config.ttsConfig.voice,
speed: config.ttsConfig.speed,
})) {
ttsPlayer.addToQueue(chunk);
}
ttsPlayer.finishStreamPlay();
} catch (e) {
console.error("[Stream Speech]", e);
showToast(prettyObject(e));
setSpeechStatus(false);
ttsPlayer.stop();
} finally {
setSpeechLoading(false);
}
} else {
audioBuffer = await api.llm.speech({
model: config.ttsConfig.model,
input: textContent,
voice: config.ttsConfig.voice,
speed: config.ttsConfig.speed,
});
playSpeech(audioBuffer);
}
}
setSpeechStatus(true);
ttsPlayer
.play(audioBuffer, () => {
setSpeechStatus(false);
})
.catch((e) => {
console.error("[OpenAI Speech]", e);
showToast(prettyObject(e));
setSpeechStatus(false);
})
.finally(() => setSpeechLoading(false));
}
}

function playSpeech(audioBuffer: ArrayBuffer | AudioBuffer) {
setSpeechStatus(true);
ttsPlayer
.play(audioBuffer, () => {
setSpeechStatus(false);
})
.catch((e) => {
console.error("[OpenAI Speech]", e);
showToast(prettyObject(e));
setSpeechStatus(false);
})
.finally(() => setSpeechLoading(false));
}

const context: RenderMessage[] = useMemo(() => {
return session.mask.hideContext ? [] : session.mask.context.slice();
}, [session.mask.context, session.mask.hideContext]);
@@ -3,10 +3,9 @@ import { TTSConfig, TTSConfigValidator } from "../store";
import Locale from "../locales";
import { ListItem, Select } from "./ui-lib";
import {
DEFAULT_TTS_ENGINE,
DEFAULT_TTS_ENGINES,
DEFAULT_TTS_MODELS,
DEFAULT_TTS_VOICES,
ServiceProvider,
TTS_CONFIGS,
TTSEngineType
} from "../constant";
import { InputRange } from "./input-range";

@@ -48,22 +47,33 @@ export function TTSConfigList(props: {
<Select
value={props.ttsConfig.engine}
onChange={(e) => {
const newEngine = e.currentTarget.value as TTSEngineType;
props.updateConfig(
(config) =>
(config.engine = TTSConfigValidator.engine(
e.currentTarget.value,
)),
(config) => {
config.engine = TTSConfigValidator.engine(newEngine);
const engineConfig = TTS_CONFIGS[newEngine];
config.model = TTSConfigValidator.model(
engineConfig.Model[0] || ""
);
config.voice = TTSConfigValidator.voice(
engineConfig.Voices[0] || ""
);
config.modelProvider = TTSConfigValidator.modelProvider(
engineConfig.ModelProvider
);
}
);
}}
>
{DEFAULT_TTS_ENGINES.map((v, i) => (
{Object.keys(TTS_CONFIGS).map((v, i) => (
<option value={v} key={i}>
{v}
{v}-TTS
</option>
))}
</Select>
</ListItem>
{props.ttsConfig.engine === DEFAULT_TTS_ENGINE && (
{(props.ttsConfig.engine === ServiceProvider.OpenAI ||
props.ttsConfig.engine === ServiceProvider.Alibaba) && (
<>
<ListItem title={Locale.Settings.TTS.Model}>
<Select

@@ -77,7 +87,7 @@ export function TTSConfigList(props: {
);
}}
>
{DEFAULT_TTS_MODELS.map((v, i) => (
{TTS_CONFIGS[props.ttsConfig.engine]!.Model.map((v, i) => (
<option value={v} key={i}>
{v}
</option>

@@ -99,7 +109,7 @@ export function TTSConfigList(props: {
);
}}
>
{DEFAULT_TTS_VOICES.map((v, i) => (
{TTS_CONFIGS[props.ttsConfig.engine]!.Voices.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
@@ -232,6 +232,7 @@ export const Alibaba = {
}
return `v1/services/aigc/text-generation/generation`;
},
SpeechPath: "v1/services/aigc/multimodal-generation/generation",
};

export const Tencent = {

@@ -461,19 +462,49 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"deepseek-coder": "2024-07",
};

export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
export const DEFAULT_TTS_ENGINES = ["OpenAI-TTS", "Edge-TTS"];
export const DEFAULT_TTS_ENGINE = ServiceProvider.OpenAI;
export const DEFAULT_TTS_MODEL = "tts-1";
export const DEFAULT_TTS_VOICE = "alloy";
export const DEFAULT_TTS_MODELS = ["tts-1", "tts-1-hd"];
export const DEFAULT_TTS_VOICES = [
"alloy",
"echo",
"fable",
"onyx",
"nova",
"shimmer",
];

export const OPENAI_TTS = {
Provider: ServiceProvider.OpenAI,
ModelProvider: ModelProvider.GPT,
Model: ["tts-1", "tts-1-hd"],
Voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
} as const;

export const ALIBABA_TTS = {
Provider: ServiceProvider.Alibaba,
ModelProvider: ModelProvider.Qwen,
Model: ["qwen-tts", "qwen-tts-latest"],
Voices: ["Chelsie", "Cherry", "Ethan", "Serena", "Dylan", "Jada", "Sunny"],
} as const;

export const EDGE_TTS = {
Provider: "Edge" as const,
ModelProvider: ModelProvider.GPT,
Model: [] as string[],
Voices: [] as string[],
} as const;

export type TTSEngineType = ServiceProvider.OpenAI | ServiceProvider.Alibaba | "Edge";

export const DEFAULT_TTS_ENGINES = [ServiceProvider.OpenAI, ServiceProvider.Alibaba, "Edge"] as const;
export const DEFAULT_TTS_MODELS = [...OPENAI_TTS.Model, ...ALIBABA_TTS.Model] as const;
export const DEFAULT_TTS_VOICES = [...OPENAI_TTS.Voices, ...ALIBABA_TTS.Voices] as const;

interface TTSConfigItem {
Provider: ServiceProvider | "Edge";
Model: readonly string[];
Voices: readonly string[];
ModelProvider: ModelProvider;
}

export const TTS_CONFIGS: Record<TTSEngineType, TTSConfigItem> = {
[ServiceProvider.OpenAI]: OPENAI_TTS,
[ServiceProvider.Alibaba]: ALIBABA_TTS,
Edge: EDGE_TTS,
} as const;

export const VISION_MODEL_REGEXES = [
/vision/,

@@ -926,3 +957,4 @@ export const DEFAULT_GA_ID = "G-89WN60ZK2E";

export const SAAS_CHAT_URL = "https://nextchat.club";
export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";
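A small sketch of how the per-engine tables above are meant to be consumed (this mirrors the tts-config.tsx change earlier in the diff); the chosen engine value is illustrative.

// Looking up defaults for a newly selected TTS engine (illustrative):
const engine: TTSEngineType = ServiceProvider.Alibaba;
const engineConfig = TTS_CONFIGS[engine];
const defaultModel = engineConfig.Model[0] ?? "";  // "qwen-tts"
const defaultVoice = engineConfig.Voices[0] ?? ""; // "Chelsie"
const provider = engineConfig.ModelProvider;       // ModelProvider.Qwen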
app/icons/network.svg (new file, +1)
@@ -0,0 +1 @@
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1754388361314" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1734" xmlns:xlink="http://www.w3.org/1999/xlink" width="16" height="16"><path d="M522.666667 42.666667c3.776 0 7.530667 0.170667 11.242666 0.490666C782.954667 54.613333 981.333333 260.138667 981.333333 512c0 251.861333-198.4 457.386667-447.424 468.821333-3.712 0.341333-7.466667 0.512-11.242666 0.512l-3.285334-0.064C516.906667 981.333333 514.474667 981.333333 512 981.333333 252.8 981.333333 42.666667 771.2 42.666667 512S252.8 42.666667 512 42.666667l7.658667 0.042666L522.666667 42.666667zM490.666667 533.333333h-149.056c4.842667 191.082667 74.069333 342.08 149.056 376.576V533.333333z m213.056 0H554.666667v376.576c74.986667-34.517333 144.213333-185.514667 149.056-376.554666z m-426.133334 0H107.221333c8.746667 168.853333 120.853333 310.4 274.261334 362.517334-60.16-81.109333-100.394667-212.650667-103.893334-362.496z m639.189334 0h-149.034667c-3.349333 143.104-40.170667 269.504-95.872 351.253334C810.048 825.216 908.586667 691.221333 916.778667 533.333333zM381.482667 128.128c-146.986667 50.069333-255.936 181.909333-272.597334 341.226667h169.450667c6.634667-140.970667 45.866667-263.978667 103.146667-341.226667zM342.4 469.333333H490.666667V114.090667C418.496 147.285333 351.637333 288.426667 342.4 469.333333zM554.666667 114.090667L554.666667 469.333333h148.266666C693.674667 288.448 626.837333 147.306667 554.666667 114.090667z m117.184 25.322666l1.834666 2.730667c51.904 77.674667 87.04 194.474667 93.290667 327.189333h148.117333c-15.530667-148.565333-111.317333-273.237333-243.242666-329.92z" fill="#333333" p-id="1735"></path></svg>
@@ -72,6 +72,10 @@ const ar: PartialLocaleType = {
light: "الوضع الفاتح",
dark: "الوضع الداكن",
},
NetWork: {
on: "تفعيل البحث عبر الإنترنت",
off: "إيقاف البحث عبر الإنترنت",
},
Prompt: "الأوامر السريعة",
Masks: "جميع الأقنعة",
Clear: "مسح الدردشة",

@@ -72,6 +72,10 @@ const bn: PartialLocaleType = {
light: "আলোর মোড",
dark: "অন্ধকার মোড",
},
NetWork: {
on: "ওয়েব অনুসন্ধান সক্রিয় করুন",
off: "ওয়েব অনুসন্ধান নিষ্ক্রিয় করুন",
},
Prompt: "সংক্ষিপ্ত নির্দেশনা",
Masks: "সমস্ত মাস্ক",
Clear: "চ্যাট পরিষ্কার করুন",

@@ -76,6 +76,10 @@ const cn = {
light: "亮色模式",
dark: "深色模式",
},
NetWork: {
on: "开启联网搜索",
off: "关闭联网搜索",
},
Prompt: "快捷指令",
Masks: "所有面具",
Clear: "清除聊天",

@@ -72,6 +72,10 @@ const cs: PartialLocaleType = {
light: "Světelný režim",
dark: "Tmavý režim",
},
NetWork: {
on: "Povolit webové vyhledávání",
off: "Zakázat webové vyhledávání",
},
Prompt: "Rychlé příkazy",
Masks: "Všechny masky",
Clear: "Vymazat konverzaci",

@@ -74,6 +74,10 @@ const da: PartialLocaleType = {
light: "Lyst tema",
dark: "Mørkt tema",
},
NetWork: {
on: "Aktivér web-søgning",
off: "Deaktivér web-søgning",
},
Prompt: "Prompts",
Masks: "Personaer",
Clear: "Ryd kontekst",

@@ -73,6 +73,10 @@ const de: PartialLocaleType = {
light: "Helles Thema",
dark: "Dunkles Thema",
},
NetWork: {
on: "Web-Suche aktivieren",
off: "Web-Suche deaktivieren",
},
Prompt: "Schnellbefehle",
Masks: "Alle Masken",
Clear: "Chat löschen",

@@ -437,7 +441,8 @@ const de: PartialLocaleType = {
AI302: {
ApiKey: {
Title: "Schnittstellenschlüssel",
SubTitle: "Verwenden Sie einen benutzerdefinierten 302.AI API-Schlüssel",
SubTitle:
"Verwenden Sie einen benutzerdefinierten 302.AI API-Schlüssel",
Placeholder: "302.AI API-Schlüssel",
},
Endpoint: {
@@ -77,6 +77,10 @@ const en: LocaleType = {
light: "Light Theme",
dark: "Dark Theme",
},
NetWork: {
on: "Enable Web Search",
off: "Disable Web Search",
},
Prompt: "Prompts",
Masks: "Masks",
Clear: "Clear Context",

@@ -74,6 +74,10 @@ const es: PartialLocaleType = {
light: "Modo claro",
dark: "Modo oscuro",
},
NetWork: {
on: "Habilitar búsqueda web",
off: "Deshabilitar búsqueda web",
},
Prompt: "Comandos rápidos",
Masks: "Todas las máscaras",
Clear: "Limpiar chat",

@@ -73,6 +73,10 @@ const fr: PartialLocaleType = {
light: "Mode clair",
dark: "Mode sombre",
},
NetWork: {
on: "Activer la recherche web",
off: "Désactiver la recherche web",
},
Prompt: "Commandes rapides",
Masks: "Tous les masques",
Clear: "Effacer la discussion",

@@ -72,6 +72,10 @@ const id: PartialLocaleType = {
light: "Mode Terang",
dark: "Mode Gelap",
},
NetWork: {
on: "Aktifkan pencarian web",
off: "Nonaktifkan pencarian web",
},
Prompt: "Perintah Cepat",
Masks: "Semua Masker",
Clear: "Hapus Obrolan",

@@ -73,6 +73,10 @@ const it: PartialLocaleType = {
light: "Tema chiaro",
dark: "Tema scuro",
},
NetWork: {
on: "Abilita ricerca web",
off: "Disabilita ricerca web",
},
Prompt: "Comandi rapidi",
Masks: "Tutte le maschere",
Clear: "Pulisci chat",

@@ -72,6 +72,10 @@ const jp: PartialLocaleType = {
light: "ライトモード",
dark: "ダークモード",
},
NetWork: {
on: "ウェブ検索を有効化",
off: "ウェブ検索を無効化",
},
Prompt: "クイックコマンド",
Masks: "すべてのマスク",
Clear: "チャットをクリア",

@@ -76,6 +76,10 @@ const ko: PartialLocaleType = {
light: "라이트 모드",
dark: "다크 모드",
},
NetWork: {
on: "웹 검색 활성화",
off: "웹 검색 비활성화",
},
Prompt: "빠른 명령",
Masks: "모든 마스크",
Clear: "채팅 지우기",

@@ -74,6 +74,10 @@ const no: PartialLocaleType = {
light: "Lyst tema",
dark: "Mørkt tema",
},
NetWork: {
on: "Aktiver web-søk",
off: "Deaktiver web-søk",
},
Prompt: "Hurtigkommando",
Masks: "Alle masker",
Clear: "Rydd samtale",

@@ -72,6 +72,10 @@ const pt: PartialLocaleType = {
light: "Tema Claro",
dark: "Tema Escuro",
},
NetWork: {
on: "Ativar pesquisa web",
off: "Desativar pesquisa web",
},
Prompt: "Prompts",
Masks: "Máscaras",
Clear: "Limpar Contexto",

@@ -72,6 +72,10 @@ const ru: PartialLocaleType = {
light: "Светлая тема",
dark: "Темная тема",
},
NetWork: {
on: "Включить веб-поиск",
off: "Отключить веб-поиск",
},
Prompt: "Быстрая команда",
Masks: "Все маски",
Clear: "Очистить чат",

@@ -73,6 +73,10 @@ const sk: PartialLocaleType = {
light: "Svetlý motív",
dark: "Tmavý motív",
},
NetWork: {
on: "Povoliť webové vyhľadávanie",
off: "Zakázať webové vyhľadávanie",
},
Prompt: "Výzvy",
Masks: "Masky",
Clear: "Vymazať kontext",

@@ -72,6 +72,10 @@ const tr: PartialLocaleType = {
light: "Açık mod",
dark: "Koyu mod",
},
NetWork: {
on: "Web aramasını etkinleştir",
off: "Web aramasını devre dışı bırak",
},
Prompt: "Kısayol komutu",
Masks: "Tüm maskeler",
Clear: "Sohbeti temizle",

@@ -72,6 +72,10 @@ const tw = {
light: "亮色模式",
dark: "深色模式",
},
NetWork: {
on: "開啟網路搜尋",
off: "關閉網路搜尋",
},
Prompt: "快捷指令",
Masks: "所有角色範本",
Clear: "清除聊天",

@@ -72,6 +72,10 @@ const vi: PartialLocaleType = {
light: "Chế độ sáng",
dark: "Chế độ tối",
},
NetWork: {
on: "Bật tìm kiếm web",
off: "Tắt tìm kiếm web",
},
Prompt: "Lệnh tắt",
Masks: "Tất cả mặt nạ",
Clear: "Xóa cuộc trò chuyện",
@@ -6,13 +6,14 @@ import {
DEFAULT_MODELS,
DEFAULT_SIDEBAR_WIDTH,
DEFAULT_TTS_ENGINE,
DEFAULT_TTS_ENGINES,
DEFAULT_TTS_MODEL,
DEFAULT_TTS_MODELS,
DEFAULT_TTS_VOICE,
DEFAULT_TTS_VOICES,
StoreKey,
ServiceProvider,
TTSEngineType,
ModelProvider,
} from "../constant";
import { createPersistStore } from "../utils/store";
import type { Voice } from "rt-client";

@@ -20,7 +21,6 @@ import type { Voice } from "rt-client";
export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
export type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];
export type TTSVoiceType = (typeof DEFAULT_TTS_VOICES)[number];
export type TTSEngineType = (typeof DEFAULT_TTS_ENGINES)[number];

export enum SubmitKey {
Enter = "Enter",

@@ -81,12 +81,14 @@ export const DEFAULT_CONFIG = {
size: "1024x1024" as ModelSize,
quality: "standard" as DalleQuality,
style: "vivid" as DalleStyle,
enableNetWork: false,
},

ttsConfig: {
enable: false,
autoplay: false,
engine: DEFAULT_TTS_ENGINE,
modelProvider: ModelProvider.GPT,
engine: DEFAULT_TTS_ENGINE as TTSEngineType,
model: DEFAULT_TTS_MODEL,
voice: DEFAULT_TTS_VOICE,
speed: 1.0,

@@ -126,18 +128,21 @@ export function limitNumber(
}

export const TTSConfigValidator = {
engine(x: string) {
engine(x: string | TTSEngineType): TTSEngineType {
return x as TTSEngineType;
},
model(x: string) {
model(x: string): TTSModelType {
return x as TTSModelType;
},
voice(x: string) {
voice(x: string): TTSVoiceType {
return x as TTSVoiceType;
},
speed(x: number) {
speed(x: number): number {
return limitNumber(x, 0.25, 4.0, 1.0);
},
modelProvider(x: string): ModelProvider {
return x as ModelProvider;
},
};

export const ModalConfigValidator = {
app/utils.ts (15 changed lines)
@@ -296,6 +296,15 @@ export function isDalle3(model: string) {
return "dall-e-3" === model;
}

export function canUseNetWork(model: string) {
return (
model.includes("qwen-max") ||
model.includes("qwen-plus") ||
model.includes("qwen-turbo") ||
model.includes("qwq")
);
}

export function getTimeoutMSByModel(model: string) {
model = model.toLowerCase();
if (

@@ -347,6 +356,12 @@ export function showPlugins(provider: ServiceProvider, model: string) {
if (provider == ServiceProvider.Google && !model.includes("vision")) {
return true;
}
if (
provider == ServiceProvider.Alibaba &&
(model.includes("qwen") || model.includes("deepseek"))
) {
return true;
}
return false;
}
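A short illustration of how canUseNetWork gates the web-search toggle; the model names are examples checked against the allow list above.

canUseNetWork("qwen-max-latest"); // true (matches "qwen-max")
canUseNetWork("qwen-turbo");      // true
canUseNetWork("gpt-4o-mini");     // false, so the toggle is hidden and enableNetWork is reset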
@@ -1,25 +1,48 @@
type TTSPlayer = {
init: () => void;
play: (audioBuffer: ArrayBuffer, onended: () => void | null) => Promise<void>;
play: (
audioBuffer: ArrayBuffer | AudioBuffer,
onended: () => void | null,
) => Promise<void>;
playQueue: (
audioBuffers: (ArrayBuffer | AudioBuffer)[],
onended: () => void | null,
) => Promise<void>;
addToQueue: (audioBuffer: ArrayBuffer | AudioBuffer) => void;
startStreamPlay: (onended: () => void | null) => void;
finishStreamPlay: () => void;
stop: () => void;
};

export function createTTSPlayer(): TTSPlayer {
let audioContext: AudioContext | null = null;
let audioBufferSourceNode: AudioBufferSourceNode | null = null;
let isPlaying = false;
let playQueue: (ArrayBuffer | AudioBuffer)[] = [];
let currentOnended: (() => void | null) | null = null;
let isStreamMode = false;
let streamFinished = false;

const init = () => {
console.log("[TTSPlayer] init");
audioContext = new (window.AudioContext || window.webkitAudioContext)();
audioContext.suspend();
};

const play = async (audioBuffer: ArrayBuffer, onended: () => void | null) => {
const play = async (
audioBuffer: ArrayBuffer | AudioBuffer,
onended: () => void | null,
) => {
if (audioBufferSourceNode) {
audioBufferSourceNode.stop();
audioBufferSourceNode.disconnect();
}

const buffer = await audioContext!.decodeAudioData(audioBuffer);
let buffer: AudioBuffer;
if (audioBuffer instanceof AudioBuffer) {
buffer = audioBuffer;
} else {
buffer = await audioContext!.decodeAudioData(audioBuffer);
}
audioBufferSourceNode = audioContext!.createBufferSource();
audioBufferSourceNode.buffer = buffer;
audioBufferSourceNode.connect(audioContext!.destination);

@@ -29,17 +52,109 @@ export function createTTSPlayer(): TTSPlayer {
audioBufferSourceNode.onended = onended;
};
const stop = () => {
const playNext = async () => {
if (playQueue.length === 0) {
// In stream mode, wait if the queue is empty but the stream has not finished yet
if (isStreamMode && !streamFinished) {
setTimeout(() => playNext(), 100);
return;
}

isPlaying = false;
isStreamMode = false;
streamFinished = false;
if (currentOnended) {
currentOnended();
currentOnended = null;
}
return;
}

const nextBuffer = playQueue.shift()!;
let buffer: AudioBuffer;
if (nextBuffer instanceof AudioBuffer) {
buffer = nextBuffer;
} else {
buffer = await audioContext!.decodeAudioData(nextBuffer);
}

if (audioBufferSourceNode) {
audioBufferSourceNode.stop();
audioBufferSourceNode.disconnect();
}

audioBufferSourceNode = audioContext!.createBufferSource();
audioBufferSourceNode.buffer = buffer;
audioBufferSourceNode.connect(audioContext!.destination);
audioBufferSourceNode.onended = () => {
playNext();
};

await audioContext!.resume();
audioBufferSourceNode.start();
};

const playQueueMethod = async (
audioBuffers: (ArrayBuffer | AudioBuffer)[],
onended: () => void | null,
) => {
playQueue = [...audioBuffers];
currentOnended = onended;
if (!isPlaying) {
isPlaying = true;
await playNext();
}
};

const addToQueue = (audioBuffer: ArrayBuffer | AudioBuffer) => {
if (streamFinished) {
return;
}
playQueue.push(audioBuffer);
};

const startStreamPlay = (onended: () => void | null) => {
isStreamMode = true;
streamFinished = false;
playQueue = [];
currentOnended = onended;

if (!isPlaying) {
isPlaying = true;
playNext();
}
};

const finishStreamPlay = () => {
streamFinished = true;
};

const stop = async () => {
console.log("[TTSPlayer] stop");
playQueue = [];
isPlaying = false;
isStreamMode = false;
streamFinished = true;
currentOnended = null;

if (audioBufferSourceNode) {
audioBufferSourceNode.stop();
audioBufferSourceNode.disconnect();
audioBufferSourceNode = null;
}
if (audioContext) {
audioContext.close();
await audioContext.close();
audioContext = null;
}
};

return { init, play, stop };
return {
init,
play,
playQueue: playQueueMethod,
addToQueue,
startStreamPlay,
finishStreamPlay,
stop,
};
}
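For completeness, a brief sketch of the non-streaming playQueue API added above; the clips array is illustrative.

// Queue several pre-fetched clips and play them back to back (illustrative usage):
const player = createTTSPlayer();
player.init();
const clips: ArrayBuffer[] = []; // e.g. responses collected from multiple speech() calls
await player.playQueue(clips, () => {
  console.log("queue finished"); // the onended callback fires once the queue drains
});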
@@ -93,5 +93,9 @@
"resolutions": {
"lint-staged/yaml": "^2.2.2"
},
"packageManager": "yarn@1.22.19"
"packageManager": "yarn@1.22.19",
"volta": {
"node": "20.19.4",
"yarn": "1.22.19"
}
}