diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts
index ce88f6d53..4d2ca6232 100644
--- a/app/client/platforms/alibaba.ts
+++ b/app/client/platforms/alibaba.ts
@@ -67,7 +67,7 @@ interface RequestPayload {
 }
 
 export class QwenApi implements LLMApi {
-  private audioContext?: AudioContext;
+  private static audioContext: AudioContext | null = null;
 
   path(path: string): string {
     const accessStore = useAccessStore.getState();
@@ -133,6 +133,7 @@ export class QwenApi implements LLMApi {
       );
 
       const res = await fetch(speechPath, speechPayload);
+      clearTimeout(requestTimeoutId); // Clear timeout on successful connection
       const reader = res.body!.getReader();
       const decoder = new TextDecoder();
 
@@ -150,13 +151,12 @@ export class QwenApi implements LLMApi {
           if (line.startsWith("data:")) {
             const data = line.slice(5);
             const json = JSON.parse(data);
-            if (json.output.audio.data) {
+            if (json.output?.audio?.data) {
              yield this.PCMBase64ToAudioBuffer(json.output.audio.data);
             }
           }
         }
       }
-      clearTimeout(requestTimeoutId);
       reader.releaseLock();
     } catch (e) {
       console.log("[Request] failed to make a speech request", e);
@@ -371,13 +371,17 @@ export class QwenApi implements LLMApi {
     }
   }
 
-  // 将 PCM 字节数据转换为 AudioBuffer
-  private convertToAudioBuffer(pcmData: Uint8Array) {
-    if (!this.audioContext) {
-      this.audioContext = new (window.AudioContext ||
+  private static getAudioContext(): AudioContext {
+    if (!QwenApi.audioContext) {
+      QwenApi.audioContext = new (window.AudioContext ||
         window.webkitAudioContext)();
     }
-    const audioContext = this.audioContext;
+    return QwenApi.audioContext;
+  }
+
+  // 将 PCM 字节数据转换为 AudioBuffer
+  private convertToAudioBuffer(pcmData: Uint8Array) {
+    const audioContext = QwenApi.getAudioContext();
     const channels = 1;
     const sampleRate = 24000;
     return new Promise((resolve, reject) => {
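
For context, here is a minimal, self-contained sketch (not the repository code) of the pattern the diff introduces: a lazily initialized, class-level AudioContext shared across requests, plus a hypothetical decodePCM16 helper showing how such a shared context could turn 16-bit mono 24 kHz PCM bytes into an AudioBuffer. The names AudioContextHolder and decodePCM16 are assumptions for illustration only.

// Sketch of a lazily created, shared AudioContext (illustrative only).
class AudioContextHolder {
  private static audioContext: AudioContext | null = null;

  static get(): AudioContext {
    if (!AudioContextHolder.audioContext) {
      // Fall back to the prefixed constructor on older WebKit browsers.
      AudioContextHolder.audioContext = new (window.AudioContext ||
        (window as any).webkitAudioContext)();
    }
    return AudioContextHolder.audioContext;
  }

  // Hypothetical helper: decode 16-bit little-endian PCM into an AudioBuffer
  // (mono, 24 kHz, matching the constants used in the diff).
  static decodePCM16(pcmData: Uint8Array): AudioBuffer {
    const ctx = AudioContextHolder.get();
    const sampleRate = 24000;
    // Copy so the Int16Array view starts at offset 0 regardless of the
    // original buffer's alignment.
    const bytes = pcmData.slice();
    const samples = new Int16Array(bytes.buffer, 0, Math.floor(bytes.byteLength / 2));
    const buffer = ctx.createBuffer(1, samples.length, sampleRate);
    const channel = buffer.getChannelData(0);
    for (let i = 0; i < samples.length; i++) {
      channel[i] = samples[i] / 32768; // scale 16-bit PCM to [-1, 1)
    }
    return buffer;
  }
}

Because the context lives on the class rather than on each instance, repeated speech requests reuse one AudioContext instead of allocating a new one per call, which is the motivation for the static field in the diff.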