https://github.com/guest271314/SpeechSynthesisRecorderを参考にしてやってみましたがindex.html:1 Uncaught (in promise) DOMExceptionというエラーが出てしまいます。
クロームのデベロッパーで見てみると .then(({tts, data}) => の処理に×がついているのですが何がダメなのかがわかりません。
分かる人がいましたら教えてください
HTML
1<!DOCTYPE html> 2<html lang="ja"> 3<head> 4 <meta charset="utf-8"> 5 <script type="text/javascript" src="js/voiceappclass.js"></script> 6 <script type="text/javascript" src="js/voiceapp.js"></script> 7</head> 8<body> 9 <p>API版</p> 10 11 <button onclick="dospeech();">test!</button> 12 <input type="text" value="TEST" id="word" style="width:80%;" /> 13</body> 14</html>
javascript
1function dospeech(){ 2 let ttsRecorder = new SpeechSynthesisRecorder({ 3 text: "The revolution will not be televised", 4 utteranceOptions: { 5 voice: "english-us espeak", 6 lang: "en-US", 7 pitch: .75, 8 rate: 1 9 } 10 }); 11 12 // ArrayBuffer 13 ttsRecorder.start() 14 .then(tts => tts.arrayBuffer()) 15 .then(({tts, data}) => { 16 tts.audioNode.src = URL.createObjectURL(new Blob([data], {type:tts.mimeType})); 17 tts.audioNode.title = tts.utterance.text; 18 tts.audioNode.onloadedmetadata = () => { 19 console.log(tts.audioNode.duration); 20 tts.audioNode.play(); 21 } 22 }) 23// AudioBuffer 24 ttsRecorder.start() 25 .then(tts => tts.audioBuffer()) 26 .then(({tts, data}) => { 27 let source = tts.audioContext.createBufferSource(); 28 source.buffer = data; 29 source.connect(tts.audioContext.destination); 30 source.start() 31 }) 32 33 // Blob 34 ttsRecorder.start() 35 .then(tts => tts.blob()) 36 .then(({tts, data}) => { 37 tts.audioNode.src = URL.createObjectURL(blob); 38 tts.audioNode.title = tts.utterance.text; 39 tts.audioNode.onloadedmetadata = () => { 40 console.log(tts.audioNode.duration); 41 tts.audioNode.play(); 42 } 43 }) 44// ReadableStream 45 ttsRecorder.start() 46 .then(tts => tts.readableStream()) 47 .then(({tts, data}) => { 48 console.log(tts, data); 49 data.getReader().read().then(({value, done}) => { 50 tts.audioNode.src = URL.createObjectURL(value[0]); 51 tts.audioNode.title = tts.utterance.text; 52 tts.audioNode.onloadedmetadata = () => { 53 console.log(tts.audioNode.duration); 54 tts.audioNode.play(); 55 } 56 }) 57 }) 58 // MediaSource 59 ttsRecorder.start() 60 .then(tts => tts.mediaSource()) 61 .then(({tts, data}) => { 62 console.log(tts, data); 63 tts.audioNode.srcObj = data; 64 tts.audioNode.title = tts.utterance.text; 65 tts.audioNode.onloadedmetadata = () => { 66 console.log(tts.audioNode.duration); 67 tts.audioNode.play(); 68 } 69 }) 70// MediaStream 71// let ttsRecorder = new SpeechSynthesisRecorder({ 72// text: "", 73// utternanceOptions: { 74// voice: "日本人", 75// lang: "ja-JP", 76// pitch: 1.0, 77// rate: 1 78// }, 79// dataType:"mediaStream" 80// }); 81// ttsRecorder.start() 82// .then(({tts, data}) => { 83// }) 84// .catch(err => console.log(err)) 85};
javascript
1class SpeechSynthesisRecorder { 2 constructor({ 3 text = '', utteranceOptions = {}, recorderOptions = {}, dataType = '' 4 }) { 5 if (text === '') throw new Error('no words to synthesize') 6 this.dataType = dataType 7 this.text = text 8 this.mimeType = MediaRecorder.isTypeSupported('audio/webm; codecs=opus') ? 'audio/webm; codecs=opus' : 'audio/ogg; codecs=opus' 9 this.utterance = new SpeechSynthesisUtterance(this.text) 10 this.speechSynthesis = window.speechSynthesis 11 this.mediaStream_ = new MediaStream() 12 this.mediaSource_ = new MediaSource() 13 this.mediaRecorder = new MediaRecorder(this.mediaStream_, { 14 mimeType: this.mimeType, 15 bitsPerSecond: 256 * 8 * 1024 16 }) 17 this.audioContext = new AudioContext() 18 this.audioNode = new Audio() 19 this.chunks = [] 20 if (utteranceOptions) { 21 if (utteranceOptions.voice) { 22 this.speechSynthesis.onvoiceschanged = e => { 23 const voice = this.speechSynthesis.getVoices().find(({ 24 name: _name 25 }) => _name === utteranceOptions.voice) 26 this.utterance.voice = voice 27 console.log(voice, this.utterance) 28 } 29 this.speechSynthesis.getVoices() 30 } 31 let { 32 lang, rate, pitch, volume 33 } = utteranceOptions; 34 console.log(rate) 35 this.utterance.lang = 'en-US' 36 this.utterance.voice = window.speechSynthesis.getVoices()[0]; // 7:Google 日本人 ja-JP ※他は英語のみ(次項参照) 37 this.utterance.volume = 1.0; // 音量 min 0 ~ max 1 38 this.utterance.rate = 1.0; // 速度 min 0 ~ max 10 39 this.utterance.pitch = 1.0; // 音程 min 0 ~ max 2 40 } 41 console.log(this.utterance) 42 this.audioNode.controls = 'controls' 43 document.body.appendChild(this.audioNode) 44 } 45 start(text = '') { 46 if (text) this.text = text 47 if (this.text === '') throw new Error('no words to synthesize') 48 return navigator.mediaDevices.getUserMedia({ 49 audio: true 50 }) 51 .then(stream => navigator.mediaDevices.enumerateDevices() 52 .then(devices => { 53 const audiooutput = devices.find(device => device.kind == "audiooutput"); 54 stream.getTracks().forEach(track => track.stop()) 55 if (audiooutput) { 56 const constraints = { 57 deviceId: { 58 exact: audiooutput.deviceId 59 } 60 }; 61 return navigator.mediaDevices.getUserMedia({ 62 audio: constraints 63 }); 64 } 65 return navigator.mediaDevices.getUserMedia({ 66 audio: true 67 }); 68 })) 69 .then(stream => new Promise(resolve => { 70 const track = stream.getAudioTracks()[0] 71 this.mediaStream_.addTrack(track) 72 // return the current `MediaStream` 73 if (this.dataType && this.dataType === 'mediaStream') { 74 resolve({ 75 tts: this, 76 data: this.mediaStream_ 77 }) 78 }; 79 this.mediaRecorder.ondataavailable = event => { 80 if (event.data.size > 0) { 81 this.chunks.push(event.data) 82 }; 83 } 84 this.mediaRecorder.onstop = () => { 85 track.stop() 86 this.mediaStream_.getAudioTracks()[0].stop() 87 this.mediaStream_.removeTrack(track) 88 console.log(`Completed recording ${this.utterance.text}`, this.chunks) 89 resolve(this) 90 } 91 this.mediaRecorder.start() 92 this.utterance.onstart = () => { 93 console.log(`Starting recording SpeechSynthesisUtterance ${this.utterance.text}`) 94 } 95 this.utterance.onend = () => { 96 this.mediaRecorder.stop() 97 console.log(`Ending recording SpeechSynthesisUtterance ${this.utterance.text}`) 98 } 99 this.speechSynthesis.speak(this.utterance) 100 })) 101 } 102 blob() { 103 if (!this.chunks.length) throw new Error('no data to return') 104 return Promise.resolve({ 105 tts: this, 106 data: this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, { 107 type: this.mimeType 108 }) 109 }) 110 } 111 arrayBuffer(blob) { 112 if (!this.chunks.length) throw new Error('no data to return') 113 return new Promise(resolve => { 114 const reader = new FileReader() 115 reader.onload = e => resolve(({ 116 tts: this, 117 data: reader.result 118 })) 119 reader.readAsArrayBuffer(blob ? new Blob(blob, { 120 type: blob.type 121 }) : this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, { 122 type: this.mimeType 123 })) 124 }) 125 } 126 audioBuffer() { 127 if (!this.chunks.length) throw new Error('no data to return') 128 return this.arrayBuffer() 129 .then(({ 130 tts, data 131 }) => this.audioContext.decodeAudioData(data)) 132 .then(buffer => ({ 133 tts: this, 134 data: buffer 135 })) 136 } 137 mediaSource() { 138 if (!this.chunks.length) throw new Error('no data to return') 139 return this.arrayBuffer() 140 .then(({ 141 data: ab 142 }) => new Promise((resolve, reject) => { 143 this.mediaSource_.onsourceended = () => resolve({ 144 tts: this, 145 data: this.mediaSource_ 146 }) 147 this.mediaSource_.onsourceopen = () => { 148 if (MediaSource.isTypeSupported(this.mimeType)) { 149 const sourceBuffer = this.mediaSource_.addSourceBuffer(this.mimeType) 150 sourceBuffer.mode = 'sequence' 151 sourceBuffer.onupdateend = () => 152 this.mediaSource_.endOfStream() 153 sourceBuffer.appendBuffer(ab) 154 } else { 155 reject(new Error(`${this.mimeType} is not supported`)) 156 } 157 } 158 this.audioNode.src = URL.createObjectURL(this.mediaSource_) 159 })) 160 } 161 readableStream({ 162 size = 1024, controllerOptions = {}, rsOptions = {} 163 }) { 164 if (!this.chunks.length) throw new Error('no data to return') 165 const src = this.chunks.slice(0) 166 const chunk = size 167 return Promise.resolve({ 168 tts: this, 169 data: new ReadableStream(controllerOptions || { 170 start(controller) { 171 console.log(src.length) 172 controller.enqueue(src.splice(0, chunk)) 173 }, 174 pull(controller) { 175 if (src.length === 0) controller.close() 176 controller.enqueue(src.splice(0, chunk)) 177 } 178 }, rsOptions) 179 }) 180 } 181} 182if (typeof module !== 'undefined') module.exports = SpeechSynthesisRecorder 183if (typeof window !== 'undefined') window.SpeechSynthesisRecorder = SpeechSynthesisRecorder
あなたの回答
tips
プレビュー