How to record "Japanese" SpeechSynthesis voice in Electron - javascript

I am creating an application using Electron, and I want to output SpeechSynthesis audio to a WAV file.
The voice is Japanese only, so I can't use a library like meSpeak.
I also want to use a voice installed on the local PC (such as 唄詠, Softalk, etc.).
With the following code the output is not correct: the file itself is written, but its contents are not what I intend.
The tested environment is macOS 10.13.1.
How should I do this?
Main Process:
// This code is only a part of the whole.
// There are actually many other properties;
// this shows only the minimum necessary part.
let obj = {
  speech: "コンニチワ", // converted from the sentence to its katakana reading
  cast: {
    voice: {
      pitch: 100,
      speed: 100,
      volume: 50.0,
      name: ""
    }
  }
}
// mainWindow is a BrowserWindow
mainWindow.webContents.send('record-speech', obj, "example.wav")
Renderer Process:
ipcRenderer.on('record-speech', (event, obj, path) => {
  // Setup utterance
  let voice = null
  speechSynthesis.onvoiceschanged = () => {
    voice = speechSynthesis.getVoices().find(({
      name: _name
    }) => _name === obj.cast.voice.name)
  }
  let utter = new SpeechSynthesisUtterance()
  utter.lang = "ja-JP"
  utter.text = obj.speech
  utter.pitch = obj.cast.voice.pitch / 100
  utter.rate = obj.cast.voice.speed / 20
  utter.voice = voice
  utter.volume = obj.cast.voice.volume / 100
  let audioContext = new AudioContext()
  navigator.getUserMedia({audio: true}, (stream) => {
    let audioInput = audioContext.createMediaStreamSource(stream)
    // Using the 'wave-recorder' package from npm
    let recorder = WaveRecorder(audioContext, {
      channels: 2,
      bitDepth: 32,
      silenceDuration: 0.1
    })
    audioInput.connect(recorder.input)
    let fileStream = fs.createWriteStream(path)
    recorder.pipe(fileStream)
    recorder.on('header', (header) => {
      let headerStream = fs.createWriteStream(path, {
        start: 0,
        flags: 'w'
      })
      headerStream.write(header)
      headerStream.end()
    })
    utter.onend = ((event) => {
      console.log("Speak ended");
    })
    speechSynthesis.speak(utter)
  }, (e) => { console.log(e); })
})

Related

How do I configure TypeScript to use the latest ecma version?

I have a dependency that calls the evaluate() function, and I'm attempting to deploy it to Heroku. According to the puppeteer troubleshooting README (https://github.com/puppeteer/puppeteer/blob/main/docs/troubleshooting.md, at the very bottom), calling evaluate() with an async function won't work when a transpiler is involved: puppeteer uses Function.prototype.toString() to serialize functions, and transpilers can change the output code in a way that is incompatible with puppeteer. That seems to be happening with my code, because nothing is read or evaluated after I pass the argument ['--no-sandbox'] to the browser, which is necessary according to the section about deploying on Heroku. This is the dependency script that is the issue, and below is the whole dependency file.
// puppeteer-extra is a drop-in replacement for puppeteer,
// it augments the installed puppeteer with plugin functionality
const puppeteer = require('puppeteer-extra');
// add stealth plugin and use defaults (all evasion techniques)
const StealthPlugin = require('puppeteer-extra-plugin-stealth');
puppeteer.use(StealthPlugin());
// load helper function to detect stealth plugin
const { warnIfNotUsingStealth } = require("../helpers/helperFunctions.js");

/**
 * Scrapes all collections from the Rankings page at https://opensea.io/rankings
 * options = {
 *   nbrOfPages: number of pages that should be scraped? (defaults to 1 Page = top 100 collections)
 *   debug: [true,false] enable debugging by launching chrome locally (omit headless mode)
 *   logs: [true,false] show logs in the console
 *   browserInstance: browser instance created with puppeteer.launch() (bring your own puppeteer instance)
 * }
 */
const rankings = async (type = "total", optionsGiven = {}, chain = undefined) => {
  const optionsDefault = {
    debug: false,
    logs: false,
    browserInstance: undefined,
  };
  const options = { ...optionsDefault, ...optionsGiven };
  const { debug, logs, browserInstance } = options;
  const customPuppeteerProvided = Boolean(optionsGiven.browserInstance);
  logs && console.log(`=== OpenseaScraper.rankings() ===\n`);
  // init browser
  let browser = browserInstance;
  if (!customPuppeteerProvided) {
    browser = await puppeteer.launch({
      headless: !debug, // when debug is true => headless should be false
      args: ['--start-maximized', '--no-sandbox'],
    });
  }
  customPuppeteerProvided && warnIfNotUsingStealth(browser);
  const page = await browser.newPage();
  const url = getUrl(type, chain);
  logs && console.log("...opening url: " + url);
  await page.goto(url);
  logs && console.log("...🚧 waiting for cloudflare to resolve");
  await page.waitForSelector('.cf-browser-verification', { hidden: true });
  logs && console.log("extracting __NEXT_DATA variable");
  const __NEXT_DATA__ = await page.evaluate(() => {
    const nextDataStr = document.getElementById("__NEXT_DATA__").innerText;
    return JSON.parse(nextDataStr);
  });
  // extract relevant info
  const top100 = _parseNextDataVarible(__NEXT_DATA__);
  logs && console.log(`🥳 DONE. Total ${top100.length} Collections fetched: `);
  return top100;
}

function _parseNextDataVarible(__NEXT_DATA__) {
  const extractFloorPrice = (statsV2) => {
    try {
      return {
        amount: Number(statsV2.floorPrice.eth),
        currency: "ETH",
      }
    } catch (err) {
      return null;
    }
  }
  const extractCollection = (obj) => {
    return {
      name: obj.name,
      slug: obj.slug,
      logo: obj.logo,
      isVerified: obj.isVerified,
      floorPrice: extractFloorPrice(obj.statsV2),
      // statsV2: obj.statsV2, // 🚧 comment back in if you need additional stats
    };
  }
  return __NEXT_DATA__.props.relayCache[0][1].json.data.rankings.edges.map(obj => extractCollection(obj.node));
}

function getUrl(type, chain) {
  const chainExtraQueryParameter = chain ? `&chain=${chain}` : ''
  if (type === "24h") {
    return `https://opensea.io/rankings?sortBy=one_day_volume${chainExtraQueryParameter}`;
  } else if (type === "7d") {
    return `https://opensea.io/rankings?sortBy=seven_day_volume${chainExtraQueryParameter}`;
  } else if (type === "30d") {
    return `https://opensea.io/rankings?sortBy=thirty_day_volume${chainExtraQueryParameter}`;
  } else if (type === "total") {
    return `https://opensea.io/rankings?sortBy=total_volume${chainExtraQueryParameter}`;
  }
  throw new Error(`Invalid type provided. Expected: 24h,7d,30d,total. Got: ${type}`);
}

module.exports = rankings;
This is why I need to know how to do it
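One way to line this up with puppeteer's serialization constraint is to have TypeScript (or any transpile step in the build) emit modern syntax, so async functions passed to page.evaluate() survive Function.prototype.toString() without being rewritten into helper-based code. A minimal tsconfig.json sketch along those lines; the exact target/lib values are an assumption and should match the Node version running on Heroku:
{
  "compilerOptions": {
    // Emit native async/await instead of downlevel generator/helper code,
    // so serialized functions stay valid when re-evaluated in the browser
    "target": "ES2017",
    "lib": ["ES2017", "DOM"],
    "module": "CommonJS",
    "esModuleInterop": true,
    "strict": true
  }
}
If the dependency itself is plain JavaScript (as the file above appears to be), the same idea applies to any Babel configuration: avoid targets that downlevel async functions.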

Incorrect response MIME type (expected 'application/wasm') with PSPDFKit, and "the reserved size is not enough to contain the signature" issue in Oracle APEX?

I followed the PSPDFKit Standalone Integration steps, and all files are in the public directory (the "i" folder in Apache Tomcat).
I get the "PSPDFKit for Web successfully loaded!" message, but it hangs and the console gives me a single error:
Failed to execute 'compile' on 'WebAssembly': Incorrect response MIME type. Expected 'application/wasm'.
Also, when I try to add a digital signature I face this issue:
Cannot add the container structure because the reserved size is not enough to contain the signature. Available size 8193, actual size 89694
I tried that with the code below:
PSPDFKit.load({
  container: "#pspdfkit",
  document: 'f?p=&APP_ID.:0:&APP_SESSION.:APPLICATION_PROCESS=PREVIEW_FILE:::FILE_ID:' + apex.item('P6_ID').getValue(),
  licenseKey: "",
  initialViewState: new PSPDFKit.ViewState({
    showSignatureValidationStatus:
      PSPDFKit.ShowSignatureValidationStatusMode.IF_SIGNED
  }),
  async trustedCAsCallback() {
    const response = await fetch("http://localhost:90/i/certs/ca.pem");
    const cert = await response.text();
    return [cert];
  }
})
.then(function(instance) {
  var item = {
    type: "custom",
    id: "my-button",
    title: "digital sign",
    onPress: function(event) {
      instance
        .signDocument(null, generatePKCS7)
        .then(() => {
          console.log("document signed.");
        })
        .catch(error => {
          console.error("The document could not be signed.", error);
        });
    }
  };
  if (app_user == "aaaa") {
    instance.setToolbarItems(function(items) {
      items.push(item);
      return items;
    });
  }
  function generatePKCS7({ fileContents }) {
    const certificatePromise = fetch("http://localhost:90/i/certs/certificate.pem").then(response =>
      response.text()
    );
    const privateKeyPromise = fetch("http://localhost:90/i/certs/private-key.pem").then(response =>
      response.text()
    );
    return new Promise((resolve, reject) => {
      Promise.all([certificatePromise, privateKeyPromise])
        .then(([certificatePem, privateKeyPem]) => {
          const certificate = forge.pki.certificateFromPem(certificatePem);
          const privateKey = forge.pki.privateKeyFromPem(privateKeyPem);
          const p7 = forge.pkcs7.createSignedData();
          p7.content = new forge.util.ByteBuffer(fileContents);
          p7.addCertificate(certificate);
          p7.addSigner({
            key: privateKey,
            certificate: certificate,
            digestAlgorithm: forge.pki.oids.sha256,
            authenticatedAttributes: [
              {
                type: forge.pki.oids.contentType,
                value: forge.pki.oids.data
              },
              {
                type: forge.pki.oids.messageDigest
              },
              {
                type: forge.pki.oids.signingTime,
                value: new Date()
              }
            ]
          });
          p7.sign({ detached: true })
          const result = stringToArrayBuffer(
            forge.asn1.toDer(p7.toAsn1()).getBytes()
          );
          resolve(result);
        })
        .catch(reject);
    });
  }
  function stringToArrayBuffer(binaryString) {
    const buffer = new ArrayBuffer(binaryString.length);
    let bufferView = new Uint8Array(buffer);
    for (let i = 0, len = binaryString.length; i < len; i++) {
      bufferView[i] = binaryString.charCodeAt(i);
    }
    return buffer;
  }
})
.catch(function(error) {
  console.error(error.message);
})
APEX version 19.2, Tomcat 8.5, ORDS
It looks like the WASM file is not served with the correct content type. The solution is either to fix your server to return the correct content type or disable streaming instantiation for WebAssembly when loading PSPDFKit, as described here: https://pspdfkit.com/guides/web/current/troubleshooting/common-issues/#response-has-unsupported-mime-type-error
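For the second option, PSPDFKit's load configuration accepts a flag that skips streaming instantiation of WebAssembly, which avoids the strict MIME type check. A minimal sketch reusing the load call from the question, with only the relevant options shown (keep the rest of your configuration as-is):
PSPDFKit.load({
  container: "#pspdfkit",
  document: 'f?p=&APP_ID.:0:&APP_SESSION.:APPLICATION_PROCESS=PREVIEW_FILE:::FILE_ID:' + apex.item('P6_ID').getValue(),
  licenseKey: "",
  // Compile the .wasm file without WebAssembly.compileStreaming(),
  // so the 'application/wasm' Content-Type check no longer applies
  disableWebAssemblyStreaming: true
})
The cleaner long-term fix is still to serve .wasm files with the application/wasm content type, for example by adding a mime-mapping for the wasm extension to Tomcat's conf/web.xml.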
In the future, please reach out to https://pspdfkit.com/support/request and our support team will help you there.

screen sharing on webrtc (How can add to track?)

pc.ontrack = (e) => {
  let _remoteStream = null
  let remoteStreams = this.state.remoteStreams
  let remoteVideo = {}
  // 1. check if stream already exists in remoteStreams
  const rVideos = this.state.remoteStreams.filter(stream => stream.id === socketID)
  // 2. if it does exist then add track
  if (rVideos.length) {
    _remoteStream = rVideos[0].stream
    _remoteStream.addTrack(e.track, _remoteStream)
    remoteVideo = {
      ...rVideos[0],
      stream: _remoteStream,
    }
    remoteStreams = this.state.remoteStreams.map(_remoteVideo => {
      return _remoteVideo.id === remoteVideo.id && remoteVideo || _remoteVideo
    })
  } else {
    // 3. if not, then create new stream and add track
    _remoteStream = new MediaStream()
    _remoteStream.addTrack(e.track, _remoteStream)
    remoteVideo = {
      id: socketID,
      name: socketID,
      stream: _remoteStream,
    }
    remoteStreams = [...this.state.remoteStreams, remoteVideo]
  }
  // const remoteVideo = {
  //   id: socketID,
  //   name: socketID,
  //   stream: e.streams[0]
  // }
  this.setState(prevState => {
    // If we already have a stream in display let it stay the same, otherwise use the latest stream
    // const remoteStream = prevState.remoteStreams.length > 0 ? {} : { remoteStream: e.streams[0] }
    const remoteStream = prevState.remoteStreams.length > 0 ? {} : { remoteStream: _remoteStream }
    // get currently selected video
    let selectedVideo = prevState.remoteStreams.filter(stream => stream.id === prevState.selectedVideo.id)
    // if the video is still in the list, then do nothing, otherwise set to new video stream
    selectedVideo = selectedVideo.length ? {} : { selectedVideo: remoteVideo }
    return {
      // selectedVideo: remoteVideo,
      ...selectedVideo,
      // remoteStream: e.streams[0],
      ...remoteStream,
      remoteStreams, //: [...prevState.remoteStreams, remoteVideo]
    }
  })
}

screenshare.onclick = function() {
  navigator.mediaDevices.getDisplayMedia(constraints)
    .then(success)
    .catch(failure)
}
This is my pc.ontrack code, and I added this button event so that a user can convert their local stream to a screen stream. From the perspective of the current peer, the stream is changed. How can I make the other peers see one person's screen sharing?
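For reference, a common way to push the screen capture to peers that are already connected (not shown in the code above) is to swap the outgoing video track on each RTCPeerConnection with RTCRtpSender.replaceTrack(). A minimal sketch of what the success callback could do, assuming peer connections are kept in a peerConnections object keyed by socket id and the camera stream is available as localStream (both names are assumptions, not from the original code):
function success(screenStream) {
  const screenTrack = screenStream.getVideoTracks()[0]
  // Replace the outgoing camera track with the screen track on every existing connection,
  // so remote peers start receiving the shared screen without adding a new track
  Object.values(peerConnections).forEach(pc => {
    const sender = pc.getSenders().find(s => s.track && s.track.kind === 'video')
    if (sender) sender.replaceTrack(screenTrack)
  })
  // When the user stops sharing, switch back to the camera track
  screenTrack.onended = () => {
    const cameraTrack = localStream.getVideoTracks()[0]
    Object.values(peerConnections).forEach(pc => {
      const sender = pc.getSenders().find(s => s.track && s.track.kind === 'video')
      if (sender) sender.replaceTrack(cameraTrack)
    })
  }
}
Because replaceTrack() does not trigger renegotiation, the remote side keeps the same receiver and simply starts rendering the new frames.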

Autodesk.Viewing.OBJECT_TREE_CREATED_EVENT only emit at the first initialization in Forge viewer

export const initForgeViewer = (urn: string, renderingHTMLElemet: HTMLElement): Promise<any> => {
  const forgeOptions = getForgeOptions(urn)
  return new Promise((resolve, reject) => {
    Autodesk.Viewing.Initializer(forgeOptions, () => {
      const viewerConfig = {
        extensions: ["ToolbarExtension"],
        sharedPropertyDbPath: undefined,
        canvasConfig: undefined, // TODO: Needs documentation or something.
        startOnInitialize: true,
        experimental: []
      }
      const viewer = new Autodesk.Viewing.Private.GuiViewer3D(renderingHTMLElemet, viewerConfig)
      const avd = Autodesk.Viewing.Document
      viewer.setTheme('light-theme')
      viewer.start()
      avd.load(forgeOptions.urn, (doc: any) => { // Autodesk.Viewing.Document
        const viewables = avd.getSubItemsWithProperties(doc.getRootItem(), { type: 'geometry', role: '3d' }, true)
        if (viewables.length === 0) {
          reject(viewer)
          return
        } else {
          const initialViewable = viewables[0]
          const svfUrl = doc.getViewablePath(initialViewable)
          const modelOptions = { sharedPropertyDbPath: doc.getPropertyDbPath() }
          viewer.loadModel(svfUrl, modelOptions, (model: any) => { // Autodesk.Viewing.Model
            this.loadedModel = model
            resolve(viewer)
          })
        }
      })
    })
  })
}
I am using the above code to initialise the Forge viewer, but I realise that Autodesk.Viewing.OBJECT_TREE_CREATED_EVENT is only emitted the first time I initialise the viewer. If I clean up the viewer in the following way and initialise it again, OBJECT_TREE_CREATED_EVENT is no longer fired:
this.viewer.finish()
this.viewer.removeEventListener(Autodesk.Viewing.OBJECT_TREE_CREATED_EVENT,this.onObjectTreeReady)
this.viewer = null
So I can assume you're completely destroying the viewer and creating it again, including all events, right? Please use the following:
viewer.tearDown()
viewer.finish()
viewer = null
Tested using v6
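To illustrate the answer, a minimal sketch of a cleanup/re-init cycle where the listener is registered again on the new viewer instance; initForgeViewer is the function from the question, while urn, container and onObjectTreeReady stand in for your own values and handler:
// Tear down the old instance completely, including its events
this.viewer.removeEventListener(Autodesk.Viewing.OBJECT_TREE_CREATED_EVENT, this.onObjectTreeReady)
this.viewer.tearDown()
this.viewer.finish()
this.viewer = null

// Re-create the viewer and attach the listener to the new instance,
// otherwise OBJECT_TREE_CREATED_EVENT has nothing to call the second time around
initForgeViewer(urn, container).then(viewer => {
  this.viewer = viewer
  this.viewer.addEventListener(Autodesk.Viewing.OBJECT_TREE_CREATED_EVENT, this.onObjectTreeReady)
})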

How to capture generated audio from window.speechSynthesis.speak() call?

Previous questions have presented this same or similar inquiry
Can Web Speech API used in conjunction with Web Audio API?
How to access audio result from Speech Synthesis API?
Record HTML5 SpeechSynthesisUtterance generated speech to file
generate audio file with W3C Web Speech API
yet no workarounds appear to have been created using window.speechSynthesis(), though there are workarounds using espeak or meSpeak (How to create or convert text to audio at chromium browser?) or making requests to external servers.
How to capture and record audio output of window.speechSynthesis.speak() call and return result as a Blob, ArrayBuffer, AudioBuffer or other object type?
The Web Speech API Specification does not presently provide a means or hint on how to achieve returning or capturing and recording audio output of window.speechSynthesis.speak() call.
See also
MediaStream, ArrayBuffer, Blob audio result from speak() for recording?
Re: MediaStream, ArrayBuffer, Blob audio result from speak() for recording?
Re: MediaStream, ArrayBuffer, Blob audio result from speak() for recording?. In pertinent part, use cases include, but are not limited to:
1. Persons who have issues speaking, e.g., persons who have suffered a stroke or other communication-inhibiting afflictions. They could convert text to an audio file and send the file to another individual or group. This feature would go towards helping them communicate with other persons, similar to the technologies which assist Stephen Hawking to communicate.
2. Presently, the only person who can hear the audio output is the person in front of the browser; in essence, the full potential of the text-to-speech functionality is not utilized. The audio result could be used as an attachment within an email, a media stream, a chat system, or another communication application; that is, it gives control over the generated audio output.
3. Another application would be to provide a free, libre, open-source audio dictionary and translation service - client to client, client to server, and server to client.
It is possible to capture the audio output of a window.speechSynthesis.speak() call utilizing navigator.mediaDevices.getUserMedia() and MediaRecorder(). The expected result is returned in the Chromium browser; the implementation in Firefox has issues. Select "Monitor of Built-in Audio Analog Stereo" at the navigator.mediaDevices.getUserMedia() prompt.
The workaround is cumbersome. We should be able to get generated audio, at least as a Blob, without navigator.mediaDevices.getUserMedia() and MediaRecorder().
More interest from users of browsers, JavaScript and C++ developers, browser implementers and specification authors is evidently necessary for further input; to create a proper specification for the feature, and a consistent implementation in browsers' source code; see How to implement option to return Blob, ArrayBuffer, or AudioBuffer from window.speechSynthesis.speak() call.
On Chromium, a speech dispatcher program should be installed and the instance launched with the --enable-speech-dispatcher flag set, as window.speechSynthesis.getVoices() otherwise returns an empty array; see How to use Web Speech API at chromium?.
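As a quick sanity check before trying the proof of concept below, you can confirm that voices are actually exposed; a small sketch whose voiceschanged handling mirrors what the recorder class does:
// Log the available voices; on some platforms the list is only
// populated after the 'voiceschanged' event has fired once
const logVoices = () => console.log(speechSynthesis.getVoices().map(v => `${v.name} (${v.lang})`));
speechSynthesis.onvoiceschanged = logVoices;
logVoices(); // may print an empty array on the first call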
Proof of concept
// SpeechSynthesisRecorder.js guest271314 6-17-2017
// Motivation: Get audio output from `window.speechSynthesis.speak()` call
// as `ArrayBuffer`, `AudioBuffer`, `Blob`, `MediaSource`, `MediaStream`, `ReadableStream`, or other object or data types
// See https://lists.w3.org/Archives/Public/public-speech-api/2017Jun/0000.html
// https://github.com/guest271314/SpeechSynthesisRecorder
// Configuration: Analog Stereo Duplex
// Input Devices: Monitor of Built-in Audio Analog Stereo, Built-in Audio Analog Stereo
class SpeechSynthesisRecorder {
  constructor({ text = "", utteranceOptions = {}, recorderOptions = {}, dataType = "" }) {
    if (text === "") throw new Error("no words to synthesize");
    this.dataType = dataType;
    this.text = text;
    this.mimeType = MediaRecorder.isTypeSupported("audio/webm; codecs=opus")
      ? "audio/webm; codecs=opus" : "audio/ogg; codecs=opus";
    this.utterance = new SpeechSynthesisUtterance(this.text);
    this.speechSynthesis = window.speechSynthesis;
    this.mediaStream_ = new MediaStream();
    this.mediaSource_ = new MediaSource();
    this.mediaRecorder = new MediaRecorder(this.mediaStream_, {
      mimeType: this.mimeType,
      bitsPerSecond: 256 * 8 * 1024
    });
    this.audioContext = new AudioContext();
    this.audioNode = new Audio();
    this.chunks = Array();
    if (utteranceOptions) {
      if (utteranceOptions.voice) {
        this.speechSynthesis.onvoiceschanged = e => {
          const voice = this.speechSynthesis.getVoices().find(({
            name: _name
          }) => _name === utteranceOptions.voice);
          this.utterance.voice = voice;
          console.log(voice, this.utterance);
        }
        this.speechSynthesis.getVoices();
      }
      let {
        lang, rate, pitch
      } = utteranceOptions;
      Object.assign(this.utterance, {
        lang, rate, pitch
      });
    }
    this.audioNode.controls = "controls";
    document.body.appendChild(this.audioNode);
  }
  start(text = "") {
    if (text) this.text = text;
    if (this.text === "") throw new Error("no words to synthesize");
    return navigator.mediaDevices.getUserMedia({
        audio: true
      })
      .then(stream => new Promise(resolve => {
        const track = stream.getAudioTracks()[0];
        this.mediaStream_.addTrack(track);
        // return the current `MediaStream`
        if (this.dataType && this.dataType === "mediaStream") {
          resolve({ tts: this, data: this.mediaStream_ });
        };
        this.mediaRecorder.ondataavailable = event => {
          if (event.data.size > 0) {
            this.chunks.push(event.data);
          };
        };
        this.mediaRecorder.onstop = () => {
          track.stop();
          this.mediaStream_.getAudioTracks()[0].stop();
          this.mediaStream_.removeTrack(track);
          console.log(`Completed recording ${this.utterance.text}`, this.chunks);
          resolve(this);
        }
        this.mediaRecorder.start();
        this.utterance.onstart = () => {
          console.log(`Starting recording SpeechSynthesisUtterance ${this.utterance.text}`);
        }
        this.utterance.onend = () => {
          this.mediaRecorder.stop();
          console.log(`Ending recording SpeechSynthesisUtterance ${this.utterance.text}`);
        }
        this.speechSynthesis.speak(this.utterance);
      }));
  }
  blob() {
    if (!this.chunks.length) throw new Error("no data to return");
    return Promise.resolve({
      tts: this,
      data: this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, {
        type: this.mimeType
      })
    });
  }
  arrayBuffer(blob) {
    if (!this.chunks.length) throw new Error("no data to return");
    return new Promise(resolve => {
      const reader = new FileReader;
      reader.onload = e => resolve(({
        tts: this,
        data: reader.result
      }));
      reader.readAsArrayBuffer(blob ? new Blob(blob, {
        type: blob.type
      }) : this.chunks.length === 1 ? this.chunks[0] : new Blob(this.chunks, {
        type: this.mimeType
      }));
    });
  }
  audioBuffer() {
    if (!this.chunks.length) throw new Error("no data to return");
    return this.arrayBuffer()
      .then(({ data: ab }) => this.audioContext.decodeAudioData(ab))
      .then(buffer => ({
        tts: this,
        data: buffer
      }))
  }
  mediaSource() {
    if (!this.chunks.length) throw new Error("no data to return");
    return this.arrayBuffer()
      .then(({
        data: ab
      }) => new Promise((resolve, reject) => {
        this.mediaSource_.onsourceended = () => resolve({
          tts: this,
          data: this.mediaSource_
        });
        this.mediaSource_.onsourceopen = () => {
          if (MediaSource.isTypeSupported(this.mimeType)) {
            const sourceBuffer = this.mediaSource_.addSourceBuffer(this.mimeType);
            sourceBuffer.mode = "sequence"
            sourceBuffer.onupdateend = () =>
              this.mediaSource_.endOfStream();
            sourceBuffer.appendBuffer(ab);
          } else {
            reject(`${this.mimeType} is not supported`)
          }
        }
        this.audioNode.src = URL.createObjectURL(this.mediaSource_);
      }));
  }
  readableStream({ size = 1024, controllerOptions = null, rsOptions = {} } = {}) {
    if (!this.chunks.length) throw new Error("no data to return");
    const src = this.chunks.slice(0);
    const chunk = size;
    return Promise.resolve({
      tts: this,
      data: new ReadableStream(controllerOptions || {
        start(controller) {
          console.log(src.length);
          controller.enqueue(src.splice(0, chunk))
        },
        pull(controller) {
          if (src.length === 0) {
            controller.close();
            return;
          }
          controller.enqueue(src.splice(0, chunk));
        }
      }, rsOptions)
    });
  }
}
Usage
let ttsRecorder = new SpeechSynthesisRecorder({
  text: "The revolution will not be televised",
  utteranceOptions: {
    voice: "english-us espeak",
    lang: "en-US",
    pitch: .75,
    rate: 1
  }
});
// ArrayBuffer
ttsRecorder.start()
  // `tts` : `SpeechSynthesisRecorder` instance, `data` : audio as `dataType` or method call result
  .then(tts => tts.arrayBuffer())
  .then(({ tts, data }) => {
    // do stuff with `ArrayBuffer`, `AudioBuffer`, `Blob`,
    // `MediaSource`, `MediaStream`, `ReadableStream`
    // `data` : `ArrayBuffer`
    tts.audioNode.src = URL.createObjectURL(new Blob([data], { type: tts.mimeType }));
    tts.audioNode.title = tts.utterance.text;
    tts.audioNode.onloadedmetadata = () => {
      console.log(tts.audioNode.duration);
      tts.audioNode.play();
    }
  })
// AudioBuffer
ttsRecorder.start()
  .then(tts => tts.audioBuffer())
  .then(({ tts, data }) => {
    // `data` : `AudioBuffer`
    let source = tts.audioContext.createBufferSource();
    source.buffer = data;
    source.connect(tts.audioContext.destination);
    source.start()
  })
// Blob
ttsRecorder.start()
  .then(tts => tts.blob())
  .then(({ tts, data }) => {
    // `data` : `Blob`
    tts.audioNode.src = URL.createObjectURL(data);
    tts.audioNode.title = tts.utterance.text;
    tts.audioNode.onloadedmetadata = () => {
      console.log(tts.audioNode.duration);
      tts.audioNode.play();
    }
  })
// ReadableStream
ttsRecorder.start()
  .then(tts => tts.readableStream())
  .then(({ tts, data }) => {
    // `data` : `ReadableStream`
    console.log(tts, data);
    data.getReader().read().then(({ value, done }) => {
      tts.audioNode.src = URL.createObjectURL(value[0]);
      tts.audioNode.title = tts.utterance.text;
      tts.audioNode.onloadedmetadata = () => {
        console.log(tts.audioNode.duration);
        tts.audioNode.play();
      }
    })
  })
// MediaSource
ttsRecorder.start()
  .then(tts => tts.mediaSource())
  .then(({ tts, data }) => {
    console.log(tts, data);
    // `data` : `MediaSource`
    tts.audioNode.srcObject = data;
    tts.audioNode.title = tts.utterance.text;
    tts.audioNode.onloadedmetadata = () => {
      console.log(tts.audioNode.duration);
      tts.audioNode.play();
    }
  })
// MediaStream
let ttsRecorder = new SpeechSynthesisRecorder({
  text: "The revolution will not be televised",
  utteranceOptions: {
    voice: "english-us espeak",
    lang: "en-US",
    pitch: .75,
    rate: 1
  },
  dataType: "mediaStream"
});
ttsRecorder.start()
  .then(({ tts, data }) => {
    // `data` : `MediaStream`
    // do stuff with active `MediaStream`
  })
  .catch(err => console.log(err))
plnkr
This is updated code from the previous answer which works in Chrome 96:
make sure to select the "Share system audio" checkbox in the "Choose what to share" window
it won't run via an SO code snippet (save it to demo.html)
<script>
(async () => {
  const text = "The revolution will not be televised";
  const blob = await new Promise(async resolve => {
    console.log("picking system audio");
    const stream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
    const track = stream.getAudioTracks()[0];
    if (!track)
      throw "System audio not available";
    stream.getVideoTracks().forEach(track => track.stop());
    const mediaStream = new MediaStream();
    mediaStream.addTrack(track);
    const chunks = [];
    const mediaRecorder = new MediaRecorder(mediaStream, { bitsPerSecond: 128000 });
    mediaRecorder.ondataavailable = event => {
      if (event.data.size > 0)
        chunks.push(event.data);
    }
    mediaRecorder.onstop = () => {
      stream.getTracks().forEach(track => track.stop());
      mediaStream.removeTrack(track);
      resolve(new Blob(chunks));
    }
    mediaRecorder.start();
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.onend = () => mediaRecorder.stop();
    window.speechSynthesis.speak(utterance);
    console.log("speaking...");
  });
  console.log("audio available", blob);
  const player = new Audio();
  player.src = URL.createObjectURL(blob);
  player.autoplay = true;
  player.controls = true;
})()
</script>
