Okay, so I've been trying to do this for a long time but I just can't find a solution. I'm building a personal voice assistant that only records when a hotword is detected, and everything up to this point works fine. To record the audio, I'm using the npm package node-record-lpcm16. I can't seem to find a way to pause or stop (and later restart) the recording. The npm page for the recorder documents a recording.stop() function,
but it doesn't work for me. My code right now is:
// Voice-assistant bootstrap: stream mic audio to Google Speech-to-Text and
// dispatch recognized phrases, gated by the "computer" hotword.
const recorder = require('node-record-lpcm16');
const fs = require('fs'); // built-in fs suffices for createWriteStream; the 'file-system' package is unnecessary
const speech = require('@google-cloud/speech'); // was '#google-cloud/speech' — not a valid package name
const say = require('say');
const notifier = require('node-notifier');
const Bumblebee = require('bumblebee-hotword-node');
const { setTimeout } = require('timers');
const { record } = require('node-record-lpcm16');

const bumblebee = new Bumblebee(); // invoke the constructor explicitly
const voice = 'Microsoft Zira Desktop';
bumblebee.addHotword('computer');

// Streaming-recognition config for raw 16 kHz LINEAR16 audio.
const config = {
  encoding: 'LINEAR16',
  sampleRateHertz: 16000,
  languageCode: 'en-US',
};
const request = {
  config,
  interimResults: false,
};

const client = new speech.SpeechClient();
const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  // NOTE(review): assumes results[0].alternatives[0] always exists — Google can
  // return empty results for silence; guard before shipping.
  .on('data', data => findfunction(data.results[0].alternatives[0].transcript));

// The raw mic stream. Piping it to the NUL device "parks" the audio; piping it
// back into recognizeStream resumes recognition.
const recording = recorder.record({
  sampleRateHertz: 16000,
  threshold: 0,
  recorder: 'sox',
  silence: '5.0',
})
  .stream().on('error', console.error);
recording.pipe(recognizeStream);

bumblebee.on('hotword', function (hotword) {
  console.log('Hotword detected:', hotword); // actions run as soon as the hotword is detected
  recording.pipe(recognizeStream);
  setTimeout(function stop() {
    recording.pipe(fs.createWriteStream('\\\\.\\NUL')); // Windows null device — discard audio (was joined with a comma operator)
    console.log('Stopped Recording.');
  }, 5000);
});

console.log('Computer initiated.');
bumblebee.start();
//Everything down from here is just what do to with the translated speech, it doesn't play a role in my problem.
function findfunction(Data){
let findFunction = Data;
console.log(Data);
if(findFunction.includes('time')){
whattimeisit(findFunction);
};
if(findFunction.includes('day')){
whatdateisit(findFunction);
};
if(findFunction.includes('thank you')){
thankyou();
};
if(findFunction.includes('remind')){
setatimer(findFunction);
};
};
// Speak and log the current local time.
function whattimeisit(timeString){
  const time = new Date().toLocaleTimeString();
  const message = `It's currently ${time}.`;
  say.speak(message, voice);
  console.log(message);
}
// Speak and log today's date.
function whatdateisit(dateString){
  const currentDate = new Date().toLocaleDateString();
  const message = `It's currently ${currentDate}.`;
  say.speak(message, voice);
  console.log(message);
}
// Respond politely to a "thank you".
function thankyou(){
  const message = "You're welcome!";
  say.speak(message, voice);
  console.log(message);
}
// Parse the number of minutes out of a "remind" command and schedule a desktop
// notification for when the timer runs out.
function setatimer(timerString){
  const digits = timerString.replace(/\D/g, '');
  const timer = Number.parseInt(digits, 10); // NaN when the command contains no digits
  if (!Number.isFinite(timer) || timer <= 0) {
    // Fix: previously an empty match made `'' * 60000 === 0`, firing the
    // notification immediately and announcing "Set a timer for  minutes."
    say.speak("Sorry, I didn't catch how many minutes.", voice);
    console.log('setatimer: no usable number in:', timerString);
    return;
  }
  setTimeout(function stop() {
    notifier.notify({ title: 'Computer', message: 'Your timer ran out!', icon: './computericon1.png' });
  }, timer * 60000);
  const unit = timer === 1 ? 'minute' : 'minutes';
  say.speak(`Set a timer for ${timer} ${unit}.`, voice);
  console.log(`Set a timer for ${timer} ${unit}.`);
}
Any help would be greatly appreciated!
I've played around with your code — it's definitely a fun project to play with!
I would suggest maybe just modifying the code to record to a buffer, then send that to the google speech recognition engine.
The reason recording.stop() was probably not working for you is that you were calling it on the stream. If we separate the recording and recordingStream variables we can control the flow better.
I've updated the code so when we get the hotword, we stop recording, recognize the speech, then start recording again.
const recorder = require('node-record-lpcm16');
const Bumblebee = require('bumblebee-hotword-node');
const say = require('say');
const voice = 'Microsoft Zira Desktop';
const speech = require('#google-cloud/speech');
// Shared state between startRecording() and the hotword handler:
// `chunks` buffers raw PCM data, `recording` is the active recorder handle.
let chunks = null;
let recording = null;
// Start (or restart) capturing mic audio into the shared `chunks` buffer.
// When the recording stream ends (i.e. recording.stop() was called), the
// buffered audio is sent off for recognition and capture starts over.
function startRecording() {
  console.log("listening...");
  chunks = [];
  recording = recorder.record({
    sampleRateHertz: 16000,
    threshold: 0,
    recorder: 'sox',
    silence: '5.0',
  });
  const audioStream = recording.stream();
  audioStream.on('error', () => {});
  // Cap the buffer so only the last few seconds of audio reach Google.
  const maxChunks = 10;
  audioStream.on('data', (chunk) => {
    chunks.push(chunk);
    if (chunks.length > maxChunks) {
      chunks = chunks.slice(-maxChunks);
    }
  });
  audioStream.on('end', async () => {
    // Collapse what we captured into one Buffer — only a few seconds long.
    const audioBuffer = Buffer.concat(chunks);
    console.log("Chunk count:", chunks.length);
    await recognizeSpeech(audioBuffer);
    startRecording();
  });
}
// Convert a raw LINEAR16 audio buffer to text via Google Cloud Speech and hand
// the transcript to the command dispatcher.
async function recognizeSpeech(audioBuffer) {
  console.log(`recognizeSpeech: Converting audio buffer to text (${audioBuffer.length} bytes)...`);
  const client = new speech.SpeechClient();
  const request = {
    config: { encoding: 'LINEAR16', sampleRateHertz: 16000, languageCode: 'en-US' },
    audio: { content: audioBuffer.toString("base64") },
  };
  // client.recognize() resolves to [response, ...] — destructure instead of indexing.
  const [response] = await client.recognize(request);
  // Fix: guard against empty results — Google returns none for silence or
  // unintelligible audio, which previously crashed on results[0].
  const transcript = response?.results?.[0]?.alternatives?.[0]?.transcript;
  if (transcript) {
    findfunction(transcript);
  } else {
    console.log('recognizeSpeech: no transcript returned.');
  }
}
// Kick off the capture loop and the hotword listener.
startRecording();
startBumblebee();
// Listen for the "computer" hotword; two seconds after it fires, stop the
// recorder so the buffered audio gets recognized.
function startBumblebee() {
  const bumblebee = new Bumblebee();
  bumblebee.addHotword('computer');
  bumblebee.on('hotword', (hotword) => {
    console.log('Hotword detected:', hotword);
    setTimeout(() => {
      console.log('Stopping recording...');
      recording.stop();
    }, 2000);
  });
  bumblebee.start();
}
// Nothing changed from here...
function findfunction(Data){
let findFunction = Data;
console.log(Data);
if(findFunction.includes('time')){
whattimeisit(findFunction);
};
if(findFunction.includes('day')){
whatdateisit(findFunction);
};
if(findFunction.includes('thank you')){
thankyou();
};
if(findFunction.includes('remind')){
setatimer(findFunction);
};
};
// Announce the current local time out loud and on the console.
function whattimeisit(timeString){
  const now = new Date();
  const spoken = `It's currently ${now.toLocaleTimeString()}.`;
  say.speak(spoken, voice);
  console.log(spoken);
}
// Announce today's date out loud and on the console.
function whatdateisit(dateString){
  const now = new Date();
  const spoken = `It's currently ${now.toLocaleDateString()}.`;
  say.speak(spoken, voice);
  console.log(spoken);
}
// Polite reply for a recognized "thank you".
function thankyou(){
  const reply = "You're welcome!";
  say.speak(reply, voice);
  console.log(reply);
}
// Parse the minute count out of a "remind" command and schedule a desktop
// notification when the timer runs out.
function setatimer(timerString){
  const digits = timerString.replace(/\D/g, '');
  const timer = Number.parseInt(digits, 10); // NaN if the command has no digits
  if (!Number.isFinite(timer) || timer <= 0) {
    // Fix: an empty match previously made `'' * 60000 === 0`, firing the
    // notification immediately and announcing "Set a timer for  minutes."
    say.speak("Sorry, I didn't catch how many minutes.", voice);
    console.log('setatimer: no usable number in:', timerString);
    return;
  }
  setTimeout(function stop() {
    notifier.notify({ title: 'Computer', message: 'Your timer ran out!', icon: './computericon1.png' });
  }, timer * 60000);
  const unit = timer === 1 ? 'minute' : 'minutes';
  say.speak(`Set a timer for ${timer} ${unit}.`, voice);
  console.log(`Set a timer for ${timer} ${unit}.`);
}
Related
I'm using the Google Cloud API for Speech-to-text, with a NodeJS back-end.
The app needs to be able to listen for voice commands, and transmit them to the back-end as a buffer. For this, I need to send the buffer of the preceding audio when silence is detected.
Any help would be appreciated. Including the js code below
// Legacy getUserMedia shim: fall back to vendor-prefixed implementations.
// NOTE(review): navigator.getUserMedia is deprecated; new code should use
// navigator.mediaDevices.getUserMedia instead.
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia) {
navigator.getUserMedia({audio: true}, success, function (e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
// Flag toggled from the page; samples are only processed while it is true.
var recording = false;
window.startRecording = function () {
recording = true;
};
window.stopRecording = function () {
recording = false;
// window.Stream.end();
};
// Wire the mic stream into a ScriptProcessor so raw Float32 samples can be read.
// NOTE(review): audioContext, context, audioInput and recorder are implicit
// globals here — they should be declared with var/let/const.
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// the sample rate is in context.sampleRate
audioInput = context.createMediaStreamSource(e);
var bufferSize = 4096;
recorder = context.createScriptProcessor(bufferSize, 1, 1);
recorder.onaudioprocess = function (e) {
if (!recording) return;
console.log('recording');
var left = e.inputBuffer.getChannelData(0);
// convertoFloat32ToInt16 is defined elsewhere — presumably converts the
// Float32 samples to Int16 PCM; TODO confirm.
console.log(convertoFloat32ToInt16(left));
};
audioInput.connect(recorder);
recorder.connect(context.destination);
}
I'm not too sure as to what exactly is being asked in the question, so this answer is only intended to give a way to detect silences in an AudioStream.
To detect silence in an AudioStream, you can use an AudioAnalyser node, on which you will call the getByteFrequencyData method at regular intervals, and check whether there were sounds higher than your expected level for a given time.
You can set the threshold level directly with the minDecibels property of the AnalyserNode.
// Watch an audio MediaStream and invoke callbacks when sound stops or resumes.
// `min_decibels` sets the analyser threshold — anything quieter counts as
// silence once it has lasted `silence_delay` milliseconds.
function detectSilence(
  stream,
  onSoundEnd = _ => {},
  onSoundStart = _ => {},
  silence_delay = 500,
  min_decibels = -80
) {
  const audioCtx = new AudioContext();
  const analyser = audioCtx.createAnalyser();
  const sourceNode = audioCtx.createMediaStreamSource(stream);
  sourceNode.connect(analyser);
  analyser.minDecibels = min_decibels;
  const freqData = new Uint8Array(analyser.frequencyBinCount);
  let silenceStart = performance.now();
  let reported = false; // fire onSoundEnd only once per silence period
  const loop = (time) => {
    requestAnimationFrame(loop); // poll roughly once per frame (~60 Hz)
    analyser.getByteFrequencyData(freqData);
    if (freqData.some(v => v)) { // any bin above the dB floor means sound
      if (reported) {
        reported = false;
        onSoundStart();
      }
      silenceStart = time;
    }
    if (!reported && time - silenceStart > silence_delay) {
      onSoundEnd();
      reported = true;
    }
  };
  loop();
}
// Console reporters used as detectSilence callbacks.
const onSilence = () => {
  console.log('silence');
};
const onSpeak = () => {
  console.log('speaking');
};
// Request mic access and start the silence detector on the captured stream.
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
detectSilence(stream, onSilence, onSpeak);
// do something else with the stream
})
.catch(console.error);
And as a fiddle since stackSnippets may block gUM.
You can use SpeechRecognition result event to determine when a word or phrase has been recognized, for example, ls, cd, pwd or other commands, pass the .transcript of SpeechRecognitionAlternative to speechSynthesis.speak() where at attached start and end event of SpeechSynthesisUtterance call .start() or .resume() on MediaRecorder object where MediaStream is passed; convert the Blob at dataavailable event to an ArrayBuffer using FileReader or Response.arrayBuffer().
We could alternatively use audiostart or soundstart with audioend or soundend events of SpeechRecognition to record the users' actual voice, though the ends may not be fired consistently in relation to the actual start and end of audio captured by only a standard system microphone.
<!DOCTYPE html>
<html>
<head>
<title>Speech Recognition Recording</title>
</head>
<body>
<input type="button" value="Stop speech command recognition" id="stop">
<script>
// Record the user's mic only while recognized speech is being spoken back:
// SpeechRecognition produces transcripts, speechSynthesis speaks them, and
// the MediaRecorder runs between the utterance's start and end events.
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
const recorder = new MediaRecorder(stream);
const recognition = new webkitSpeechRecognition();
const synthesis = new SpeechSynthesisUtterance();
// Handle a recognition result: once final, speak the transcript back
// (which in turn drives the recorder via synthesis.onstart/onend below).
const handleResult = e => {
recognition.onresult = null;
console.log(e.results);
const result = e.results[e.results.length - 1];
if (result.isFinal) {
const [{transcript}] = result;
console.log(transcript);
synthesis.text = transcript;
window.speechSynthesis.speak(synthesis);
}
}
// Start a new recording, or resume a paused one, while speech plays.
synthesis.onstart = () => {
if (recorder.state === "inactive") {
recorder.start()
} else {
if (recorder.state === "paused") {
recorder.resume();
}
}
}
// Pause and flush the captured data once speech playback ends.
synthesis.onend = () => {
recorder.pause();
recorder.requestData();
}
recorder.ondataavailable = async(e) => {
if (stream.active) {
try {
// Convert the recorded Blob to an ArrayBuffer via a blob: URL fetch.
const blobURL = URL.createObjectURL(e.data);
const request = await fetch(blobURL);
const ab = await request.arrayBuffer();
console.log(blobURL, ab);
recognition.onresult = handleResult;
// URL.revokeObjectURL(blobURL);
} catch (err) {
throw err
}
}
}
recorder.onpause = e => {
console.log("recorder " + recorder.state);
}
recognition.continuous = true;
recognition.interimResults = false;
recognition.maxAlternatives = 1;
recognition.start();
recognition.onend = e => {
console.log("recognition ended, stream.active", stream.active);
if (stream.active) {
console.log(e);
// the service disconnects after a period of time
recognition.start();
}
}
recognition.onresult = handleResult;
stream.oninactive = () => {
console.log("stream ended");
}
// Stop everything: recognition, the recorder and all mic tracks.
document.getElementById("stop")
.onclick = () => {
console.log("stream.active:", stream.active);
if (stream && stream.active && recognition) {
recognition.abort();
recorder.stop();
for (let track of stream.getTracks()) {
track.stop();
}
console.log("stream.active:", stream.active);
}
}
})
.catch(err => {
console.error(err)
});
</script>
</body>
</html>
plnkr https://plnkr.co/edit/4DVEg6mhFRR94M5gdaIp?p=preview
The simplest approach would be to use .pause() and .resume(), .stop() methods of MediaRecorder() to allow user to start, pause, and stop recording audio captured utilizing navigator.mediaDevices.getUserMedia() and convert the resulting Blob to an ArrayBuffer, if that is what the api is expecting to be POSTed to server
<!DOCTYPE html>
<html>
<head>
<title>User Media Recording</title>
</head>
<body>
<input type="button" value="Start/resume recording audio" id="start">
<input type="button" value="Pause recording audio" id="pause">
<input type="button" value="Stop recording audio" id="stop">
<script>
// Minimal start/pause/stop audio recorder built on MediaRecorder; every pause
// flushes the captured data as an ArrayBuffer (e.g. for POSTing to a server).
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
const recorder = new MediaRecorder(stream);
recorder.ondataavailable = async(e) => {
if (stream.active) {
try {
const blobURL = URL.createObjectURL(e.data);
const request = await fetch(blobURL);
const ab = await request.arrayBuffer();
// do stuff with `ArrayBuffer` of recorded audio
console.log(blobURL, ab);
// we do not need the `Blob URL`, we can revoke the object
// URL.revokeObjectURL(blobURL);
} catch (err) {
throw err
}
}
}
// Pausing triggers a data flush via requestData().
recorder.onpause = e => {
console.log("recorder " + recorder.state);
recorder.requestData();
}
stream.oninactive = () => {
console.log("stream ended");
}
// Start a new recording or resume a paused one.
document.getElementById("start")
.onclick = () => {
if (recorder.state === "inactive") {
recorder.start();
} else {
recorder.resume();
}
console.log("recorder.state:", recorder.state);
}
document.getElementById("pause")
.onclick = () => {
if (recorder.state === "recording") {
recorder.pause();
}
console.log("recorder.state:", recorder.state);
}
// Stop the recorder, release the mic tracks, and disable the buttons.
document.getElementById("stop")
.onclick = () => {
if (recorder.state === "recording" || recorder.state === "paused") {
recorder.stop();
}
for (let track of stream.getTracks()) {
track.stop();
}
document.getElementById("start").onclick = null;
document.getElementById("pause").onclick = null;
console.log("recorder.state:", recorder.state
, "stream.active", stream.active);
}
})
.catch(err => {
console.error(err)
});
</script>
</body>
</html>
plnkr https://plnkr.co/edit/7caWYMsvub90G6pwDdQp?p=preview
I have set 4 timeouts for audio in my application and I need to stop the audio after the user clicks. The macro function is working correctly; however, clearTimeout does not stop the sound. Does anyone know how to clear it?
/**
 * Restart playback of `audio` from the beginning.
 * @param {HTMLAudioElement} audio element (or audio-like object) to play
 * @returns the value returned by audio.play() (a Promise in browsers)
 */
export function handlePlay(audio) {
  audio.currentTime = 0;
  const playback = audio.play();
  return playback;
}
// Schedule four random conversation clips; calling with `clear = true` returns
// a function that cancels every clip scheduled by earlier calls.
// Fix: the original defined timer1..timer4 as functions *returning* timeout
// ids and then passed the functions themselves to clearTimeout — a no-op, so
// the sounds kept playing. We now keep the real ids, persisted on the function
// object so a later `handleConversation(true)` can reach them.
export function handleConversation(clear) {
  const pending = (handleConversation._pending ??= []);
  if (clear) {
    console.log('enter clear');
    return () => {
      pending.forEach(id => clearTimeout(id));
      pending.length = 0;
    };
  }
  const playRandom = () =>
    handlePlay(conversation[Math.floor(Math.random() * conversation.length)]);
  for (const delay of [TIME1, TIME2, TIME3, TIME4]) {
    pending.push(setTimeout(playRandom, delay));
  }
}
After calling the clearTimeouts, also stop the audio that is already playing:
audio.pause();
audio.currentTime = 0;
Here a suggestion of what you could do.
I guess this could be improved further regarding how this handleConversation function is used, I didn't really get the whole idea and there is still some inconsistencies...
// Assign a randomly chosen clip from `conversation` to the given track.
function createAudio(track) {
  const index = Math.floor(Math.random() * conversation.length);
  track.audio = conversation[index];
}
// A scheduled audio clip: wraps an Audio element plus the setTimeout handle
// that will start it, so both can be cancelled together via stop().
export class Track {
  constructor(time) {
    this.time = time;       // delay in ms before the clip should play
    this.timeoutid = 0;     // pending setTimeout handle (0 = none)
    this.audio = new Audio();
  }
  // Schedule this track. Fix: the original always used the global TIME1 as the
  // delay, ignoring the per-track `time` passed to the constructor.
  timer() {
    this.timeoutid = setTimeout(() => {
      createAudio(this);
      handlePlay(this.audio);
    }, this.time);
  }
  play() {
    this.audio.currentTime = 0;
    this.audio.play();
  }
  stop() {
    this.audio.pause();
    this.audio.currentTime = 0;
    clearTimeout(this.timeoutid);
  }
}
// Build the four conversation tracks and either return a stop-all callback
// (clear === true) or schedule them all.
export function handleConversation(clear) {
  const tracks = [TIME1, TIME2, TIME3, TIME4].map(t => new Track(t));
  // NOTE: tracks are recreated on every call, so the `clear` branch can only
  // stop tracks created within this same call. I don't really understand how
  // this is used — the tracks should more likely live in a persistent object
  // outside the function; I will update my answer given more details.
  if (clear) {
    console.log('enter clear');
    return () => {
      tracks.forEach(track => {
        track.stop();
      });
    };
  }
  tracks.forEach(track => {
    track.timer();
  });
}
I have a project requirement to record and store the currently running video stream. I have used webRTC for video streaming, and to record the video streaming, I have used MediaRecorder API. It is completely working fine in the desktop system. But it is not working in the mobile browser.
Any idea why it is not working in the mobile browser?
Following is the code snippet:
// React lifecycle: on mount, capture the screen (plus merged local/remote
// audio) and start recording it with MediaRecorder.
// NOTE(review): recordingStream, captureStream, mediaRecorder and options are
// names not declared in this snippet — confirm they exist at module scope.
componentDidMount = async () => {
recordingStream = await navigator.mediaDevices.getDisplayMedia({
video: true,
audio: true,
});
const mergedAudio = await this.mergeAudioStreams();
console.log("audio track length... ", mergedAudio.length);
// One stream combining the screen's video tracks with the mixed audio tracks.
const tracks = [
...recordingStream.getVideoTracks(),
...mergedAudio,
];
captureStream = new MediaStream(tracks);
mediaRecorder = new MediaRecorder(captureStream, options);
mediaRecorder.ondataavailable = this.handleDataAvailable;
mediaRecorder.start();
}
// MediaRecorder dataavailable handler: buffer each non-empty chunk and then
// trigger the upload.
handleDataAvailable = async event => {
  console.log("data-available", event);
  if (event.data.size === 0) {
    // ...
    console.log("in else");
    return;
  }
  recordedChunks.push(event.data);
  this.download();
};
// Bundle the recorded chunks into a single MP4 blob and upload it.
download = async () => {
  console.log("in download fn");
  // Fix: `new Blob(...)` is synchronous — the original `await` on the
  // constructor result was a no-op.
  const blob = new Blob(recordedChunks, {
    type: "video/mp4",
  });
  //called the API to store the recorded video
};
// Mix local and remote audio into a single destination stream and return its
// audio tracks for recording.
// Fix: the original called localSource.connect()/remoteSource.connect()
// unconditionally, so it threw a TypeError whenever either stream had no
// audio track — exactly the case its own length checks detected.
mergeAudioStreams = async () => {
  console.log("recordScreen fn called");
  const ctx = new AudioContext();
  const dest = ctx.createMediaStreamDestination();
  if (this.state.localStream.getAudioTracks().length > 0) {
    const localSource = ctx.createMediaStreamSource(this.state.localStream);
    const localGain = ctx.createGain();
    localGain.gain.value = 0.7; // slight attenuation so the mix doesn't clip
    localSource.connect(localGain).connect(dest);
  }
  if (this.state.selectedVideo.stream.getAudioTracks().length > 0) {
    const remoteSource = ctx.createMediaStreamSource(
      this.state.selectedVideo.stream
    );
    const remoteGain = ctx.createGain();
    remoteGain.gain.value = 0.7;
    remoteSource.connect(remoteGain).connect(dest);
  }
  console.log("combine tracks..", dest.stream.getAudioTracks());
  return dest.stream.getAudioTracks();
};
This code is not working as the video audio muted on the user1 screen but on the next user2 screen it's unable to stop. The same problem is with the mic. Please help me out.
I tried it out on the localhost only. Please tell if this problem occurs on localhost only or will occur in deployed web applications also.
// Socket.io + PeerJS video-chat client: DOM handles, the peer connection, and
// the local stream shared by the call/answer handlers below.
const socket = io("/");
const chatInputBox = document.getElementById("chat_message");
const all_messages = document.getElementById("all_messages");
const main__chat__window = document.getElementById("main__chat__window");
const videotable = document.getElementById("video-table");
const myVideo = document.createElement("video");
myVideo.muted = true; // mute own playback so we don't hear our own voice
var peer = new Peer(undefined, {
path: "/peerjs",
host: "/",
port: "3000",
});
let myVideoStream;
const peers = {};
// Legacy vendor-prefixed getUserMedia fallback, used by the second
// peer.on("call") handler further down.
var getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
//setting initial controls of video and passing them as constraints
const constraints = {
'video': true,
'audio': true
}
// Acquire the local camera/mic, show it, then wire up: answering incoming
// calls, calling newly connected users, and the chat box.
navigator.mediaDevices
.getUserMedia(constraints)
.then((stream) => {
myVideoStream = stream;
addVideoStream(myVideo,myVideoStream);//call to function addVideoStream
//answering to calls
peer.on("call", (call) => {
call.answer(myVideoStream);
const video = document.createElement("video");
call.on("stream", (userVideoStream) => {
addVideoStream(video, userVideoStream);// Show stream in some video/canvas element.
});
});
socket.on("user_connected", (userId) => {//receiving info
connectToNewUser(userId, stream);//call function with id and stream
});
//adding event for messages of chat (Enter key sends)
document.addEventListener("keydown", (e) => {
if (e.which === 13 && chatInputBox.value != "") {
socket.emit("message", chatInputBox.value);
chatInputBox.value = "";
}
});
//adding text to chat window
socket.on("createMessage", (msg) => {
let li = document.createElement("li");
// NOTE(review): assigning untrusted chat text to innerHTML permits HTML/script
// injection — prefer textContent here.
li.innerHTML = msg;
all_messages.append(li);
main__chat__window.scrollTop = main__chat__window.scrollHeight;//scrolled to latest message
});
});
//For disconnecting user: close the peer connection we stored for that id.
socket.on('user_disconnected', userId => {
if (peers[userId]) peers[userId].close()
});
// Fallback call-answer path using the legacy getUserMedia shim.
// NOTE(review): this duplicates the peer.on("call") handler registered above;
// both will fire for each incoming call.
peer.on("call", function (call) {
getUserMedia(constraints,
function (stream) {
call.answer(stream); // Answer the call with an A/V stream.
const video = document.createElement("video");
call.on("stream", function (remoteStream) {
addVideoStream(video, remoteStream); // Show stream in some video/canvas element.
});
},
function (err) {
console.log("Failed to get local stream", err);
}
);
});
peer.on("open", (id) => {
// 'open' fires once we are connected to the PeerServer and have our peer id.
socket.emit("join_room", ROOM_ID, id);//emitting event
});
// Fetch an array of devices of a certain type
async function getConnectedDevices(type) {
const devices = await navigator.mediaDevices.enumerateDevices();
return devices.filter(device => device.kind === type)
}
// Open camera with echoCancellation for better audio
async function openCamera(cameraId) {
const constraints = {
'audio': {'echoCancellation': true}
}
return await navigator.mediaDevices.getUserMedia(constraints);
}
const cameras = getConnectedDevices('videoinput');
if (cameras && cameras.length > 0) {
const stream = openCamera(cameras[0].deviceId);
}
// Place a call to a newly joined user and render their stream when it arrives.
function connectToNewUser (userId, streams) {
  const call = peer.call(userId, streams);
  const videoEl = document.createElement("video");
  call.on("stream", (userVideoStream) => {
    addVideoStream(videoEl, userVideoStream);
  });
  call.on('close', () => {
    // drop the video element once the call closes
    videoEl.remove();
  });
  peers[userId] = call;
}
// Attach a MediaStream to a <video> element, add it to the grid, and resize
// every tile so they share the row evenly.
const addVideoStream = (videoEl, stream) => {
  videoEl.srcObject = stream;
  videoEl.addEventListener("loadedmetadata", () => {
    videoEl.play();
  });
  videotable.append(videoEl); // adding video to front-end
  const totalUsers = document.getElementsByTagName("video").length;
  if (totalUsers > 1) {
    for (let i = 0; i < totalUsers; i++) {
      document.getElementsByTagName("video")[i].style.width =
        100 / totalUsers + "%";
    }
  }
};
//js for pause and play of video
//js for pause and play of video
const playStop = () => {
  const videoTrack = myVideoStream.getVideoTracks()[0];
  if (videoTrack.enabled) {
    videoTrack.enabled = false;
    setPlayVideo();
  } else {
    videoTrack.enabled = true;
    setStopVideo();
  }
};
//js of pause and play of audio
const muteUnmute = () => {
  const audioTrack = myVideoStream.getAudioTracks()[0];
  if (audioTrack.enabled) {
    audioTrack.enabled = false;
    setUnmuteButton();
  } else {
    audioTrack.enabled = true;
    setMuteButton();
  }
};
//setting icon for representing current state of video
const setPlayVideo = () => {
const html = `<i class="unmute fa fa-pause-circle"></i>
<span class="unmute">Resume Video</span>`;
document.getElementById("playPauseVideo").innerHTML = html;
};
//setting icon for representing current state of video
const setStopVideo = () => {
const html = `<i class=" fa fa-video-camera"></i>
<span class="">Pause Video</span>`;
document.getElementById("playPauseVideo").innerHTML = html;
};
//setting icon for representing current state of audio
const setUnmuteButton = () => {
const html = `<i class="unmute fa fa-microphone-slash"></i>
<span class="unmute">Unmute</span>`;
document.getElementById("muteButton").innerHTML = html;
};
//setting icon for representing current state of audio
const setMuteButton = () => {
const html = `<i class="fa fa-microphone"></i>
<span>Mute</span>`;
document.getElementById("muteButton").innerHTML = html;
};
This code is not working as the video audio muted on the user1 screen but on the next user2 screen it's unable to stop. The same problem is with the mic. Please help me out.
I tried it out on the localhost only. Please tell if this problem occurs on localhost only or will occur in deployed web applications also.
Hello everyone. I am trying to record audio; after recording, I play it back, and when I hit the Cut function it takes the second I am currently at in the player, cuts there, and starts recording again, overwriting the chunks array from that point to the end with the new recording. To sum up: when I record 5 seconds and then want to overwrite the last 3 seconds, I remove the last 3 elements of the array and push the new chunks onto the original array.
when I do that and send the new chunk to the audio player it plays the first time as intended and if I pressed play again it just plays the newly recorded part only without the first 2 seconds that I preserved from the old recording
// Shared recorder state: `audioChunks` holds one Blob per second of audio
// (mediaRecorder.start(1000) below emits a chunk roughly every 1000 ms);
// `Recorder` is the active { start, stop } handle.
let audioChunks = [];
let Recorder = null;
// Build a recorder around getUserMedia + MediaRecorder. Resolves to a
// { start, stop } handle; stop() itself resolves to { audioBlob, audioUrl,
// play } and points the #AudioRecodingPlayer element at the recording.
const recordAudio = () => {
return new Promise(resolve => {
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const mediaRecorder = new MediaRecorder(stream);
// Each emitted chunk (one per second; see start(1000)) is appended to the
// module-level audioChunks array.
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
const start = () => {
mediaRecorder.start(1000);
};
// stop(): attach the "stop" listener first, then request the stop, so the
// final blob is assembled only after the recorder has fully flushed.
const stop = () => {
return new Promise(resolve => {
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks,{type: 'audio/mpeg-3'});
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
document.getElementById("AudioRecodingPlayer").src = audio.src;
document.getElementById("AudioRecodingPlayer").currentTime = 0;
const play = () => {
audio.play();
};
resolve({ audioBlob, audioUrl, play });
});
mediaRecorder.stop();
});
};
resolve({ start, stop });
});
});
};
// Create a fresh recorder handle and begin capturing right away.
async function StartRecording(){
  const recorderHandle = await recordAudio();
  Recorder = recorderHandle;
  recorderHandle.start();
}
// Ask the active recorder to stop; the recorder's "stop" handler updates the
// audio player. (The resulting promise is intentionally not awaited here.)
async function StopRecording(){
  const stopped = Recorder.stop();
}
// Cut: truncate the buffered audio at the current playhead (in seconds) and
// start recording again so new audio continues from the cut point.
// NOTE(review): indexing chunks by seconds only lines up because
// mediaRecorder.start(1000) emits roughly one chunk per second.
function Cut(){
var Audio = document.getElementById("AudioRecodingPlayer");
var CurrenTime = Math.round(Audio.currentTime);
audioChunks.length = CurrenTime;
StartRecording();
}
On the Cut() function, you are not actually removing the elements of the array, nor changing the index or window. You are only changing the value of length.
What you would have to do is overwrite the audio chunks, or remove the chunks from that point forward.
// Recorder state for the fixed version below: one chunk per second of audio,
// plus the active { start, stop } handle.
let audioChunks = [];
let Recorder = null;
// Same recorder factory as in the question: resolves to a { start, stop }
// handle; stop() resolves to { audioBlob, audioUrl, play } once MediaRecorder
// has flushed, and wires the blob into the #AudioRecodingPlayer element.
const recordAudio = () => {
return new Promise(resolve => {
navigator.mediaDevices.getUserMedia({ audio: true })
.then(stream => {
const mediaRecorder = new MediaRecorder(stream);
// One chunk per second (see start(1000)) appended to audioChunks.
mediaRecorder.addEventListener("dataavailable", event => {
audioChunks.push(event.data);
});
const start = () => {
mediaRecorder.start(1000);
};
const stop = () => {
return new Promise(resolve => {
mediaRecorder.addEventListener("stop", () => {
const audioBlob = new Blob(audioChunks,{type: 'audio/mpeg-3'});
const audioUrl = URL.createObjectURL(audioBlob);
const audio = new Audio(audioUrl);
document.getElementById("AudioRecodingPlayer").src = audio.src;
document.getElementById("AudioRecodingPlayer").currentTime = 0;
const play = () => {
audio.play();
};
resolve({ audioBlob, audioUrl, play });
});
// Request the stop only after the "stop" listener is attached.
mediaRecorder.stop();
});
};
resolve({ start, stop });
});
});
};
// Build a new recorder and start capturing immediately.
async function StartRecording(){
  const handle = await recordAudio();
  Recorder = handle;
  handle.start();
}
// Stop the active recorder; its "stop" handler updates the player element.
async function StopRecording(){
  const stopResult = Recorder.stop();
}
// Remove every buffered chunk after the playhead, then resume recording so new
// audio is appended where the cut was made.
function Cut(){
  var Audio = document.getElementById("AudioRecodingPlayer");
  var CurrenTime = Math.round(Audio.currentTime);
  // Fix: the original called splice with the misspelled `CurrentTime`, an
  // undefined identifier, which throws a ReferenceError at runtime.
  audioChunks.splice(CurrenTime); // remove the elements from the "CurrenTime" index onward
  StartRecording();
}