Hi, I want to load an audio file and display its digital signal (waveform) in the browser. I am using wavesurfer.js for the display. Could someone help me display the waveform for a dynamically loaded audio file? How do I modify my code so it can read a user-selected audio file and display its signal?
JavaScript:
input.onchange = function () {
    var sound = document.getElementById('sound');
    var reader = new FileReader();
    reader.onload = function (e) {
        sound.src = e.target.result;
        sound.controls = true;
        // sound.play(); // optional autoplay
    };
    reader.readAsDataURL(this.files[0]);
};
var buttons = {
    play: document.getElementById("btn-play"),
    pause: document.getElementById("btn-pause"),
    stop: document.getElementById("btn-stop")
};
// Create an instance of WaveSurfer with its configuration
var wavesurfer = WaveSurfer.create({
    container: '#audio-spectrum',
    progressColor: "#03a9f4",
    backend: 'MediaElement'
});
// Handle Play button
buttons.play.addEventListener("click", function(){
wavesurfer.play();
// Enable/Disable respectively buttons
buttons.stop.disabled = false;
buttons.pause.disabled = false;
buttons.play.disabled = true;
}, false);
wavesurfer.load('http://ia902606.us.archive.org/35/items/shortpoetry_047_librivox/song_cjrg_teasdale_64kb.mp3');
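What seems to be missing is a link between the file input and wavesurfer. A minimal sketch of one way to wire them together (it assumes your build of wavesurfer.js exposes the documented loadBlob() method; otherwise an object URL passed to load() should work the same way):
input.onchange = function () {
    var file = this.files[0];
    if (!file) return;
    // Render the waveform of the selected file directly from the File/Blob.
    wavesurfer.loadBlob(file);
    // Alternative: hand wavesurfer an object URL instead.
    // wavesurfer.load(URL.createObjectURL(file));
};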
I tried to implement Recorder.js, taking the code snippets from https://blog.addpipe.com/using-recorder-js-to-capture-wav-audio-in-your-html5-web-site/ .
But after the implementation, whenever I try to record, it gives me a .wav file of length 0:00. I can't find any logical reason for it, and the console doesn't show any errors. Has anyone faced this issue before?
My code looks like:
HTML file
<button id="recordButton">Record</button>
<button id="pauseButton" disabled>Pause</button>
<button id="stopButton" disabled>Stop</button>
<h3>Recordings</h3>
<ol id="recordingsList"></ol>
JS file
//webkitURL is deprecated but nevertheless
URL = window.URL || window.webkitURL;
var gumStream; //stream from getUserMedia()
var rec; //Recorder.js object
var input; //MediaStreamAudioSourceNode we'll be recording
// shim for AudioContext when it's not available
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext = new AudioContext(); //new audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");
//add events to those 3 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);
function startRecording() {
    console.log("recordButton clicked");
    /* Simple constraints object, for more advanced audio features see
       https://addpipe.com/blog/audio-constraints-getusermedia/ */
    var constraints = {
        audio: true,
        video: false
    };
    /* Disable the record button until we get a success or fail from getUserMedia() */
    recordButton.disabled = true;
    stopButton.disabled = false;
    pauseButton.disabled = false;
    /* We're using the standard promise-based getUserMedia()
       https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia */
    navigator.mediaDevices.getUserMedia(constraints).then(function (stream) {
        console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
        /* assign to gumStream for later use */
        gumStream = stream;
        /* use the stream */
        input = audioContext.createMediaStreamSource(stream);
        /* Create the Recorder object and configure it to record mono sound (1 channel).
           Recording 2 channels would double the file size. */
        rec = new Recorder(input, {
            numChannels: 1
        });
        //start the recording process
        rec.record();
        console.log("Recording started");
    }).catch(function (err) {
        //log the failure instead of swallowing it, then re-enable the record button
        console.error("getUserMedia() failed:", err);
        recordButton.disabled = false;
        stopButton.disabled = true;
        pauseButton.disabled = true;
    });
}
function pauseRecording()
{
console.log("pauseButton clicked rec.recording=", rec.recording);
if (rec.recording) {
//pause
rec.stop();
pauseButton.innerHTML = "Resume";
} else {
//resume
rec.record()
pauseButton.innerHTML = "Pause";
}
}
function stopRecording()
{
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
pauseButton.disabled = true;
//reset button just in case the recording is stopped while paused
pauseButton.innerHTML = "Pause";
//tell the recorder to stop the recording
rec.stop();
//stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to createDownloadLink
rec.exportWAV(createDownloadLink);
}
function createDownloadLink(blob)
{
var url = URL.createObjectURL(blob);
var au = document.createElement('audio');
var li = document.createElement('li');
var link = document.createElement('a');
//add controls to the <audio> element
au.controls = true;
au.src = url;
//link the a element to the blob
link.href = url;
link.download = new Date().toISOString() + '.wav';
link.innerHTML = link.download;
//add the new audio and a elements to the li element
li.appendChild(au);
li.appendChild(link);
//add the li element to the ordered list
recordingsList.appendChild(li);
}
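A hunch about the 0:00 output, based on how Recorder.js works rather than anything visible in your logs: the AudioContext above is created at page load, outside any user gesture, and Chrome's autoplay policy starts such a context in the "suspended" state. Recorder.js captures samples through a ScriptProcessorNode, which produces nothing while the context is suspended, so you get a zero-length WAV and no console error. Resuming the context inside the click handler would rule this out:
function startRecording() {
    console.log("recordButton clicked");
    // Chrome suspends an AudioContext created outside a user gesture;
    // resume it here so Recorder.js's ScriptProcessorNode receives samples.
    if (audioContext.state === "suspended") {
        audioContext.resume();
    }
    // ...rest of startRecording() unchanged
}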
I am building an audio recorder; the recording happens in JavaScript, but I want to manipulate the audio in Python. Currently I can download the blob as a .wav file through a link in the HTML, but I simply want to transfer the audio so it can be read as a .wav file in Python.
I have thought about saving the file as a .wav locally and then reading it in using Flask, but I would prefer not to do this. Does anyone know how to do one of these methods? Thanks in advance for any help. Below is my code:
app.js:
URL = window.URL || window.webkitURL;
//stream from getUserMedia(), from the MediaStream Recording API https://developer.mozilla.org/en-US/docs/Web/API/MediaStream_Recording_API
var gumStream;
//the Recorder.js object (not a MediaRecorder)
var rec;
//the MediaStreamAudioSourceNode we will record
var input;
// shim for AudioContext (shim corrects the existing audio context code) for when it is not available
var AudioContext = window.AudioContext || window.webkitAudioContext;
//variable holding the audio context
var audioContext;
//variables for the record/pause/stop buttons
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");
//add events to buttons upon being clicked
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);
function startRecording() {
console.log("recordButton clicked");
var constraints = { audio: true, video:false }
//disable record button until success/fail is received from getUserMedia()
recordButton.disabled = true;
stopButton.disabled = false;
pauseButton.disabled = false
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
//success in obtaining stream, so create audio context as sample rate may change after getUserMedia is called (does this on macOS when using AirPods where sr defaults to one set in OS for playback device)
console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
audioContext = new AudioContext();
//update the sr format
document.getElementById("formats").innerHTML="Format: 1 channel pcm # "+audioContext.sampleRate/1000+"kHz"
gumStream = stream;
input = audioContext.createMediaStreamSource(stream);
//create recorded object and configure to record mono sound (1 channel) / no need to record 2 channels as this will double file size and not improve quality of sound
rec = new Recorder(input,{numChannels:1})
//start the recording process
rec.record()
console.log("Recording started");
}).catch(function (err) {
    //log the failure, then re-enable the record button
    console.error("getUserMedia() failed:", err);
    recordButton.disabled = false;
    stopButton.disabled = true;
    pauseButton.disabled = true;
});
}
function pauseRecording(){
console.log("pauseButton clicked rec.recording=",rec.recording );
if (rec.recording){
//pause
rec.stop();
pauseButton.innerHTML="Resume";
}else{
//resume
rec.record()
pauseButton.innerHTML="Pause";
}
}
function stopRecording() {
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
pauseButton.disabled = true;
//reset pause button in case the recording is stopped while paused
pauseButton.innerHTML="Pause";
//tell the recorder to stop the recording
rec.stop();
//stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to createDownloadLink
rec.exportWAV(createDownloadLink);
}
function createDownloadLink(blob) {
var url = URL.createObjectURL(blob);
var au = document.createElement('audio');
var li = document.createElement('li');
var link = document.createElement('a');
//name of .wav file to use during upload and download (without extension)
var filename = new Date().toISOString();
//add controls to the <audio> element
au.controls = true;
au.src = url;
//save to disk link
link.href = url;
link.download = filename + ".wav"; //download forces the browser to download the file using the filename
link.innerHTML = "Save to disk";
//add the new audio element to li
li.appendChild(au);
//add the filename to the li
li.appendChild(document.createTextNode(filename+".wav "))
//add the save to disk link to li
li.appendChild(link);
//add the li element to the ol
recordingsList.appendChild(li);
}
main.py:
from flask import Flask
from flask import request
from flask import render_template
import os
import mysql.connector

app = Flask(__name__)

@app.route("/", methods=['POST', 'GET'])
def index():
    if request.method == "POST":
        print(type(request.get_data("audio_data")))
        if os.path.isfile('./file.wav'):
            print("./file.wav exists")
        return render_template('index.html', request="POST")
    else:
        return render_template("index.html")

if __name__ == "__main__":
    app.run()
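If the goal is to get the blob to Flask without the manual download step, one common pattern is to POST it as multipart form data. This is a sketch; the /upload route and the audio_data field name are assumptions to adjust to your app:
function uploadBlob(blob, filename) {
    var fd = new FormData();
    // Field name "audio_data" is an assumption; Flask will see the file
    // under request.files['audio_data'].
    fd.append("audio_data", blob, filename + ".wav");
    fetch("/upload", { method: "POST", body: fd }) // hypothetical route
        .then(function (res) { return res.text(); })
        .then(function (text) { console.log("Server returned:", text); })
        .catch(function (err) { console.error("Upload failed:", err); });
}
On the Flask side, request.files['audio_data'] is a file-like object, so it can be read in memory (for example with the standard wave module or scipy.io.wavfile) without ever writing a .wav to disk.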
I am designing a simple web page that records audio from the system's microphone and uploads it to the server using PHP. However, on pressing the record button I get the following exception:
app.js:44 Uncaught TypeError: Cannot read property 'getUserMedia' of undefined
at HTMLButtonElement.startRecording
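For context, a likely cause rather than something visible in the snippet: navigator.mediaDevices is only defined in secure contexts, so loading the page over plain http from anything other than localhost (or using an old browser) leaves it undefined, which produces exactly this TypeError. A defensive check makes the failure explicit:
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    // Usually an insecure (http) origin or an unsupported browser.
    alert("getUserMedia is not available; serve this page over https or from localhost.");
}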
My HTML Code:
<div id="controls">
<button id="recordButton">Record</button>
<button id="pauseButton" disabled>Pause</button>
<button id="stopButton" disabled>Stop</button>
</div>
<div id="formats">Format: start recording to see sample rate</div>
<p><strong>Recordings:</strong></p>
<ol id="recordingsList"></ol>
<!-- inserting these scripts at the end to be able to use all the elements in the DOM -->
<script src="https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js"></script>
<script src="js/app.js"></script>
My JavaScript Code:
//webkitURL is deprecated but nevertheless
URL = window.URL || window.webkitURL;
var gumStream; //stream from getUserMedia()
var rec; //Recorder.js object
var input; //MediaStreamAudioSourceNode we'll be recording
// shim for AudioContext when it's not avb.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext; //audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");
//add events to those 3 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);
function startRecording() {
console.log("recordButton clicked");
/*
Simple constraints object, for more advanced audio features see
https://addpipe.com/blog/audio-constraints-getusermedia/
*/
var constraints = { audio: true, video:false }
/*
Disable the record button until we get a success or fail from getUserMedia()
*/
recordButton.disabled = true;
stopButton.disabled = false;
pauseButton.disabled = false
/*
We're using the standard promise based getUserMedia()
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
*/
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
/*
create an audio context after getUserMedia is called
sampleRate might change after getUserMedia is called, like it does on macOS when recording through AirPods
the sampleRate defaults to the one set in your OS for your playback device
*/
audioContext = new AudioContext();
//update the format
document.getElementById("formats").innerHTML="Format: 1 channel pcm # "+audioContext.sampleRate/1000+"kHz"
/* assign to gumStream for later use */
gumStream = stream;
/* use the stream */
input = audioContext.createMediaStreamSource(stream);
/*
Create the Recorder object and configure to record mono sound (1 channel)
Recording 2 channels will double the file size
*/
rec = new Recorder(input,{numChannels:1})
//start the recording process
rec.record()
console.log("Recording started");
}).catch(function (err) {
    //log the failure, then re-enable the record button
    console.error("getUserMedia() failed:", err);
    recordButton.disabled = false;
    stopButton.disabled = true;
    pauseButton.disabled = true;
});
}
function pauseRecording(){
console.log("pauseButton clicked rec.recording=",rec.recording );
if (rec.recording){
//pause
rec.stop();
pauseButton.innerHTML="Resume";
}else{
//resume
rec.record()
pauseButton.innerHTML="Pause";
}
}
function stopRecording() {
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
pauseButton.disabled = true;
//reset button just in case the recording is stopped while paused
pauseButton.innerHTML="Pause";
//tell the recorder to stop the recording
rec.stop();
//stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to createDownloadLink
rec.exportWAV(createDownloadLink);
}
function createDownloadLink(blob) {
var url = URL.createObjectURL(blob);
var au = document.createElement('audio');
var li = document.createElement('li');
var link = document.createElement('a');
//name of .wav file to use during upload and download (without extension)
var filename = new Date().toISOString();
//add controls to the <audio> element
au.controls = true;
au.src = url;
//save to disk link
link.href = url;
link.download = filename + ".wav"; //download forces the browser to download the file using the filename
link.innerHTML = "Save to disk";
//add the new audio element to li
li.appendChild(au);
//add the filename to the li
li.appendChild(document.createTextNode(filename+".wav "))
//add the save to disk link to li
li.appendChild(link);
//upload link
var upload = document.createElement('a');
upload.href="#";
upload.innerHTML = "Upload";
upload.addEventListener("click", function (event) {
    event.preventDefault(); //keep the href="#" from navigating/scrolling
    var xhr = new XMLHttpRequest();
    xhr.onload = function (e) {
        //onload only fires once the request has completed
        console.log("Server returned: ", e.target.responseText);
    };
    var fd = new FormData();
    fd.append("audio_data", blob, filename);
    xhr.open("POST", "upload2.php", true);
    xhr.send(fd);
});
li.appendChild(document.createTextNode (" "))//add a space in between
li.appendChild(upload)//add the upload link to li
//add the li element to the ol
recordingsList.appendChild(li);
}
I'm using JavaScript to build a radio and I'm trying to give it different functionality, but I need to be able to detect the end of a song to make it work. Is there any way to do this?
I've tried different methods I've found online, like .ended, but I don't think those work without using the HTML audio tag. So I tried making an audio tag that uses the same data for its source as my JS radio, to get the file length so I could stop my sourceNode at the end time and create a new one, but I keep getting null back as the data, so that doesn't work either.
I want to do something like:
context.onended = function() {
$.ajax({
url: '../scripts/radio.php',
data: {
attr1: 'value1'
},
success: function(data) {
console.log(data);
fileChosen = true;
setupAudioNodes();
var request = new XMLHttpRequest();
request.addEventListener("progress", updateProgress);
request.addEventListener("load", transferComplete);
request.addEventListener("error", transferFailed);
request.addEventListener("abort", transferCanceled);
request.open('GET', data, true);
request.responseType = 'arraybuffer';
// When loaded decode the data
request.onload = function() {
$("#title").html("Title");
$("#album").html("Goes");
$("#artist").html("Here");
onWindowResize();
$("#title, #artist, #album").css("visibility", "visible");
// decode the data
context.decodeAudioData(request.response, function(buffer) {
// when the audio is decoded play the sound
sourceNode.buffer = buffer;
sourceNode.start(0);
$("#freq, body").addClass("animateHue");
//on error
}, function(e) {
console.log(e);
});
};
request.send();
}
    });
};
I want this to run at the end of a song and play the next file, which would work if I could get the end time of the currently playing song.
To fix the above issue, I added the .ended event inside the function where the source was set up:
function setupAudioNodes() {
// setup a analyser
analyser = context.createAnalyser();
// create a buffer source node
sourceNode = context.createBufferSource();
//connect source to analyser as link
sourceNode.connect(analyser);
// and connect source to destination
sourceNode.connect(context.destination);
//start updating
rafID = window.requestAnimationFrame(updateVisualization);
//I had to place a function for the .ended event inside the function sourceNode was set up.
sourceNode.onended = function() {
sourceNode.stop(0);
playFirst();
}
}
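Two caveats worth noting about this pattern, from the Web Audio spec rather than the code above: onended also fires after a manual sourceNode.stop(), so the stop(0) call inside the handler is redundant, and an AudioBufferSourceNode is single-use, so each new song needs a fresh node with the handler reattached. A sketch of the per-track setup, where playNextTrack() is a hypothetical helper that fetches and decodes the next song:
function playBuffer(buffer) {
    // Buffer sources are single-use: create a fresh node for every track.
    var node = context.createBufferSource();
    node.buffer = buffer;
    node.connect(analyser);
    node.connect(context.destination);
    node.onended = function () {
        playNextTrack(); // hypothetical: load/decode the next song, then call playBuffer() again
    };
    node.start(0);
    return node;
}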
I would like to embed Vimeo videos on a page and have them start playing as soon as the page loads; when the first video has finished, the second one should start. In my code I have an array of video ids, but my problem is that the video does not load.
<div id="headervideo"></div>
<script src="https://player.vimeo.com/api/player.js"></script>
<script>
//====================
document.addEventListener("DOMContentLoaded", function(event) {
var videos = ['240512614', '227735391']; // videos ids
var options = {
width: 640,
loop: true
};
var player = new Vimeo.Player('headervideo', options);
playMovie(player, videos)
})//end function
//====================
var playMovie = function(player, videos) {
if(!videos.length){
return false;
}
var video = videos.shift();
alert (video)
player.loadVideo(videos).then(function(id) {
alert("the video successfully loaded ")
player.getEnded().then(function(ended) {
playMovie(player, videos)
}).catch(function(error) {
console.warn(error)
});
}).catch(function(error) {
console.warn(error);
});
}//end function
//====================
</script>
Your first issue lies in
new Vimeo.Player('headervideo', options);
You cannot create a Vimeo.Player without specifying a video in the options object (or as an html attribute on an associated element).
So, specifying either:
var video = videos.shift();
var options = {
width: 640,
loop: true,
id: video
}
or:
var options = {
width: 640,
loop: true,
id: videos[0]
}
will let you load the video.
But this raises your second issue: playMovie will not wait for your currently playing video to finish, but rather swap the video out immediately. getEnded() is not a synchronously blocking function; it simply resolves with whether the video had already ended at the moment you called it.
What you want instead is to listen for the ended event, and to turn off loop: true, since looping suppresses the ended event.
document.addEventListener("DOMContentLoaded", function(event) {
window.videos = ['240512614', '227735391']; // videos ids
var video = videos.shift();
var options = {
width: 640,
loop: false,
id: video
};
window.player = new Vimeo.Player('headervideo', options);
window.player.on('ended', playNext);
window.player.play();
})//end function
//====================
var playNext= function() {
alert('foo');
if(!window.videos.length){
return false;
}
var video = window.videos.shift();
alert (video);
player.loadVideo(video).then(function(id) {
alert("the video "+id+" successfully loaded ");
player.play();
}).catch(function(error) {
console.warn(error)
});
}//end function