Web Audio API: How do I add a working convolver? - javascript

What I am trying to learn / do: how to set up a simple working convolver (reverb) in my code sandbox below using an impulse response. I thought it would be similar to setting up a filter, but things seem quite different.
What I tried: as with all new technologies, things change at a fast pace, making it difficult to know which implementation is correct and which is not. I looked at countless Web Audio API convolver tutorials; many were old, and others worked but were far too "bloated" to understand what is going on. I tried to implement some of the examples from the Mozilla documentation.
I already had a look at: https://developer.mozilla.org/en-US/docs/Web/API/ConvolverNode/buffer
My question: how do I integrate a convolver properly in the context below? As you can see, I tried but can't figure this out.
window.addEventListener('load', init, false);

function init() {
    setupWebAudio();
}

function setupWebAudio() {
    var audio = document.getElementById('music');
    var context = new AudioContext();
    var source = context.createMediaElementSource(audio);
    var filter = context.createBiquadFilter();
    var convolver = context.createConvolver();

    var inpulseRes = "hall.mp3";
    var hallBuffer = inpulseRes;
    soundSource = context.createBufferSource();
    soundSource.buffer = hallBuffer;
    convolver.buffer = hallBuffer;

    filter.type = 'lowpass';
    filter.frequency.value = 400;

    var theParent = document.getElementById("test");
    theParent.addEventListener("mousedown", doSomething, false);

    function doSomething(e) {
        if (e.target !== e.currentTarget) {
            if (e.target == theParent.children[0]) {
                filter.frequency.value += 200;
            }
            else if (e.target == theParent.children[1]) {
                filter.frequency.value -= 200;
            }
            else if (e.target == theParent.children[2]) {
                filter.type = 'highpass';
            }
        }
        e.stopPropagation();
    }

    source.connect(filter);
    source.connect(convolver);
    filter.connect(context.destination);
    audio.play();
}

This is a pretty open-ended question; what have you tried that hasn't worked, or is the piece you're missing what the "impulse response" is supposed to be? If the latter, search for "impulse response files" and you'll find tons of free files you can use. You can also generate noise on a logarithmic decay curve into a buffer, and you'll get a basic reverb effect. Basic method to create an impulseResponse buffer:
function impulseResponse(duration, decay, reverse) {
    var sampleRate = audioContext.sampleRate;
    var length = sampleRate * duration;
    var impulse = audioContext.createBuffer(2, length, sampleRate);
    var impulseL = impulse.getChannelData(0);
    var impulseR = impulse.getChannelData(1);

    if (!decay)
        decay = 2.0;
    for (var i = 0; i < length; i++) {
        var n = reverse ? length - i : i;
        impulseL[i] = (Math.random() * 2 - 1) * Math.pow(1 - n / length, decay);
        impulseR[i] = (Math.random() * 2 - 1) * Math.pow(1 - n / length, decay);
    }
    return impulse;
}
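The returned AudioBuffer can be assigned directly to a convolver (a minimal usage sketch; audioContext is assumed to be the same AudioContext the function above closes over):
var convolver = audioContext.createConvolver();
convolver.buffer = impulseResponse(2, 2.0, false); // 2 seconds of decaying noise as the impulse response
convolver.connect(audioContext.destination);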
Your code has both a BufferSourceNode and the convolver pointing to the same buffer, which is almost certainly wrong; you don't usually play back an impulse response file using a buffersource, and you don't usually use a normal sound file as an impulse response. (Look up convolution on Wikipedia if the role of an impulse response isn't clear.) You need to do something like:
function setupWebAudio() {
    var audio = document.getElementById('music');
    var context = new AudioContext();
    var source = context.createMediaElementSource(audio);
    var convolver = context.createConvolver();

    var irRRequest = new XMLHttpRequest();
    irRRequest.open("GET", "hall.mp3", true);
    irRRequest.responseType = "arraybuffer";
    irRRequest.onload = function() {
        context.decodeAudioData(irRRequest.response,
            function(buffer) { convolver.buffer = buffer; });
    };
    irRRequest.send();
    // note the above is async; when the buffer is loaded, it will take effect,
    // but in the meantime, the sound will be unaffected.

    source.connect(convolver);
    convolver.connect(context.destination);
}
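If XMLHttpRequest feels dated, the same loading step can be sketched with fetch and promises (an equivalent, not the only way; decodeAudioData also returns a Promise in current browsers):
fetch("hall.mp3")
    .then(function(response) { return response.arrayBuffer(); })
    .then(function(arrayBuffer) { return context.decodeAudioData(arrayBuffer); })
    .then(function(buffer) { convolver.buffer = buffer; });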

Connect the output of the convolver to something. What you have now is the source connected to the convolver, but the convolver isn't connected to anything. As a first cut, convolver.connect(context.destination).
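Applied to the original code, one possible wiring (keeping the existing filtered path and adding the reverberated one) looks like this:
source.connect(filter);
filter.connect(context.destination);     // dry, filtered path (as before)
source.connect(convolver);
convolver.connect(context.destination);  // wet, reverberated path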

Related

Building an Array with JavaScript

I am trying to capture the sound frequency from the device microphone in a web browser, using the Web Audio API. The instructions only cover how to play a sound file and manipulate its output, etc.
I need to listen to a humming sound from a person and work out its frequency, which I will then convert to a musical note. I need to build an array of all the frequencies generated from a single musical note that I play on a digital piano keyboard. I will then take the mode of all the frequencies to get the actual musical note.
I am able to display the detected frequencies and notes in console.log(); however, I am not able to build the array, get the mode, and then clear out the array when I play a new note. My array keeps growing because the code runs inside an event listener that stays active, waiting for the next note.
This is my code:
var arrayList = [];

function getModes(array) {
    var frequency = []; // array of frequency.
    var maxFreq = 0;    // holds the max frequency.
    var modes = [];

    for (var i in array) {
        frequency[array[i]] = (frequency[array[i]] || 0) + 1; // increment frequency.
        if (frequency[array[i]] > maxFreq) { // is this frequency > max so far ?
            maxFreq = frequency[array[i]];   // update max.
        }
    }
    for (var k in frequency) {
        if (frequency[k] == maxFreq) {
            modes.push(k);
        }
    }
    return modes;
}
function updatePitch(time) {
    var cycles = new Array;
    analyser.getFloatTimeDomainData(buf);
    var ac = autoCorrelate(buf, audioContext.sampleRate);
    // TODO: Paint confidence meter on canvasElem here.

    if (DEBUGCANVAS) { // This draws the current waveform, useful for debugging
        waveCanvas.clearRect(0, 0, 512, 256);
        waveCanvas.strokeStyle = "red";
        waveCanvas.beginPath();
        waveCanvas.moveTo(0, 0);
        waveCanvas.lineTo(0, 256);
        waveCanvas.moveTo(128, 0);
        waveCanvas.lineTo(128, 256);
        waveCanvas.moveTo(256, 0);
        waveCanvas.lineTo(256, 256);
        waveCanvas.moveTo(384, 0);
        waveCanvas.lineTo(384, 256);
        waveCanvas.moveTo(512, 0);
        waveCanvas.lineTo(512, 256);
        waveCanvas.stroke();
        waveCanvas.strokeStyle = "black";
        waveCanvas.beginPath();
        waveCanvas.moveTo(0, buf[0]);
        for (var i = 1; i < 512; i++) {
            waveCanvas.lineTo(i, 128 + (buf[i] * 128));
        }
        waveCanvas.stroke();
    }

    if (ac == -1) {
        detectorElem.className = "vague";
        pitchElem.innerText = "--";
        noteElem.innerText = "-";
        detuneElem.className = "";
        detuneAmount.innerText = "--";
    } else {
        detectorElem.className = "confident";
        pitch = ac;
        pitchElem.innerText = Math.round(pitch);
        var note = noteFromPitch(pitch);
        // Here is where I am converting the frequency to a note letter
        var noteString = noteStrings[note % 12];
        console.log(noteString);
        // This is where I am building the array with the notes that I find.
        // I have a nice array, but it keeps building and I do not know how to clear it
        // for the next session.
        if (note >= 36 && note <= 96) {
            if (arrayList) {
                arrayList.push(noteString);
            }
            console.log(noteString);
        }
        else {
            console.log("not note");
            var MyNote = getModes(arrayList);
            noteElem.innerHTML = MyNote;
            arrayList = [];
        }
        // This function remains active and continues to listen for the next note
        // to generate a new note letter
        var detune = centsOffFromPitch(pitch, note);
        if (detune == 0) {
            detuneElem.className = "";
            detuneAmount.innerHTML = "--";
        } else {
            if (detune < 0)
                detuneElem.className = "flat";
            else
                detuneElem.className = "sharp";
            detuneAmount.innerHTML = Math.abs(detune);
        }
    }

    if (!window.requestAnimationFrame)
        window.requestAnimationFrame = window.webkitRequestAnimationFrame;
    rafID = window.requestAnimationFrame(updatePitch);
}
How do I clear the array and use a new array when I play a new note?
Thank you for the support.
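One common pattern, sketched here with hypothetical helper names (onDetectedNote and onSilence are not part of the code above, and this is not a tested fix): keep pushing note names while the detector is confident, and compute the mode and reset the array as soon as the detector reports no pitch (the ac == -1 branch).
var noteBuffer = [];

function onDetectedNote(noteString) {   // call from the "confident" branch
    noteBuffer.push(noteString);
}

function onSilence() {                  // call from the ac == -1 branch
    if (noteBuffer.length) {
        var mostLikelyNote = getModes(noteBuffer)[0]; // mode of the collected note names
        noteElem.innerHTML = mostLikelyNote;
        noteBuffer = [];                              // start fresh for the next note
    }
}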

Connecting Multiple PannerNodes to output

I am trying to connect multiple PannerNodes to an output, but I have a problem connecting them properly (while testing I get no audio). The audio is multichannel, so I want to process each channel separately; the audio comes from an HTML video element.
My main problem is that I have 6 different PannerNodes, each with one output of 2 channels, and I don't know how to connect them to the destination properly to get a stereo output (or any output, really, but my system only supports stereo).
Here is what I am doing:
Creating audioContext, MediaElementSource, etc. and first connection.
const context = new AudioContext();
var source = context.createMediaElementSource(video);
var dest = context.createMediaStreamDestination();
//Spliter channels L, R, SL, SR, C, LFE
let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 6});
// I didn't use this later because I don't know whether it is necessary or how to implement it properly (the code still doesn't work correctly with this line commented out)
let channel_merger = new ChannelMergerNode(context, {numberOfInputs: 2});
let listener = context.listener;
source.connect(splitter, 0, 0);
Set up the listener
if (listener.forwardX) {
    listener.forwardX.setValueAtTime(0, audioCtx.currentTime);
    listener.forwardY.setValueAtTime(0, audioCtx.currentTime);
    listener.forwardZ.setValueAtTime(-1, audioCtx.currentTime);
    listener.upX.setValueAtTime(0, audioCtx.currentTime);
    listener.upY.setValueAtTime(1, audioCtx.currentTime);
    listener.upZ.setValueAtTime(0, audioCtx.currentTime);
} else {
    listener.setOrientation(0, 0, -1, 0, 1, 0);
}
Create the PannerNodes and connect them to the splitter
var FrontLeft, FrontCenter, FrontRight, SurroundLeft, SurroundRight, Sub = null;
var pannerNodesObjects = [FrontLeft, FrontCenter, FrontRight, SurroundLeft, SurroundRight, Sub];

function set_pannerNode(node, panningModel /* 'HRTF' */, distanceModel /* "linear", "inverse" or "exponential"; the default is "inverse" */, refDistance, maxDistance, rolloffFactor, coneInnerAngle, coneOuterAngle, coneOuterGain, x, y, z /* position */, n /* index in pannerNodesObjects */) {
    node = context.createPanner();
    // Setting options
    node.panningModel = panningModel;
    node.distanceModel = distanceModel;
    node.refDistance = refDistance;
    node.maxDistance = maxDistance;
    node.rolloffFactor = rolloffFactor;
    node.coneInnerAngle = coneInnerAngle;
    node.coneOuterAngle = coneOuterAngle;
    node.coneOuterGain = coneOuterGain;
    // Setting position
    node.positionX = x;
    node.positionY = y;
    node.positionZ = z;
    pannerNodesObjects[n] = node;
}

for (i = 0; i < pannerNodesObjects.length; i++) {
    [nx, ny, nz] = set_rotation(distanceFromScreen, screenCenterY, angleList[i], i); // Here I get the position of the PannerNode
    set_pannerNode(pannerNodesObjects[i], 'HRTF', "exponential", 1, 100, 2, 360, 0, 0, nx, ny, nz, i);
    splitter.connect(pannerNodesObjects[i], i);
}
Connect PannerNodes to destination
for (i = 0; i < pannerNodesObjects.length; i++) {
    pannerNodesObjects[i].connect(dest);
}
After that I update each PannerNode's position in a loop. But I get no audio while playing the video, so I have probably done something wrong with the AudioNode connections.
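For reference, and only as an assumption about where the silence might come from: the panners above feed a MediaStreamDestination whose stream is never played anywhere. A minimal sketch that routes them straight to the speakers instead:
for (let i = 0; i < pannerNodesObjects.length; i++) {
    // each panner's stereo output is summed into the (stereo) hardware destination
    pannerNodesObjects[i].connect(context.destination);
}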

Is there any way to change a file's FPS in the browser with JavaScript, or prepare a WAV converter for 60FPS videos?

I'm making a web application that stores short audio clips cut from large video files. The user uploads an .mp4 file and chooses the sound length, and here's the trick: cutting audio can only be done on the backend (correct me if I'm wrong), and sending 700MB of data is not a good option, so I use the code below to decode the audio data from the .mp4 and then send it with start and stop params. The backend (Node.js) uses FFMPEG to cut the audio and save it.
This part works, but I realised that audio decoded from a 60FPS video doesn't sound good (not terrible, but totally useless in my app). My goal is to avoid third-party apps, especially desktop ones (like Audacity), and let the user cut the relevant part of the audio from any mp4 video. Is there any way to convert a 60FPS video to a 30FPS video (ArrayBuffer) in the browser and then decode the audio?
fileInput.onchange = event => {
    this.file = event.target["files"][0];
    // .mp4 file
    this.fileURL = URL.createObjectURL(this.file);
    let baseAudioContext = new AudioContext();
    this.file.arrayBuffer().then(buff => {
        baseAudioContext.decodeAudioData(buff,
            success => {
                console.log(success);
                this.bufferToWave(success, 0, success.length);
            },
            err => console.log(err));
    });
}
bufferToWave(abuffer, offset, len) {
    var numOfChan = abuffer.numberOfChannels,
        length = len * numOfChan * 2 + 44,
        buffer = new ArrayBuffer(length),
        view = new DataView(buffer),
        channels = [], i, sample,
        pos = 0;

    // write WAVE header
    setUint32(0x46464952);                         // "RIFF"
    setUint32(length - 8);                         // file length - 8
    setUint32(0x45564157);                         // "WAVE"
    setUint32(0x20746d66);                         // "fmt " chunk
    setUint32(16);                                 // length = 16
    setUint16(1);                                  // PCM (uncompressed)
    setUint16(numOfChan);
    setUint32(abuffer.sampleRate);
    setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
    setUint16(numOfChan * 2);                      // block-align
    setUint16(16);                                 // 16-bit (hardcoded in this demo)
    setUint32(0x61746164);                         // "data" - chunk
    setUint32(length - pos - 4);                   // chunk length

    // write interleaved data
    for (i = 0; i < abuffer.numberOfChannels; i++)
        channels.push(abuffer.getChannelData(i));

    while (pos < length) {
        for (i = 0; i < numOfChan; i++) {          // interleave channels
            sample = Math.max(-1, Math.min(1, channels[i][offset]));           // clamp
            sample = (0.5 + sample < 0 ? sample * 32768 : sample * 32767) | 0; // scale to 16-bit signed int
            view.setInt16(pos, sample, true);      // update data chunk
            pos += 2;
        }
        offset++;                                  // next source sample
    }

    // create Blob
    //return (URL || webkitURL).createObjectURL(new Blob([buffer], { type: "audio/wav" }));
    var u = (URL || webkitURL).createObjectURL(new Blob([buffer], { type: "audio/wav" }));

    //temporary part
    //downloading file to check quality
    //in this part sound is already broken, no need to show backend code
    const a = document.createElement('a');
    a.style.display = 'none';
    a.href = u;
    a.download = name;
    document.body.appendChild(a);
    a.click();

    function setUint16(data) {
        view.setUint16(pos, data, true);
        pos += 2;
    }

    function setUint32(data) {
        view.setUint32(pos, data, true);
        pos += 4;
    }
}

Bomberman Vanilla JS

I'm trying to make Bomberman using vanilla JS for my examination project.
I am a little stuck right now on how to take the bombs out of the array and push them back into the array after they explode.
They need to explode after 2 seconds.
My code for bombs:
function bombPlayerOne() {
    let ss = new createjs.SpriteSheet(game.q.getResult('bomb'));
    let temp = new createjs.Sprite(ss, "bombIt");
    temp.x = playerOne.x;
    temp.y = playerOne.y;
    game.stage.addChild(temp);
    powerUps.bombs.push(temp);
    console.log("player one placed a bomb");

    for (var i = powerUps.bombs.length - 1; i > 0; i--) {
        powerUps.bombs.splice;
        // TODO : tween bomber ud...
        powerUps.bombs.push;
    }
}

function bombPlayerTwo() {
    let ss = new createjs.SpriteSheet(game.q.getResult('bomb'));
    let temp = new createjs.Sprite(ss, "bombIt");
    temp.x = playerTwo.x;
    temp.y = playerTwo.y;
    game.stage.addChild(temp);
    powerUps.bombs.push(temp);
    console.log("player two placed a bomb");

    for (var i = powerUps.bombs.length - 1; i > 0; i--) {
        powerUps.bombs.splice;
        // TODO : tween bomber ud...
        powerUps.bombs.push;
    }
}
So you have a few options. FYI, this isn't necessarily a JavaScript question so much as a question about how you handle game logic and code design.
1) A bomb, when placed, contains a reference back to its owner, e.g.
bomb.owner = playerOne
2) You have a manager that controls the state of a level, which keeps track of bombs:
LevelManager.player1Bombs = ....
3) You have an array of bombs placed by each player, which you then update during your logic update calls:
function gameUpdate(milliSecondsSinceLastFrame) {
    for (const bomb of playerOne.placedBombs) {
        if (bomb.isExploded) {
            // do other cleanup
            playerOne.availableBombs++;
        }
    }
    // ... do the same for player 2, etc.
}
All of them have their own advantages/disadvantages.
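To address the 2-second requirement specifically, here is a minimal sketch (a hypothetical placeBomb helper built from the question's own createjs calls and powerUps.bombs array, not tested against the rest of the game) that times the explosion with setTimeout and splices the bomb back out when it goes off:
function placeBomb(player) {
    const ss = new createjs.SpriteSheet(game.q.getResult('bomb'));
    const bomb = new createjs.Sprite(ss, "bombIt");
    bomb.x = player.x;
    bomb.y = player.y;
    bomb.owner = player;                 // option 1: remember who placed it
    game.stage.addChild(bomb);
    powerUps.bombs.push(bomb);

    setTimeout(function () {
        // explode after 2 seconds: remove the sprite and drop it from the array
        game.stage.removeChild(bomb);
        const idx = powerUps.bombs.indexOf(bomb);
        if (idx !== -1) powerUps.bombs.splice(idx, 1);
    }, 2000);
}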

How can I use JS WebAudioAPI for beat detection?

I'm interested in using the JavaScript WebAudioAPI to detect song beats, and then render them in a canvas.
I can handle the canvas part, but I'm not a big audio guy and really don't understand how to make a beat detector in JavaScript.
I've tried following this article but cannot, for the life of me, connect the dots between each function to make a functional program.
I know I should show you some code, but honestly I don't have any; all my attempts have failed miserably, and the relevant code is in the previously mentioned article.
Anyways I'd really appreciate some guidance, or even better a demo of how to actually detect song beats with the WebAudioAPI.
Thanks!
The main thing to understand about the referenced article by Joe Sullivan is that even though it gives a lot of source code, it's far from final and complete code. To reach a working solution you will still need both some coding and debugging skills.
This answer draws most of its code from the referenced article; original licensing applies where appropriate.
Below is a naïve sample implementation using the functions described in the article; you still need to figure out the correct thresholds for a functional solution.
The code consists of preparation code written for the answer:
reading a local file over the FileReader API
decoding the file as audio data using the AudioContext API
and then, as described in the article:
filtering the audio, in this example with a low-pass filter
calculating peaks using a threshold
grouping interval counts and then tempo counts
For the threshold I used an arbitrary value of .98 of the range between maximum and minimum values; when grouping I added some additional checks and arbitrary rounding to avoid possible infinite loops and make it an easy-to-debug sample.
Note that commenting is scarce to keep the sample implementation brief because:
the logic behind processing is explained in the referenced article
the syntax can be referenced in the API docs of the related methods
audio_file.onchange = function() {
    var file = this.files[0];
    var reader = new FileReader();
    var context = new (window.AudioContext || window.webkitAudioContext)();
    reader.onload = function() {
        context.decodeAudioData(reader.result, function(buffer) {
            prepare(buffer);
        });
    };
    reader.readAsArrayBuffer(file);
};
function prepare(buffer) {
    var offlineContext = new OfflineAudioContext(1, buffer.length, buffer.sampleRate);
    var source = offlineContext.createBufferSource();
    source.buffer = buffer;
    var filter = offlineContext.createBiquadFilter();
    filter.type = "lowpass";
    source.connect(filter);
    filter.connect(offlineContext.destination);
    source.start(0);
    offlineContext.startRendering();
    offlineContext.oncomplete = function(e) {
        process(e);
    };
}
function process(e) {
    var filteredBuffer = e.renderedBuffer;
    // If you want to analyze both channels, use the other channel later
    var data = filteredBuffer.getChannelData(0);

    var max = arrayMax(data);
    var min = arrayMin(data);
    var threshold = min + (max - min) * 0.98;

    var peaks = getPeaksAtThreshold(data, threshold);
    var intervalCounts = countIntervalsBetweenNearbyPeaks(peaks);
    var tempoCounts = groupNeighborsByTempo(intervalCounts);
    tempoCounts.sort(function(a, b) {
        return b.count - a.count;
    });
    if (tempoCounts.length) {
        output.innerHTML = tempoCounts[0].tempo;
    }
}
// http://tech.beatport.com/2014/web-audio/beat-detection-using-web-audio/
function getPeaksAtThreshold(data, threshold) {
    var peaksArray = [];
    var length = data.length;
    for (var i = 0; i < length;) {
        if (data[i] > threshold) {
            peaksArray.push(i);
            // Skip forward ~ 1/4s to get past this peak.
            i += 10000;
        }
        i++;
    }
    return peaksArray;
}
function countIntervalsBetweenNearbyPeaks(peaks) {
    var intervalCounts = [];
    peaks.forEach(function(peak, index) {
        for (var i = 0; i < 10; i++) {
            var interval = peaks[index + i] - peak;
            var foundInterval = intervalCounts.some(function(intervalCount) {
                if (intervalCount.interval === interval) return intervalCount.count++;
            });
            // Additional checks to avoid infinite loops in later processing
            if (!isNaN(interval) && interval !== 0 && !foundInterval) {
                intervalCounts.push({
                    interval: interval,
                    count: 1
                });
            }
        }
    });
    return intervalCounts;
}
function groupNeighborsByTempo(intervalCounts) {
    var tempoCounts = [];
    intervalCounts.forEach(function(intervalCount) {
        // Convert an interval to tempo
        var theoreticalTempo = 60 / (intervalCount.interval / 44100);
        theoreticalTempo = Math.round(theoreticalTempo);
        if (theoreticalTempo === 0) {
            return;
        }
        // Adjust the tempo to fit within the 90-180 BPM range
        while (theoreticalTempo < 90) theoreticalTempo *= 2;
        while (theoreticalTempo > 180) theoreticalTempo /= 2;
        var foundTempo = tempoCounts.some(function(tempoCount) {
            if (tempoCount.tempo === theoreticalTempo) return tempoCount.count += intervalCount.count;
        });
        if (!foundTempo) {
            tempoCounts.push({
                tempo: theoreticalTempo,
                count: intervalCount.count
            });
        }
    });
    return tempoCounts;
}
// http://stackoverflow.com/questions/1669190/javascript-min-max-array-values
function arrayMin(arr) {
    var len = arr.length,
        min = Infinity;
    while (len--) {
        if (arr[len] < min) {
            min = arr[len];
        }
    }
    return min;
}

function arrayMax(arr) {
    var len = arr.length,
        max = -Infinity;
    while (len--) {
        if (arr[len] > max) {
            max = arr[len];
        }
    }
    return max;
}
<input id="audio_file" type="file" accept="audio/*">
<audio id="audio_player"></audio>
<p>
    Most likely tempo: <span id="output"></span>
</p>
I wrote a tutorial here which shows how to do this with the JavaScript Web Audio API.
https://askmacgyver.com/blog/tutorial/how-to-implement-tempo-detection-in-your-application
Outline of Steps
Transform Audio File into an Array Buffer
Run Array Buffer Through Low Pass Filter
Trim a 10 second Clip from the Array Buffer
Down Sample the Data
Normalize the Data
Count Volume Groupings
Infer Tempo from Groupings Count
The code below does the heavy lifting.
Load Audio File Into Array Buffer and Run Through Low Pass Filter
function createBuffers(url) {
    // Fetch Audio Track via AJAX with URL
    request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = function(ajaxResponseBuffer) {
        // Create and Save Original Buffer Audio Context in 'originalBuffer'
        var audioCtx = new AudioContext();
        var songLength = ajaxResponseBuffer.total;

        // Arguments: Channels, Length, Sample Rate
        var offlineCtx = new OfflineAudioContext(1, songLength, 44100);
        source = offlineCtx.createBufferSource();
        var audioData = request.response;

        audioCtx.decodeAudioData(audioData, function(buffer) {
            window.originalBuffer = buffer.getChannelData(0);
            var source = offlineCtx.createBufferSource();
            source.buffer = buffer;

            // Create a Low Pass Filter to Isolate Low End Beat
            var filter = offlineCtx.createBiquadFilter();
            filter.type = "lowpass";
            filter.frequency.value = 140;
            source.connect(filter);
            filter.connect(offlineCtx.destination);

            // Render this low pass filter data to new Audio Context and Save in 'lowPassBuffer'
            offlineCtx.startRendering().then(function(lowPassAudioBuffer) {
                var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
                var song = audioCtx.createBufferSource();
                song.buffer = lowPassAudioBuffer;
                song.connect(audioCtx.destination);

                // Save lowPassBuffer in Global Array
                window.lowPassBuffer = song.buffer.getChannelData(0);
                console.log("Low Pass Buffer Rendered!");
            });
        },
        function(e) {});
    };
    request.send();
}
createBuffers('https://askmacgyver.com/test/Maroon5-Moves-Like-Jagger-128bpm.mp3');
You Now Have an Array Buffer of the Low Pass Filtered Song (And Original)
It contains sampleRate * duration entries: 44100 multiplied by the number of seconds in the song.
window.lowPassBuffer // Low Pass Array Buffer
window.originalBuffer // Original Non Filtered Array Buffer
Trim a 10 Second Clip from the Song
function getClip(length, startTime, data) {
    var clip_length = length * 44100;
    var section = startTime * 44100;
    var newArr = [];
    for (var i = 0; i < clip_length; i++) {
        newArr.push(data[section + i]);
    }
    return newArr;
}

// Overwrite our array buffer with a 10 second clip starting from 00:10s
window.lowPassBuffer = getClip(10, 10, window.lowPassBuffer);
Down Sample Your Clip
function getSampleClip(data, samples) {
    var newArray = [];
    var modulus_coefficient = Math.round(data.length / samples);
    for (var i = 0; i < data.length; i++) {
        if (i % modulus_coefficient == 0) {
            newArray.push(data[i]);
        }
    }
    return newArray;
}

// Overwrite our array with the down-sampled array.
lowPassBuffer = getSampleClip(lowPassBuffer, 300);
Normalize Your Data
function normalizeArray(data) {
    var newArray = [];
    for (var i = 0; i < data.length; i++) {
        newArray.push(Math.abs(Math.round((data[i + 1] - data[i]) * 1000)));
    }
    return newArray;
}

// Overwrite our array with the normalized array
lowPassBuffer = normalizeArray(lowPassBuffer);
Count the Flat Line Groupings
function countFlatLineGroupings(data) {
    var groupings = 0;
    var newArray = normalizeArray(data);

    function getMax(a) {
        var m = -Infinity,
            i = 0,
            n = a.length;
        for (; i != n; ++i) {
            if (a[i] > m) {
                m = a[i];
            }
        }
        return m;
    }

    function getMin(a) {
        var m = Infinity,
            i = 0,
            n = a.length;
        for (; i != n; ++i) {
            if (a[i] < m) {
                m = a[i];
            }
        }
        return m;
    }

    var max = getMax(newArray);
    var min = getMin(newArray);
    var count = 0;
    var threshold = Math.round((max - min) * 0.2);

    for (var i = 0; i < newArray.length; i++) {
        if (newArray[i] > threshold && newArray[i + 1] < threshold && newArray[i + 2] < threshold && newArray[i + 3] < threshold && newArray[i + 6] < threshold) {
            count++;
        }
    }
    return count;
}

// Count the Groupings
countFlatLineGroupings(lowPassBuffer);
Scale 10 Second Grouping Count to 60 Seconds to Derive Beats Per Minute
var final_tempo = countFlatLineGroupings(lowPassBuffer);
// final_tempo will be 21
final_tempo = final_tempo * 6;
console.log("Tempo: " + final_tempo);
// final_tempo will be 126
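More generally, the scaling step is just beatsPerMinute = groupingCount * (60 / clipLengthInSeconds); the factor of 6 above comes from the 10 second clip.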
