I have used the Web Audio API to connect a microphone to a convolver, the convolver to an analyser, and the analyser to a flot GUI to plot the spectrum. For testing I set the convolver's buffer to a unit impulse, but I don't get any output. If I bypass the convolver and connect the mic directly to the analyser, it works. Can you please help?
In the code below, use_convolver determines whether or not to bypass the convolver.
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
<script src="http://www.flotcharts.org/flot/jquery.flot.js" type="text/javascript"></script>
</head>
<body>
<h1>Audio Spectrum</h1>
<div id="placeholder" style="width:400px; height:200px; display: inline-block;">
</div>
<script>
var microphone;
var analyser;
var convolver;
//user media
navigator.getUserMedia = (navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia);
if (navigator.getUserMedia) {
console.log('getUserMedia supported.');
navigator.getUserMedia(
// constraints - only audio needed for this app
{
audio : true,
echoCancellation : true
},
// Success callback
user_media_setup,
// Error callback
function(err) {
console.log('The following gUM error occurred: ' + err);
});
} else {
console.log('getUserMedia not supported on your browser!');
};
function user_media_setup(stream) {
console.log('user media setup');
// set up forked web audio context, for multiple browsers
// window. is needed otherwise Safari explodes
audioCtx = new (window.AudioContext || window.webkitAudioContext)();
//microphone
microphone = audioCtx.createMediaStreamSource(stream);
//analyser
analyser = audioCtx.createAnalyser();
analyser.fftSize = 1024;
analyser.smoothingTimeConstant = 0.85;
//convolver
convolver = audioCtx.createConvolver();
convolver.normalize = true;
convolverBuffer = audioCtx.createBuffer(1, 1, audioCtx.sampleRate);
// convolverBuffer[0] = 1; //wrong
convolverChannel = convolverBuffer.getChannelData(0);
convolverChannel[0] = 1;
convolver.buffer = convolverBuffer;
//connectivity
var use_convolver = false;
if (use_convolver) {
//through convolver:
microphone.connect(convolver);
convolver.connect(analyser);
} else {
//direct:
microphone.connect(analyser);
}
visualize();
}
function visualize() {
console.log('visualize');
dataArray = new Float32Array(analyser.frequencyBinCount);
draw = function() {
analyser.getFloatFrequencyData(dataArray);
var data = [];
for (var i = 0; i < dataArray.length; i++) {
freq = audioCtx.sampleRate * i / dataArray.length / 2;
data.push([freq, dataArray[i]]);
}
var options = {
yaxis : {
min : -200,
max : 0
}
};
$.plot("#placeholder", [data], options);
window.requestAnimationFrame(draw);
};
window.requestAnimationFrame(draw);
}
</script>
</body>
</html>
convolverBuffer[0] is the wrong way to get at the sample data in the buffer. You need to call convolverBuffer.getChannelData(0) to get the sample array to modify.
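Concretely, a minimal sketch of the corrected setup, reusing the variable names from the question (a one-sample unit impulse, so the convolver should pass the signal through unchanged):
// Build a 1-channel, 1-sample buffer and write a unit impulse into it.
var convolverBuffer = audioCtx.createBuffer(1, 1, audioCtx.sampleRate);
var convolverChannel = convolverBuffer.getChannelData(0); // Float32Array of samples
convolverChannel[0] = 1; // unit impulse: output equals input
convolver.buffer = convolverBuffer;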
@aldel This problem was bugging me for a few frustrating days... many thanks for this tip. I can confirm that this is an issue in Firefox as well. It seems that if you use a mono WAV file as the buffer for the convolver, you will not get any output from the convolver.
After I switched to a stereo WAV impulse response as the buffer, the convolver worked.
Also, a tip I learned today: Firefox's Web Audio tools (enabled by clicking the gear in the top-right section of the Firefox dev tools and checking 'Web Audio' over on the left) are really useful for visualizing the order of your nodes. And you can easily switch a node on/off (bypass it) to see whether it's causing problems in your audio context.
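If you would rather build the impulse response in code than load a stereo WAV, here is a hedged sketch of the same workaround done programmatically (assuming the audioCtx and convolver from the question; whether this avoids the mono issue in your Firefox version is untested):
// Two-channel unit impulse as a workaround for the mono-buffer issue described above.
var irBuffer = audioCtx.createBuffer(2, 1, audioCtx.sampleRate);
for (var ch = 0; ch < irBuffer.numberOfChannels; ch++) {
    irBuffer.getChannelData(ch)[0] = 1; // impulse in both left and right channels
}
convolver.buffer = irBuffer;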
Related
OK, so I'm an IT guy and kind of a noob on the dev side of the fence, but I've been able to create this ffmpeg wasm page that takes a canvas and converts it to WebM and .mp4. What I WANT to do is take the resulting .mp4 file and upload it to the server where the page/JS are being served from. Is this possible? I will include my source code, which is fairly simple and straightforward; I just don't know how to manipulate the resulting mp4 file that ffmpeg spits out (I realize it is happening client side) to be able to push it up to the server (maybe with an upload.php type situation?). The solution can be HTML/JavaScript/PHP, whatever, so long as it takes the mp4 output and gets it onto the server. I'd VERY MUCH appreciate a hand here.
Going to try my best to properly insert the HTML and JS. Please bear with me if I've done something wrong; I've never had to ask a question on here, usually I just look up existing answers.
const { createFFmpeg } = FFmpeg;
const ffmpeg = createFFmpeg({
log: true
});
const transcode = async (webcamData) => {
const message = document.getElementById('message');
const name = 'record.webm';
await ffmpeg.load();
message.innerHTML = 'Start transcoding';
await ffmpeg.write(name, webcamData);
await ffmpeg.transcode(name, 'output.mp4');
message.innerHTML = 'Complete transcoding';
const data = ffmpeg.read('output.mp4');
const video = document.getElementById('output-video');
video.src = URL.createObjectURL(new Blob([data.buffer], { type: 'video/mp4' }));
dl.href = video.src;
dl.innerHTML = "download mp4"
}
fn().then(async ({url, blob})=>{
transcode(new Uint8Array(await (blob).arrayBuffer()));
})
function fn() {
var recordedChunks = [];
var time = 0;
var canvas = document.getElementById("canvas");
return new Promise(function (res, rej) {
var stream = canvas.captureStream(60);
mediaRecorder = new MediaRecorder(stream, {
mimeType: "video/webm; codecs=vp9"
});
mediaRecorder.start(time);
mediaRecorder.ondataavailable = function (e) {
recordedChunks.push(e.data); // use the handler's own event parameter
// for demo, removed stop() call to capture more than one frame
}
mediaRecorder.onstop = function (event) {
var blob = new Blob(recordedChunks, {
"type": "video/webm"
});
var url = URL.createObjectURL(blob);
res({url, blob}); // resolve both blob and url in an object
myVideo.src = url;
// removed data url conversion for brevity
}
// for demo, draw random lines and then stop recording
var i = 0,
tid = setInterval(()=>{
if(i++ > 20) { // draw 20 lines
clearInterval(tid);
mediaRecorder.stop();
}
let canvas = document.querySelector("canvas");
let cx = canvas.getContext("2d");
cx.beginPath();
cx.strokeStyle = 'green';
cx.moveTo(Math.random()*100, Math.random()*100);
cx.lineTo(Math.random()*100, Math.random()*100);
cx.stroke();
},200)
});
}
<html>
<head>
<script src="https://unpkg.com/#ffmpeg/ffmpeg#0.8.1/dist/ffmpeg.min.js" defer></script>
<script src="canvas2mp4.js" defer></script>
</head>
<body>
here is a canvas<br>
<canvas id="canvas" style="height:100px;width:100px"></canvas><br>
here is a recorded video of the canvas in webM format<br>
<video id="myVideo" controls="controls"></video><br>
here is a transcoded mp4 from the webm above CLIENT SIDE using ffmpeg<br>
<video id="output-video" controls="controls"></video><br>
<a id="dl" href="" download="download.mp4"></a>
<div id="message"></div><br><br>
</body>
</html>
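As for getting the transcoded file onto the server: this is not part of the code above, but one possible approach is to wrap the Uint8Array returned by ffmpeg.read('output.mp4') in a Blob and POST it with fetch and FormData. The endpoint name upload.php and the field name video below are assumptions for illustration only:
// Sketch: POST the transcoded MP4 to the server.
// 'upload.php' and the 'video' field name are hypothetical; adjust to your backend.
async function uploadMp4(data) { // data: Uint8Array from ffmpeg.read()
    const blob = new Blob([data.buffer], { type: 'video/mp4' });
    const form = new FormData();
    form.append('video', blob, 'output.mp4'); // arrives as $_FILES['video'] in PHP
    const response = await fetch('upload.php', { method: 'POST', body: form });
    if (!response.ok) throw new Error('Upload failed: ' + response.status);
    return response.text();
}
On the PHP side, a minimal handler would only need to call move_uploaded_file() on $_FILES['video']['tmp_name'].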
I'm trying to figure out how to use the Web Audio API to record low-volume input from a microphone. Essentially, I'm looking to record the low frequencies, from 0 Hz to around 100 Hz.
Any help would be appreciated. Thanks.
So this is what I've got so far:
if (!navigator.getUserMedia) {
navigator.getUserMedia = navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia;
}
navigator.getUserMedia({
audio: true
}, function(stream) {
var ctx = new AudioContext();
var source = ctx.createMediaStreamSource(stream);
var gainNode = ctx.createGain();
source.connect(gainNode);
gainNode.connect(ctx.destination);
document.getElementById('volume').onchange = function() {
gainNode.gain.value = this.value;
};
gainNode.gain.value = document.getElementById('volume').value;
new Audio().play();
}, function(e) {
alert(e);
});
// For the demo only:
document.getElementById('volume').onchange = function() {
alert('Please provide access to the microphone before using this.');
}
This is the HTML control:
Volume: <input type=range id=volume min=0 max=100 value=50 step=0.01/>
From what I can tell, all I am doing with this code is lowering the output volume level from the microphone.
As I said, I am trying to capture low-volume input from 0 Hz to 100 Hz.
If you want to record just the frequencies between 0 and 100 Hz, use one or more BiquadFilterNodes or an IIRFilterNode to implement a lowpass filter with a cutoff of 100 Hz or so.
Generally, it's up to you to figure out the right filter, but perhaps this filter design page will be helpful. Use at your own risk!
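For instance, a minimal sketch using the variables from the question's code (the cutoff and Q values are illustrative; a single biquad only gives a gentle 12 dB/octave rolloff, so cascade several for a steeper filter):
// Lowpass the microphone signal at ~100 Hz before it reaches the rest of the graph.
var lowpass = ctx.createBiquadFilter();
lowpass.type = 'lowpass';
lowpass.frequency.value = 100; // cutoff in Hz
lowpass.Q.value = 0.707;       // roughly Butterworth response

source.connect(lowpass);
lowpass.connect(gainNode);     // instead of source.connect(gainNode)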
I am trying to use WebRTC to get access to the mobile camera. I have set it up according to what I found online. However, I am running into a few issues.
My code doesn't seem to work on Android devices.
When I try to set the width and height properties of the video it doesn't work.
On iOS, when I view the video, then leave the Safari app for a few seconds and return to the page, the video has black bars on the sides / a changed aspect ratio.
Anyone know how to fix these issues / has a better implementation?
MY CODE
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
</head>
<body>
<video id="video"></video>
<script type="text/javascript">
var video; // INPUT WEBRTC as <video> tag
var cameras = ["user", "environment"]; // USUAL CAMERA TYPES
var options = []; // CAMERAS AVAILABLE
navigator.mediaDevices.enumerateDevices().then((devices) => {
let index = 0;
devices.find((device) => {
if (device.kind === 'videoinput') {
if (device.deviceId == '') {
options.push({
audio: false,
video: {
facingMode: {
exact: cameras[index]
}
}
});
index++;
} else {
options.push({
audio: false,
video: {
deviceId: {
exact: device.deviceId
}
}
});
}
}
});
if (options.length == 0) {
console.log("NO DEVICES FOUND");
} else {
navigator.mediaDevices.getUserMedia(options[options.length - 1]).then(stream => {
video = document.getElementById("video");
video.setAttribute('playsinline', 'playsinline');
video.setAttribute('position', 'absolute');
video.setAttribute('top', '0');
video.setAttribute('left', '0');
document.body.appendChild(video);
try {
video.srcObject = stream;
video.style.display = 'block';
video.play();
} catch (error) {
video.src = URL.createObjectURL(stream);
video.style.display = 'block';
video.play();
}
// TRYING TO CHANGE VIDEO SIZE BELOW DOESN'T WORK
/*
var w = window.innerWidth;
var h = w * (video.videoHeight / video.videoWidth);
video.width = w;
video.height = h;
*/
})
}
}).catch(err => {
console.log(err);
});
</script>
</body>
</html>
A simple answer to this issue is to use the CSS object-fit property on your video container.
The object-fit property lets your video or image render within a specified area by cropping it. object-fit: cover is a good choice here. But if you are also going to use screen-sharing functionality, make sure to set object-fit to fill so the full frame is rendered without cropping; otherwise it will crop your shared-screen video to the center.
Example:
<video style="object-fit:cover; width:320px; height:240px"></video>
You can read more about this property usage here:
object-fit property
I have just found an HTML5 game framework named Phaser, and it says that this framework supports both PC browsers and mobile device browsers, as long as they support HTML5. So I wrote a sample following the tutorial and it worked fine on my PC with Chrome, but when I launch it with the Chrome browser on my iPhone, it just gives a blank page with nothing on it.
Here is the code:
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Phaser - Making your first game, part 1</title>
<script type="text/javascript" src="js/phaser.min.js"></script>
<style type="text/css">
body {
margin: 0;
}
</style>
</head>
<body>
<script type="text/javascript">
var game = new Phaser.Game(800, 600, Phaser.AUTO, '', { preload: preload, create: create, update: update });
var score = 0;
var scoreText;
function preload() {
game.load.image('sky', 'assets/sky.png');
game.load.image('ground', 'assets/platform.png');
game.load.image('star', 'assets/star.png');
game.load.spritesheet('dude','assets/dude.png', 32,48);
game.add.sprite(0,0,'star');
}
var platforms;
function create() {
game.physics.startSystem(Phaser.Physics.ARCADE);
game.add.sprite(0,0,'sky');
platforms = game.add.group();
platforms.enableBody = true;
var ground= platforms.create(0,game.world.height-64,'ground');
ground.scale.setTo(2,2);
ground.body.immovable=true;
var ledge = platforms.create(400,400,'ground');
ledge.body.immovable=true;
ledge = platforms.create(-150,250,'ground');
ledge.body.immovable = true;
player = game.add.sprite(32,game.world.height-150,'dude');
game.physics.arcade.enable(player);
player.body.bounce.y = 0.2;
player.body.gravity.y = 300;
player.body.collideWorldBounds = true;
player.animations.add('left',[0,1,2,3],10,true);
player.animations.add('right',[5,6,7,8],10,true);
cursors = game.input.keyboard.createCursorKeys();
stars = game.add.group();
stars.enableBody = true;
for(var i=0;i<12;i++){
var star = stars.create(i*70,0,'star');
star.body.gravity.y = 100;
star.body.bounce.y=0.7 + Math.random()*0.2;
}
scoreText = game.add.text(16,16, 'score:0',{fontSize:'32px',fill:'#000'});
}
function collectStar(player, star){
star.kill();
score = score + 10;
scoreText.text = 'Score: ' + score;
}
function update() {
game.physics.arcade.collide(player, platforms);
game.physics.arcade.collide(stars,platforms);
game.physics.arcade.overlap(player,stars,collectStar, null,this);
player.body.velocity.x=0;
if(cursors.left.isDown){
player.body.velocity.x=-150;
player.animations.play('left');
}
else if(cursors.right.isDown){
player.body.velocity.x=150;
player.animations.play('right');
}else {
player.animations.stop();
player.frame=4;
}
if(cursors.up.isDown && player.body.touching.down){
player.body.velocity.y = -350;
}
}
</script>
</body>
</html>
And you can try the code here: http://game.ximing.org/
In my experience, the supported renderers are for some reason not always properly recognized when using auto detection. Try using Phaser.CANVAS in the game constructor.
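That is, something along these lines, mirroring the constructor from the question:
// Force the Canvas renderer instead of letting Phaser auto-detect WebGL/Canvas.
var game = new Phaser.Game(800, 600, Phaser.CANVAS, '', { preload: preload, create: create, update: update });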
I use the CocoonJS launcher app on my iPhone to test out my games. I'm not familiar with Android, but for iOS users, just connect your iPhone to your computer, open up the applications folder, drag and drop a copy of your game into the CocoonJS app, and then sync your phone. You can then test your game on your phone as if it were in a wrapper (no URL, or any other signs of it being a web application).
I have a program (Here) that converts a video blob into a GIF using getUserMedia. To take advantage of a library (GIFEncoder) that lets me convert canvas frames into frames of a GIF, I am first drawing the video blob to the canvas using ctx.drawImage, and then I am drawing the frames to the GIF.
Is there any more efficient way to do this?
Code:
<!Doctype html>
<html>
<head>
<style type="text/css">
body {
}
</style>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<script type="text/javascript" src="LZWEncoder.js"></script>
<script type="text/javascript" src="NeuQuant.js"></script>
<script type="text/javascript" src="GIFEncoder.js"></script>
<script type="text/javascript" src="b64.js"></script>
</head>
<body>
<title>RecorderGif</title>
<header>
<h1>RecordGif</h1>
</header>
<article>
<video id="video" width="320" height="200" style="display:none" autoplay=""></video>
<section>
<button id="btnStart">Start video</button>
<button id="btnStop">Stop video</button>
<button id="btnSave">Download</button>
</section>
<canvas id="canvas" width="320" height="240"></canvas>
</article>
<script type="text/javascript">//<![CDATA[
var encoder = new GIFEncoder();
encoder.setRepeat(0);
encoder.setDelay(100);
encoder.start();
window.onload = function() {
//Compatibility
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;
var data = []
var canvas = document.getElementById("canvas"),
context = canvas.getContext("2d"),
video = document.getElementById("video"),
btnStart = document.getElementById("btnStart"),
btnStop = document.getElementById("btnStop"),
btnSave = document.getElementById("btnSave")
videoObj = {
video: true,
};
btnStart.addEventListener("click", function() {
var localMediaStream;
if (navigator.getUserMedia) {
navigator.getUserMedia(videoObj, function(stream) {
video.src = (navigator.webkitGetUserMedia) ? window.webkitURL.createObjectURL(stream) : stream;
localMediaStream = stream;
var addFrame = setInterval(function() {
data.push(canvas.toDataURL('image/png'))
},100);
}, function(error) {
console.error("Video capture error: ", error.code);
});
btnStop.addEventListener("click", function() {
clearInterval(addFrame)
localMediaStream.stop();
});
btnSave.addEventListener("click", function() {
for (var i = 0; i < data.length; i++) {
var frame = new Image();
frame.src=data[i]
context.drawImage(frame,0,0)
encoder.addFrame(context);
};
encoder.finish();
var binary_gif = encoder.stream().getData() //notice this is different from the as3gif package!
var data_url = 'data:image/gif;base64,'+encode64(binary_gif);
var gif = window.open('about:blank','','width=320,height=240')
gif.document.location.href=data_url
});
setInterval(function() {context.drawImage(video, 0, 0, 320, 240)},100);
}
});
};
//]]>
</script>
</body>
</html>
As there is no native support for GIF encoding in browsers, you very much depend on JavaScript solutions such as the one you're using.
The only way to improve the encoding itself is to go through the source code and see if you can optimize parts of it, such as utilizing typed arrays (if it doesn't already) and conforming to engine specifics, knowing what works well with the engine compilers (i.e. things like using simulated types, full constructors, non-morphing objects, etc.).
Another tip is to encode the GIF while the video is not playing (stepping it forward "manually" for each frame using currentTime and the seeked event) and with an off-screen video element. This may leave more resources for the browser and the engine to run the script faster (depending partly on the system and its support for hardware video decoding - there will still be an overhead from updating the video on-screen, which may or may not affect the encoding process).
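Roughly, that idea could look like the sketch below (not tested against the code above; it assumes the recorded blob reports a finite duration, and the 0.1 s step is meant to match the 100 ms frame delay used in the question):
// Step a paused (possibly off-screen) video forward frame by frame and encode
// each frame after the seek completes, instead of sampling a playing video.
function encodeFrames(video, canvas, context, encoder, step, onDone) {
    video.pause();
    video.onseeked = function() {
        context.drawImage(video, 0, 0, canvas.width, canvas.height);
        encoder.addFrame(context);
        if (video.currentTime + step <= video.duration) {
            video.currentTime += step; // schedules the next 'seeked' event
        } else {
            encoder.finish();
            onDone();
        }
    };
    video.currentTime = 0.001; // kick off the first 'seeked' near the start
}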
My 2.5 cents..