I am making an audio recorder using HTML5 and JavaScript and do not want to include any third-party API. I have reached my first step by creating an audio retriever and player using the <audio> tag and the navigator.webkitGetUserMedia function, which gets audio from my microphone and plays it through the <audio> element. But I am not able to get the audio data into an array; at this point I don't know what to do or which function to use.
Simple: just create an audio node. Below is tweaked code from MattDiamond's RecorderJS:
function RecordAudio(stream, cfg) {
    var config = cfg || {};
    var bufferLen = config.bufferLen || 4096;
    var numChannels = config.numChannels || 2;
    this.context = stream.context;
    var recordBuffers = [];
    var recording = false;
    // createScriptProcessor replaced the older createJavaScriptNode
    this.node = (this.context.createScriptProcessor ||
        this.context.createJavaScriptNode).call(this.context,
        bufferLen, numChannels, numChannels);
    stream.connect(this.node);
    this.node.connect(this.context.destination);
    this.node.onaudioprocess = function (e) {
        if (!recording) return;
        for (var i = 0; i < numChannels; i++) {
            if (!recordBuffers[i]) recordBuffers[i] = [];
            recordBuffers[i].push.apply(recordBuffers[i], e.inputBuffer.getChannelData(i));
        }
    };
    this.getData = function () {
        var tmp = recordBuffers;
        recordBuffers = [];
        return tmp; // returns an array of arrays containing the data from each channel
    };
    this.start = function () {
        recording = true;
    };
    this.stop = function () {
        recording = false;
    };
}
Example usage:
var recorder = new RecordAudio(userMedia);
recorder.start();
recorder.stop();
var recordedData = recorder.getData();
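Note that RecordAudio expects a Web Audio source node rather than the raw stream, since it reads stream.context and calls stream.connect(). Here is a minimal sketch of that wiring, assuming a browser with the modern promise-based navigator.mediaDevices.getUserMedia (the older navigator.webkitGetUserMedia took callbacks instead):
var audioContext = new (window.AudioContext || window.webkitAudioContext)();
navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function (stream) {
        // Wrap the microphone stream in a source node; it exposes
        // .context and .connect(), which is all RecordAudio needs.
        var userMedia = audioContext.createMediaStreamSource(stream);
        var recorder = new RecordAudio(userMedia);
        recorder.start();
        // ... later:
        // recorder.stop();
        // var recordedData = recorder.getData();
    })
    .catch(function (err) {
        console.error('getUserMedia failed:', err);
    });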
I would like my audio2 file to play when audio1.currentTime is 3 seconds, but I'm not able to make it work. I'm a newbie in JavaScript; what am I missing? This is my current JavaScript code:
function initAudioPlayer() {
    var audio1, audio2, ext, agent;
    ext = ".mp3";
    agent = navigator.userAgent.toLocaleLowerCase();
    if (agent.indexOf('firefox') != -1 || agent.indexOf('opera') != -1) { ext = ".ogg"; }
    //Audio Objects: audio1 and audio2
    audio1 = new Audio();
    audio1.src = "folder/Audio1" + ext;
    audio1.loop = false;
    audio1.play();
    audio2 = new Audio();
    audio2.src = "folder/Audio2" + ext;
    audio2.loop = false;
    audio2.play();
    //Function that reproduces the second audio file at second 3 of the first audio file
    function audio2() {
        if (audio1.currentTime == 3) { audio2.play(); }
    };
}
window.addEventListener("load", initAudioPlayer);
You must use the Web Audio API and fetch the buffers of your files.
Then you must add the samples together and copy the result into a new buffer.
This code can help you:
let index = 0;
samples.forEach(buffer => {
    if (index === 0) {
        tempBuf = buffer;
    } else {
        tempBuf = this.appendBuffer(tempBuf, buffer);
    }
    index++;
});
And with this method you can append two buffers:
private appendBuffer(buffer1, buffer2) {
    const numberOfChannels = Math.min(buffer1.numberOfChannels, buffer2.numberOfChannels);
    const tmp = this.audioContextService.createBuffer(
        Math.max(buffer1.numberOfChannels, buffer2.numberOfChannels),
        Math.max(buffer1.length, buffer2.length), buffer1.sampleRate);
    for (let i = 0; i < numberOfChannels; i++) {
        const channel = tmp.getChannelData(i);
        let finalArray = [];
        let d = [];
        const channelTemp = buffer1.getChannelData(i);
        if (buffer2.numberOfChannels <= i) {
            finalArray = channelTemp;
        } else {
            const c = buffer2.getChannelData(i);
            // Mix the shorter channel into the longer one.
            if (channelTemp.length > c.length) {
                finalArray = channelTemp;
                d = c;
            } else {
                finalArray = c;
                d = channelTemp;
            }
            for (let j = 0; j < d.length; j++) {
                finalArray[j] += d[j] / 2;
            }
        }
        channel.set(finalArray); // write the mixed data into the new buffer
    }
    return tmp;
}
If you truly want this to be accurate in time, you can't use Audio() - that's the HTML5 <audio> element, which is not sample-accurate. Javascript is also not accurate enough in event delivery to use a callback to do this (to be fair, neither are most general-purpose native OS APIs). You need to schedule the playback in advance, which is where Web Audio (https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) comes in. You need to load both samples, decode them into AudioBuffers, and then schedule playing back each of them with an AudioBufferSourceNode.
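A minimal sketch of that approach, assuming two hypothetical files audio1.mp3 and audio2.mp3 and a browser with the promise-based decodeAudioData:
var context = new (window.AudioContext || window.webkitAudioContext)();
// Fetch and decode one file into an AudioBuffer.
function loadBuffer(url) {
    return fetch(url)
        .then(function (response) { return response.arrayBuffer(); })
        .then(function (data) { return context.decodeAudioData(data); });
}
Promise.all([loadBuffer('audio1.mp3'), loadBuffer('audio2.mp3')])
    .then(function (buffers) {
        var startTime = context.currentTime + 0.1; // small scheduling margin
        var source1 = context.createBufferSource();
        source1.buffer = buffers[0];
        source1.connect(context.destination);
        source1.start(startTime);
        // Schedule audio2 sample-accurately, 3 seconds into audio1.
        var source2 = context.createBufferSource();
        source2.buffer = buffers[1];
        source2.connect(context.destination);
        source2.start(startTime + 3);
    });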
I have an HTML5 video element and I need to apply different processing to the video's output audio in real time. On desktop I made it work with the Web Audio API. The API is seemingly present on iOS as well: I am able to inspect the created objects, but it doesn't modify the video's output signal.
Here's my example code:
$(function () {
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    var audioContext = new AudioContext();
    var bufferSize = 1024;
    var selectedChannel = 0;
    var effect = (function () {
        var node = audioContext.createScriptProcessor(bufferSize, 2, 2);
        node.addEventListener('audioprocess', function (e) {
            var input = e.inputBuffer.getChannelData(selectedChannel);
            var outputL = e.outputBuffer.getChannelData(0);
            var outputR = e.outputBuffer.getChannelData(1);
            for (var i = 0; i < bufferSize; i++) {
                outputL[i] = selectedChannel == 0 ? input[i] : 0.0;
                outputR[i] = selectedChannel == 1 ? input[i] : 0.0;
            }
        });
        return node;
    })();
    var streamAttached = false;
    function attachStream(video) {
        if (streamAttached) {
            return;
        }
        var source = audioContext.createMediaElementSource(video);
        source.connect(effect);
        effect.connect(audioContext.destination);
        streamAttached = true;
    }
    function iOS_video_touch_start() {
        var video = $('#vid')[0];
        video.play();
        attachStream(video);
    }
    var needtouch = false;
    $('#vid').on('play', function () {
        attachStream(this);
    }).on('loadedmetadata', function () {
        this.play();
        this.volume = 1.0;
        if (this && this.paused) {
            if (needtouch == false) {
                needtouch = true;
                this.addEventListener("touchstart", iOS_video_touch_start, true);
            }
        }
    });
    window.panToRight = function () {
        selectedChannel = 1;
    };
    window.panToLeft = function () {
        selectedChannel = 0;
    };
});
You can also check it on CodePen:
http://codepen.io/anon/pen/pgeJQG
With the buttons you are able to toggle between the left and the right channels. On desktop browsers (Chrome, Firefox, Safari tested) it works fine.
I have also tried the older createJavaScriptNode() instead of createScriptProcessor(). I have also tried it with an alternative effect chain, which looked like this:
var audioContext = new (window.AudioContext || window.webkitAudioContext)();
audioContext.createGain = audioContext.createGain || audioContext.createGainNode;
var gainL = audioContext.createGain();
var gainR = audioContext.createGain();
gainL.gain.value = 1;
gainR.gain.value = 1;
var merger = audioContext.createChannelMerger(2);
var splitter = audioContext.createChannelSplitter(2);
//Connect to source
source = audioContext.createMediaElementSource(video);
//Connect the source to the splitter
source.connect(splitter, 0, 0);
//Connect the splitter's outputs to the Gain Nodes
splitter.connect(gainL, 0);
splitter.connect(gainR, 1);
//Connect Left and Right Nodes to the Merger Node inputs
//Assuming stereo as initial status
gainL.connect(merger, 0, 0);
gainR.connect(merger, 0, 1);
//Connect Merger output to context destination
merger.connect(audioContext.destination, 0, 0);
As you probably noticed, this code uses only the built-in nodes. But no luck.
So my questions are: Is this even possible on mobile? If it is, what am I missing? If it is not, is there any possible workaround? Thanks.
With Chrome on Android, MediaElementSource is not currently routed to WebAudio. This is a known issue and is planned to be fixed eventually.
I am trying to play one audio file after another using JavaScript. I get the audio file name from a vector (int) and from some strings I have created.
This is my code, but I can't make it work.
var audio = new Audio();
function playAll() {
    var folder_name = "synthex/";
    var file_name = ".sentence";
    var audio_file = ".wav";
    for (i = 0; i < 18; ++i) {
        var dir_file = folder_name + voicesClicks[i];
        dir_file = dir_file + file_name;
        if (voicesClicks[i] == null) {
            continue;
        }
        //var value = voicesClicks[i];
        dir_file = dir_file + i;
        dir_file = dir_file + audio_file;
        console.log(dir_file);
        audio.src = dir_file;
        audio.play();
    }
}
One major problem that I find with your code is that the audio's src keeps changing in each iteration, so even if each audio.src value is a valid wav file, your code would end up playing only the last one...
I had a similar requirement; here is what I did:
function playAll() {
    // CHANGE this list as per your requirement.
    var urls = ['http://upload.wikimedia.org/wikipedia/en/f/f9/Beatles_eleanor_rigby.ogg',
        'http://upload.wikimedia.org/wikipedia/commons/5/5b/Ludwig_van_Beethoven_-_Symphonie_5_c-moll_-_1._Allegro_con_brio.ogg'
    ];
    var idx = 0;
    var aud = new Audio();
    aud.src = urls[idx];
    aud.addEventListener('ended', function () { // automatically start the next track
        idx++;
        if (idx === urls.length) idx = 0;
        aud.src = urls[idx];
        aud.play();
    });
    aud.play();
}
document.addEventListener("DOMContentLoaded", playAll); // you can change this to a button.onclick handler.
Edit: you can change the urls to ...
var urls = [];
for (var i = 0; i < 18; ++i) {
    if (!voicesClicks[i]) {
        continue;
    }
    urls.push(folder_name + voicesClicks[i] + file_name + i + audio_file);
}
console.log(JSON.stringify(urls)); // for debugging
I have a webcam streaming app based on the webcam.fla example by Wowza. The app streams audio and video from Flash to a Wowza server, where it's transcoded, etc.
We're trying to add a feature that lets the audio source be changed to any other system audio source. So far we successfully create a dropdown containing all the interfaces and handle the callback, but despite starting and stopping the stream with the doConnect() function, the audio source seems to remain the default.
import flash.media.*;
import flash.geom.*;
import flash.net.*;
import flash.media.*; // Should this be duplicated?

var parsed:Object = root.loaderInfo.parameters;
var nc:NetConnection = null;
var nsPublish:NetStream = null;
var nsPlay:NetStream = null;
var camera:Camera = null;
var microphone:Microphone = null;

// Testing
var serverName:String = "rtmp://stream-na.example.tv:1935/live";
var movieName:String = "streamName";
var flushVideoBufferTimer:Number = 0;

// Quality settings
var videoBitrate:Number = 200000;
var videoQuality:Number = 80; // Quality %
var videoWidth:Number = 640;
var videoHeight:Number = 360;
var videoFrameRate:Number = 30;
//////////////// UI Functions Below
import fl.controls.ComboBox;
import fl.data.DataProvider;

var aCb:ComboBox = new ComboBox();

function createAudioComboBox(sources)
{
    var sourcesArray:Array = new Array();
    aCb.dropdownWidth = 210;
    aCb.width = 200;
    aCb.move(0, 365);
    aCb.prompt = "Change Audio Source";
    aCb.dataProvider = new DataProvider(sourcesArray);
    aCb.addEventListener(Event.CHANGE, changeAudioHandler);
    addChild(aCb);

    for (var index in sources)
    {
        //ExternalInterface.call("logBrowserStreaming", sources[index]);
        aCb.addItem({ label: sources[index], data: index });
    }

    function changeAudioHandler(event:Event):void
    {
        doConnect();
        //var request:URLRequest = new URLRequest();
        //request.url = ComboBox(event.target).selectedItem.data;
        //navigateToURL(request);
        //aCb.selectedIndex = -1;
        var audioSource = ComboBox(event.target).selectedItem.data;
        //microphone:Microphone = null;
        microphone = Microphone.getMicrophone(audioSource);
        microphone.rate = 16;
        microphone.codec = SoundCodec.SPEEX;
        microphone.encodeQuality = 10; // Low quality!! offer better audio in native app?
        microphone.setSilenceLevel(0, -1);
        microphone.setUseEchoSuppression(true);
        //ExternalInterface.call("logBrowserStreaming", audioSource);
        // Trigger restart camera...
        //startCamera(); // Nope
        doConnect();
    }
}
//////////////// Core Streaming Functions Below
function startCamera()
{
    // get the default Flash camera and microphone
    camera = Camera.getCamera();
    microphone = Microphone.getMicrophone();

    // here are all the quality and performance settings
    if (camera != null)
    {
        //camera.setMode(1280, 720, 30, false);
        camera.setMode(videoWidth, videoHeight, videoFrameRate, false); // false gives framerate priority apparently?? http://www.flash-communications.net/technotes/setMode/index.html
        camera.setQuality(videoBitrate, videoQuality); // Max 800kbps
        camera.setKeyFrameInterval(2);
        // List audio source names
        // sourceVideoLabel.text += Camera.names;
        // Create audio sources dropdown
        // Hide video sources for now...
        //createVideoComboBox(Camera.names);
    }
    else
    {
        sourceVideoLabel.text = "No Camera Found\n";
    }

    if (microphone != null)
    {
        microphone.rate = 16;
        microphone.codec = SoundCodec.SPEEX;
        microphone.encodeQuality = 10; // Low quality!! offer better audio in native app?
        microphone.setSilenceLevel(0, -1);
        microphone.setUseEchoSuppression(true);
        // List audio source names
        // sourceVideoLabel.text += Microphone.names;
        // Create audio sources dropdown
        createAudioComboBox(Microphone.names);
        // Don't show audio slider for now...
        // createAudioSlider();
        // Don't monitor audio level for now...
        //monitorAudioLevel();
    }
    else
    {
        sourceVideoLabel.text += "No Microphone Found\n";
    }

    nameStr.text = movieName;
    AppendCheckbox.selected = false;
    connect.connectStr.text = serverName;
    connect.connectButton.addEventListener(MouseEvent.CLICK, doConnect);
    //enablePlayControls(false);
    doConnect();
}
function ncOnStatus(infoObject:NetStatusEvent)
{
    trace("nc: " + infoObject.info.code + " (" + infoObject.info.description + ")");
    if (infoObject.info.code == "NetConnection.Connect.Failed")
    {
        prompt.text = "Connection failed. Try again or email support@chew.tv";
    }
    else if (infoObject.info.code == "NetConnection.Connect.Rejected")
    {
        // Hide connect fail...
        prompt.text = infoObject.info.description;
    }
}
// Ask for permission to use the camera and show the preview to the user
// event:MouseEvent
// doConnect toggles connections on and off.
function doConnect()
{
    // connect to the Wowza Media Server
    if (nc == null)
    {
        // create a connection to the Wowza media server
        nc = new NetConnection();
        nc.addEventListener(NetStatusEvent.NET_STATUS, ncOnStatus);
        nc.connect(connect.connectStr.text);
        //connect.connectButton.label = "Disconnect";

        // uncomment this to monitor frame rate and buffer length
        //setInterval("updateStreamValues", 500);

        // Attach camera to preview
        videoCamera.clear();
        videoCamera.attachCamera(camera);
        //enablePlayControls(true);
        // Pass status to
        // ExternalInterface.call("logBrowserStreaming", "cameraagreed");
    }
    else
    {
        nsPublish = null;
        nsPlay = null;
        videoCamera.attachNetStream(null);
        videoCamera.clear();
        videoRemote.attachNetStream(null);
        videoRemote.clear();
        nc.close();
        nc = null;
        //enablePlayControls(false);
        doSubscribe.label = 'Play';
        doPublish.label = 'Stream';
        AppendCheckbox.selected = false;
        connect.connectButton.label = "Connect";
        prompt.text = "";
    }
}
// function to monitor the frame rate and buffer length
function updateStreamValues()
{
    if (nsPlay != null)
    {
        fpsText.text = (Math.round(nsPlay.currentFPS * 1000) / 1000) + " fps";
        bufferLenText.text = (Math.round(nsPlay.bufferLength * 1000) / 1000) + " secs";
    }
    else
    {
        fpsText.text = "";
        bufferLenText.text = "";
    }
}
function nsPlayOnStatus(infoObject:NetStatusEvent)
{
    trace("nsPlay: onStatus: " + infoObject.info.code + " (" + infoObject.info.description + ")");
    if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
    {
        prompt.text = infoObject.info.description;
    }
}
function doCloseRecord()
{
    // after we have hit "Stop" recording, and after the buffered video data
    // has been sent to the Wowza Media Server, close the publishing stream
    nsPublish.publish("null");
}

// This function gets called every 250 ms to monitor the progress of
// flushing the video buffer. Once the video buffer is empty we close
// the publishing stream.
function flushVideoBuffer()
{
    var buffLen:Number = nsPublish.bufferLength;
    if (buffLen == 0)
    {
        clearInterval(flushVideoBufferTimer);
        flushVideoBufferTimer = 0;
        doCloseRecord();
        doPublish.label = 'Stream';
    }
}
function nsPublicOnStatus(infoObject:NetStatusEvent)
{
    trace("nsPublish: " + infoObject.info.code + " (" + infoObject.info.description + ")");

    // After calling nsPublish.publish("null") we wait for a status event of
    // "NetStream.Unpublish.Success", which tells us all the video and audio
    // data has been written to the flv file. It is at this time that we can
    // start playing the video we just recorded.
    if (infoObject.info.code == "NetStream.Unpublish.Success")
    {
        //doPlayStart();
    }
    if (infoObject.info.code == "NetStream.Play.StreamNotFound" || infoObject.info.code == "NetStream.Play.Failed")
    {
        prompt.text = infoObject.info.description;
    }
}

function initH264Recording(nsPublish:NetStream)
{
    var h264Settings:H264VideoStreamSettings = new H264VideoStreamSettings();
    h264Settings.setProfileLevel(H264Profile.BASELINE, H264Level.LEVEL_3);
    nsPublish.videoStreamSettings = h264Settings;
}
// Start recording video to the server
function doStreamStart()
{
    //prompt.text = "Starting stream with mic...";
    //prompt.text = microphone;
    ExternalInterface.call("logBrowserStreaming", "starting stream");

    // stop video playback
    //doPlayStop();

    // create a new NetStream object for publishing
    nsPublish = new NetStream(nc);
    var nsPublishClient:Object = new Object();
    nsPublish.client = nsPublishClient;

    // Set the H.264 encoding parameters
    if (testVersion(11, 0, 0, 0))
    {
        initH264Recording(nsPublish);
    }
    else
    {
        prompt.text = "Flash Player 11 or greater is required for H.264 encoding (" + Capabilities.version + ").";
    }

    // trace the NetStream status information
    nsPublish.addEventListener(NetStatusEvent.NET_STATUS, nsPublicOnStatus);

    // publish the stream by name
    nsPublish.publish(nameStr.text, (AppendCheckbox.selected ? "append" : "record"));

    // add custom metadata to the header of the .flv file
    var metaData:Object = new Object();
    metaData["description"] = "Recorded using WebcamRecording example.";
    nsPublish.send("@setDataFrame", "onMetaData", metaData);

    // attach the camera and microphone to the server
    nsPublish.attachCamera(camera);
    nsPublish.attachAudio(microphone);
    ExternalInterface.call("logBrowserStreaming", microphone);

    // set the buffer time to 20 seconds to buffer 20 seconds of video
    // data for better performance and higher quality video
    nsPublish.bufferTime = 20;

    // Disable the audio choice dropdown while streaming
    aCb.enabled = false;
}
function doStreamStop()
{
    ExternalInterface.call("logBrowserStreaming", "stopping stream");

    // stop streaming video and audio to the publishing NetStream object
    nsPublish.attachAudio(null);
    nsPublish.attachCamera(null);

    // After stopping the publishing we need to check if there is video
    // content in the NetStream buffer. If there is data we are going to
    // monitor the video upload progress by calling flushVideoBuffer every
    // 250 ms. If the buffer length is 0 we close the recording immediately.
    var buffLen:Number = nsPublish.bufferLength;
    if (buffLen > 0)
    {
        flushVideoBufferTimer = setInterval(flushVideoBuffer, 250);
        doPublish.label = 'Wait...';
    }
    else
    {
        trace("nsPublish.publish(null)");
        doCloseRecord();
        doPublish.label = 'Start';
    }

    // Re-enable the audio choice dropdown
    aCb.enabled = true;
}
// testVersion checks whether the current Flash version supports H.264 encoding.
function testVersion(v0:Number, v1:Number, v2:Number, v3:Number):Boolean
{
    var version:String = Capabilities.version;
    var index:Number = version.indexOf(" ");
    version = version.substr(index + 1);
    var verParts:Array = version.split(",");
    var ret:Boolean = true;

    // Compare each version component in turn, most significant first.
    while (true)
    {
        if (Number(verParts[0]) < v0)
        {
            ret = false;
            break;
        }
        else if (Number(verParts[0]) > v0)
        {
            break;
        }
        if (Number(verParts[1]) < v1)
        {
            ret = false;
            break;
        }
        else if (Number(verParts[1]) > v1)
        {
            break;
        }
        if (Number(verParts[2]) < v2)
        {
            ret = false;
            break;
        }
        else if (Number(verParts[2]) > v2)
        {
            break;
        }
        if (Number(verParts[3]) < v3)
        {
            ret = false;
            break;
        }
        break;
    }

    trace("testVersion: " + Capabilities.version + ">=" + v0 + "," + v1 + "," + v2 + "," + v3 + ": " + ret);
    return ret;
}
// External triggers from JavaScript:
// allow the stream to start with a startBrowserStreaming call from JS
ExternalInterface.addCallback("startBrowserStreaming", doStreamStart);
// allow the stream to stop with a stopBrowserStreaming call from JS
ExternalInterface.addCallback("stopBrowserStreaming", doStreamStop);

stage.align = "TL";
stage.scaleMode = "noScale";
startCamera();
You can switch your audio source without touching the NetConnection and/or the NetStream.
Take this simple example, where I used a button to change my audio source:
const server:String = 'rtmp://localhost/live';
const stream:String = 'live';
var nc:NetConnection;
var ns_publish:NetStream;

nc = new NetConnection();
nc.addEventListener(
    NetStatusEvent.NET_STATUS,
    function(e:NetStatusEvent):void {
        if (e.info.code == 'NetConnection.Connect.Success') {
            publish();
        }
    }
);
nc.addEventListener(AsyncErrorEvent.ASYNC_ERROR, function(e:AsyncErrorEvent):void {});
nc.connect(server);

function publish():void {
    var cam:Camera = Camera.getCamera();
    // in my case I have 2 mics, and I start with the first one
    var mic:Microphone = Microphone.getMicrophone(0);
    ns_publish = new NetStream(nc);
    ns_publish.attachAudio(mic);
    ns_publish.attachCamera(cam);
    ns_publish.publish(stream, 'record');
}

btn_switch_mic.addEventListener(MouseEvent.CLICK, function(e) {
    // I can switch to the second mic without reinitializing my NetConnection and/or my NetStream
    var mic:Microphone = Microphone.getMicrophone(1);
    ns_publish.attachAudio(mic);
});
I tested this code with Wowza Streaming Engine 4.1.1 (the free version, without the Wowza Transcoder AddOn of course) and Flash Media Server 4.5, and it's working fine.
Note: we can use the same approach to change the video source (Camera).
Hope all that can help you.
I'm researching the Web Audio API and HTML5/JavaScript as a learning process. I've looked into the code of some people's projects to try to break it down as a kind of learning exercise, but something is puzzling me. The code below is some JS for audio on a play/stop button, along with a LOWPASS filter and a quality slider. How would this be changed to autoplay instead of the play/stop button?
It's confusing me like crazy.
var QUAL_MUL = 30;

function FilterSample() {
    this.isPlaying = false;
    loadSounds(this, {buffer: '02.mp3'});
};

FilterSample.prototype.play = function() {
    // Create the source.
    var source = context.createBufferSource();
    source.buffer = this.buffer;
    // Create the filter.
    var filter = context.createBiquadFilter();
    filter.type = filter.LOWPASS;
    filter.frequency.value = 5000;
    // Connect source to filter, filter to destination.
    source.connect(filter);
    filter.connect(context.destination);
    // Play!
    source.start(0);
    source.loop = true;
    //!-- THIS DOESN'T WORK source.autoplay = true;
    // Save source and filter node for later access.
    this.source = source;
    this.filter = filter;
};

// PAUSE button function
FilterSample.prototype.stop = function() {
    this.source.stop(0);
};

// Play button toggle function
FilterSample.prototype.toggle = function() {
    this.isPlaying ? this.stop() : this.play();
    this.isPlaying = !this.isPlaying;
};

FilterSample.prototype.changeFrequency = function(element) {
    var minValue = 40;
    var maxValue = context.sampleRate / 2;
    var numberOfOctaves = Math.log(maxValue / minValue) / Math.LN2;
    var multiplier = Math.pow(2, numberOfOctaves * (element.value - 1.0));
    this.filter.frequency.value = maxValue * multiplier;
};

FilterSample.prototype.changeQuality = function(element) {
    this.filter.Q.value = element.value * QUAL_MUL;
};

FilterSample.prototype.toggleFilter = function(element) {
    this.source.disconnect(0);
    this.filter.disconnect(0);
    if (element.checked) {
        this.source.connect(this.filter);
        this.filter.connect(context.destination);
    } else {
        this.source.connect(context.destination);
    }
};
Cheers in advance if you can help me understand this
You need to modify loadSounds to accept a callback function that gets executed once all the sounds have been fetched and decoded.
Then just change your constructor to:
function FilterSample() {
    this.isPlaying = false;
    loadSounds(this, {buffer: '02.mp3'}, function() {
        this.play();
    }.bind(this));
};
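For reference, here is a minimal sketch of what that modified loadSounds could look like, assuming a single global AudioContext named context as in the original sample (the real loadSounds is not shown in the question, so this is illustrative only):
function loadSounds(obj, soundMap, onLoaded) {
    var names = Object.keys(soundMap);
    var remaining = names.length;
    names.forEach(function (name) {
        var request = new XMLHttpRequest();
        request.open('GET', soundMap[name], true);
        request.responseType = 'arraybuffer';
        request.onload = function () {
            context.decodeAudioData(request.response, function (buffer) {
                obj[name] = buffer; // e.g. this.buffer for {buffer: '02.mp3'}
                if (--remaining === 0 && onLoaded) onLoaded();
            });
        };
        request.send();
    });
}
With that in place, the callback passed from the constructor starts playback as soon as the buffer is ready, which gives you the autoplay behaviour (subject to the browser's autoplay policy).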