How to set volume of audio file in JavaScript - javascript

I can't seem to adjust the volume on this audio element I have when loading the page. Here is the code:
// Create an audio element in memory (not attached to the DOM).
var bleep = new Audio();
bleep.src = "Projectwebcrow2.mp3";
// volume must be in [0.0, 1.0]; 0.1 = 10% of full volume.
// NOTE(review): nothing here ever calls bleep.play(), so no sound is produced.
bleep.volume = 0.1;

If you are using the audio tags, just get the DOM Node in Javascript and manipulate the volume property.
// Grab the first <audio> element on the page.
var audio = document.querySelector('audio');
// Getting — read the property off the element (a bare `volume` identifier
// would be an undefined global and throw a ReferenceError)
console.log(audio.volume); // 1
// Setting
audio.volume = 0.5; // Reduce the Volume by Half
The number that you set should be in the range 0.0 to 1.0, where 0.0 is the quietest and 1.0 is the loudest.
Note: If the value you set is not in the range 0.0 to 1.0, then JS will throw an IndexSizeError.
FOR WEB AUDIO API,
A bit of code first, where we’ll load our music file and play it using the Web Audio API.
// Create the audio context. `webkitAudioContext` alone throws a
// ReferenceError outside WebKit browsers; fall back through window.
var ctx = new (window.AudioContext || window.webkitAudioContext)();

// Download the file at `url` as raw bytes, decode it, then play it.
function loadMusic(url) {
var req = new XMLHttpRequest();
req.open('GET', url, true);
req.responseType = 'arraybuffer'; // we need binary data, not text
req.onload = function() {
// Decode the compressed audio (mp3/ogg/wav...) into a PCM AudioBuffer.
ctx.decodeAudioData(req.response, playSound);
};
req.send();
}

// Play a decoded AudioBuffer through the default output.
function playSound(buffer) {
var src = ctx.createBufferSource();
src.buffer = buffer;
src.connect(ctx.destination);
// Play now! start() replaces noteOn(), which was removed from the spec.
src.start(0);
}

It should work. You need to give us more details (I can't comment). If you didn't already, just add
bleep.play()
Also, autoplaying audio is disabled by default in most browsers, maybe that is the cause.

If your music is an element from the page, you can use:
// Look up the existing <audio id="myMusic"> element on the page.
var music = document.getElementById("myMusic");
music.volume = 0.2; // 20% of full volume; valid range is 0.0-1.0
If it isn't, use:
// Create a detached audio element directly from a URL.
var music = new Audio('audio/correct.mp3');
music.volume = 0.2; // 20% of full volume; valid range is 0.0-1.0
You can check https://www.w3schools.com/tags/av_prop_volume.asp for more details.

Ok..I was going to suggest you create a function for it and call the function. Since it has worked out, best of luck.

Related

Record internal audio of a website via javascript

I made this webapp to compose music, and I wanted to add a feature to download the composition as .mp3/.wav/whatever file format is possible. I've searched for how to do this many times and always gave up, as I couldn't find any examples of how to do it; the only things I found were microphone recorders, but I want to record the final audio destination of the website.
I play audio in this way:
// Shared audio context (webkitAudioContext is the legacy prefixed name).
const a_ctx = new(window.AudioContext || window.webkitAudioContext)()
// Plays one decoded AudioBuffer through the context.
function playAudio(buf){
const source = a_ctx.createBufferSource()
source.buffer = buf
// NOTE(review): pitchKey is defined elsewhere in the app — confirm its value/range.
source.playbackRate.value = pitchKey;
//Other code to modify the audio like adding reverb and changing volume
// NOTE(review): the source is started but never connected to a destination
// here — the connection presumably happens in the omitted code above.
source.start(0)
}
where buf is the AudioBuffer.
To sum up, I want to record the whole window's audio but can't come up with a way to do it.
link to the whole website code on github
Maybe you could use the MediaStream Recording API (https://developer.mozilla.org/en-US/docs/Web/API/MediaStream_Recording_API):
The MediaStream Recording API, sometimes simply referred to as the Media Recording API or the MediaRecorder API, is closely affiliated with the Media Capture and Streams API and the WebRTC API. The MediaStream Recording API makes it possible to capture the data generated by a MediaStream or HTMLMediaElement object for analysis, processing, or saving to disk. It's also surprisingly easy to work with.
Also, you may take a look at this topic: new MediaRecorder(stream[, options]) stream can living modify?. It seems to discuss a related issue and might provide you with some insights.
The following code generates some random noise, applies some transform, plays it and creates an audio control, which allows the noise to be downloaded from the context menu via "Save audio as..." (I needed to change the extension of the saved file to .wav in order to play it.)
<html>
<head>
<script>
// Audio context shared by the whole page (webkit prefix covers older Safari).
const context = new(window.AudioContext || window.webkitAudioContext)()
// Build one second of stereo white noise and hand it to playAudio().
async function run()
{
// 2 channels, sampleRate frames at sampleRate Hz => exactly 1 second of audio.
var myArrayBuffer = context.createBuffer(2, context.sampleRate, context.sampleRate);
// Fill the buffer with white noise;
// just random values between -1.0 and 1.0
for (var channel = 0; channel < myArrayBuffer.numberOfChannels; channel++) {
// This gives us the actual array that contains the data
var nowBuffering = myArrayBuffer.getChannelData(channel);
for (var i = 0; i < myArrayBuffer.length; i++) {
// audio needs to be in [-1.0; 1.0]
nowBuffering[i] = Math.random() * 2 - 1;
}
}
playAudio(myArrayBuffer)
}
// Plays `buf` audibly AND records it via MediaRecorder at the same time.
function playAudio(buf){
// A MediaStreamDestination exposes the audio graph as a MediaStream
// that MediaRecorder can capture.
const streamNode = context.createMediaStreamDestination();
const stream = streamNode.stream;
const recorder = new MediaRecorder( stream );
const chunks = [];
// Collect encoded chunks as they arrive; build the final Blob on stop.
recorder.ondataavailable = evt => chunks.push( evt.data );
recorder.onstop = evt => exportAudio( new Blob( chunks ) );
const source = context.createBufferSource()
// Stop recording exactly when playback finishes.
source.onended = () => recorder.stop();
source.buffer = buf
source.playbackRate.value = 0.2
// Two connections: one feeds the recorder, one feeds the speakers.
source.connect( streamNode );
source.connect(context.destination);
source.start(0)
recorder.start();
}
// Wraps the recorded Blob in an <audio> element so it can be replayed
// and saved via the context menu ("Save audio as...").
function exportAudio( blob ) {
const aud = new Audio( URL.createObjectURL( blob ) );
aud.controls = true;
document.body.prepend( aud );
}
</script>
</head>
<!-- Browsers start the AudioContext suspended until a user gesture;
     the button below resumes it. -->
<body onload="javascript:run()">
<input type="button" onclick="context.resume()" value="play"/>
</body>
</html>
Is this what you were looking for?

AnalyserNode.getFloatFrequencyData always returns -Infinity

Alright, so I'm trying to determine the intensity (in dB) on samples of an audio file which is recorded by the user's browser.
I have been able to record it and play it through an HTML element.
But when I try to use this element as a source and connect it to an AnalyserNode, AnalyserNode.getFloatFrequencyData always returns an array full of -Infinity, getByteFrequencyData always returns zeroes, getByteTimeDomainData is full of 128.
Here's my code:
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var analyser = audioCtx.createAnalyser();
// frequencyBinCount is fftSize / 2 (1024 with the default fftSize of 2048).
var bufferLength = analyser.frequencyBinCount;
var data = new Float32Array(bufferLength);
// Runs when the MediaRecorder (created elsewhere) stops recording.
mediaRecorder.onstop = function(e) {
var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
chunks = [];
var audioURL = window.URL.createObjectURL(blob);
// audio is an HTML audio element
audio.src = audioURL;
audio.addEventListener("canplaythrough", function() {
source = audioCtx.createMediaElementSource(audio);
source.connect(analyser);
analyser.connect(audioCtx.destination);
// NOTE(review): the data is sampled exactly once, immediately, and the
// element is never play()ed — at this instant the analyser has processed
// no audio yet, which is consistent with the all -Infinity readings.
analyser.getFloatFrequencyData(data);
console.log(data);
});
}
Any idea why the AnalyserNode behaves like the source is empty/mute? I also tried to put the stream as source while recording, with the same result.
I ran into the same issue, thanks to some of your code snippets, I made it work on my end (the code bellow is typescript and will not work in the browser at the moment of writing):
// Decode raw file bytes (this.result from a FileReader) into an AudioBuffer.
// NOTE(review): this fragment is truncated as posted — the .then callback
// is never closed.
audioCtx.decodeAudioData(this.result as ArrayBuffer).then(function (buffer: AudioBuffer) {
soundSource = audioCtx.createBufferSource();
soundSource.buffer = buffer;
//soundSource.connect(audioCtx.destination); //I do not need to play the sound
soundSource.connect(analyser);
soundSource.start(0);
// Poll the analyser repeatedly; a single immediate read would still see silence.
setInterval(() => {
calc(); //In here, I will get the analyzed data with analyser.getFloatFrequencyData
}, 300); //This can be changed to 0.
// The interval helps with making sure the buffer has the data
Some explanation (I'm still a beginner when it comes to the Web Audio API, so my explanation might be wrong or incomplete):
An analyzer needs to be able to analyze a specific part of your sound file. In this case I create a AudioBufferSoundNode that contains the buffer that I got from decoding the audio data. I feed the buffer to the source, which eventually will be able to be copied inside the Analyzer. However, without the interval callback, the buffer never seems to be ready and the analysed data contains -Inifinity (which I assume is the absence of any sound, as it has nothing to read) at every index of the array. Which is why the interval is there. It analyses the data every 300ms.
Hope this helps someone!
You need to fetch the audio file and decode the audio buffer.
The URL of the audio source must also be on the same domain or have the correct CORS headers (as mentioned by Anthony).
Note: Replace <FILE-URI> with the path to your file in the example below.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source;
var analyser = audioCtx.createAnalyser();
var button = document.querySelector('button');
var freqs;
var times;
// Start from a click so the AudioContext is allowed to run
// (browsers suspend audio output until a user gesture).
button.addEventListener('click', (e) => {
fetch("<FILE-URI>", {
headers: new Headers({
"Content-Type" : "audio/mpeg"
})
}).then(function(response){
// Read the whole response body as raw bytes.
return response.arrayBuffer()
}).then((ab) => {
// Decode the compressed audio into a PCM AudioBuffer.
audioCtx.decodeAudioData(ab, (buffer) => {
source = audioCtx.createBufferSource();
source.connect(audioCtx.destination)
source.connect(analyser);
source.buffer = buffer;
source.start(0);
viewBufferData();
});
});
});
// Watch the changes in the audio buffer: poll the analyser once per second
// and log frequency- and time-domain snapshots.
function viewBufferData() {
// Configure the analyser ONCE, before sizing the sample arrays —
// frequencyBinCount is fftSize / 2, so the arrays must reflect the final
// fftSize. (The original re-set these and reallocated both arrays on
// every tick, which is wasteful and order-sensitive.)
analyser.smoothingTimeConstant = 0.8;
analyser.fftSize = 2048;
freqs = new Uint8Array(analyser.frequencyBinCount);
times = new Uint8Array(analyser.frequencyBinCount);
setInterval(function(){
analyser.getByteFrequencyData(freqs);
analyser.getByteTimeDomainData(times);
console.log(freqs)
console.log(times)
}, 1000)
}
Is the source file from a different domain? That would fail in createMediaElementSource.

Play sound with delay (100ms)?

What is the best way to play sound with delay 50ms or 100ms?
Here is something, what i tried:
var beat = new Audio('/sound/BEAT.wav');
var time = 300;
playbeats();
// Re-schedules itself with setTimeout, so beats drift by however long each
// callback takes (setTimeout guarantees a minimum delay only).
function playbeats(){
// cloneNode() lets a new playback start while the previous 721 ms clip
// is still playing.
beat.cloneNode().play();
setTimeout(playbeats, time);
}
This is working correctly but my goal is to play BEAT.wav after every 100ms. When I change "time" variable to 100, then it is so "laggy".
721ms is my BEAT.wav (that's why im using cloneNode())
What is alternatives to solve this?
You can use setInterval(), the arguments are the same.
// Fire playbeats() every 100 ms; setInterval re-schedules automatically,
// unlike the chained setTimeout in the question.
setInterval(function() {
playbeats();
}, 100);
and your function playbeats function should be.
// Start a fresh playback each call: cloning the element lets the new beat
// overlap the previous (still playing) 721 ms clip.
function playbeats(){
beat.cloneNode().play();
}
your whole program should be like this.
// Complete example: one template element, cloned and played every 100 ms.
var beat = new Audio('/sound/BEAT.wav');

// Each tick clones the template so overlapping beats can play concurrently.
function playbeats(){
var clone = beat.cloneNode();
clone.play();
}

// setInterval handles the re-scheduling; 100 ms between beats.
setInterval(playbeats, 100);
You can use the Web Audio API, but the code will be a bit different. If you want the Web Audio API's timing and loop capabilities, you will need to load the file into a buffer first. It also requires that your code is run on a server. Here is an example:
var audioContext = new AudioContext();
var audioBuffer;
// Load BEAT.wav up-front so playback can start with no decode latency.
var getSound = new XMLHttpRequest();
getSound.open("get", "sound/BEAT.wav", true);
getSound.responseType = "arraybuffer";
getSound.onload = function() {
audioContext.decodeAudioData(getSound.response, function(buffer) {
audioBuffer = buffer;
});
};
getSound.send();

// Play 300 ms of the (looping) buffer from its start.
function playback() {
// Guard: a click before the file has loaded/decoded would otherwise hand
// the source an undefined buffer and throw.
if (!audioBuffer) return;
var playSound = audioContext.createBufferSource();
playSound.buffer = audioBuffer;
playSound.loop = true;
playSound.connect(audioContext.destination);
// start(when, offset, duration): begin now, from offset 0, stop after 0.3 s.
playSound.start(audioContext.currentTime, 0, 0.3);
}

window.addEventListener("mousedown", playback);
I would also recommend using the Web Audio API. From there, you can simply loop a buffer source node every 100ms or 50ms or whatever time you want.
To do this, as stated in other responses, you'll need to use an XMLHttpRequest to load the sound file via a server
// Set up the Web Audio context.
var audioCtx = new AudioContext();

// Placeholder buffer: 2 channels x 4410 frames @ 44100 Hz = 100 ms.
// It is replaced by the decoded file once the request below completes.
var buffer = audioCtx.createBuffer(2, 4410, 44100);

// Fetch the sound file from the server as raw bytes.
var request = new XMLHttpRequest();
request.open('GET', '/sound/BEAT.wav', true);
request.responseType = 'arraybuffer';
request.onload = function () {
// Decode the compressed bytes into PCM and swap the buffer in.
audioCtx.decodeAudioData(request.response, function (decoded) {
buffer = decoded;
});
}
request.send();
Now you can make a Buffer Source Node to loop the playback
// Build the looping source node.
var bufferSource = audioCtx.createBufferSource();

// Hand it the buffer decoded above (or the placeholder, if still loading)...
bufferSource.buffer = buffer;

// ...and make it repeat indefinitely between the loop points below.
bufferSource.loop = true;

// Loop points are in seconds: 0 to 0.1 s = a 100 ms loop. This is a little
// redundant, since the buffer itself is 100 ms long and would wrap at its
// own end by default.
bufferSource.loopStart = 0;
bufferSource.loopEnd = 0.1;

// Route it to the speakers and begin playback.
bufferSource.connect(audioCtx.destination);
bufferSource.start();
Note that if you stop the playback via bufferSource.stop(), you will not be able to start it again. You can only call start() once, so you'll need to create a new source node if you want to start playback again.
Note that because of the way the sound file is loaded via an XMLHttpRequest, if you try to test this on your machine without running a server, you'll get a cross-origin request error on most browsers. So the simplest way to get around this if you want to test this on your machine is to run a Python SimpleHTTPServer

How to draw analyzer from audio url?

How to draw analyzer from audio url?
I am getting the audio URL (http://api.server.com/uploads/files/ecae64b511b1266fa3930731ec379d2dcdcc7546.wav) from an API server. I want to draw this sound on a canvas. The following function works on a blob object (recorded from the user), but it does not work with the URL:
// Resolve the vendor-prefixed constructor (older WebKit browsers).
$window.AudioContext = $window.AudioContext || $window.webkitAudioContext;
vm.audioContext = new AudioContext();
// Wires a live MediaStream (e.g. from getUserMedia) into gain -> analyser -> recorder.
// NOTE(review): createMediaStreamSource takes a live stream, which is why
// this works for the recorded blob but not for a plain remote URL.
function gotStream(stream) {
vm.inputPoint = vm.audioContext.createGain();
vm.realAudioInput = vm.audioContext.createMediaStreamSource(stream);
vm.audioInput = vm.realAudioInput;
vm.audioInput.connect(vm.inputPoint);
// audioInput = convertToMono( input );
vm.analyserNode = vm.audioContext.createAnalyser();
vm.analyserNode.fftSize = 2048;
vm.inputPoint.connect(vm.analyserNode);
// Recorder appears to be an external recorder library — TODO confirm.
vm.audioRecorder = new Recorder(vm.inputPoint);
// Zero-gain tap: keeps the graph connected to the destination (so it keeps
// processing) without producing audible output.
var zeroGain = vm.audioContext.createGain();
zeroGain.gain.value = 0.0;
vm.inputPoint.connect(zeroGain);
zeroGain.connect(vm.audioContext.destination);
updateAnalysers();
}
This is probably a CORS issue if the file is from a different origin from the code. If you don't fix this, MediaStreamSource will probably just output zeroes.

How can i publish audio by web socket

I am developing an application which publish audio stream from mic through web sockets i am not able to play web socket response in audio control or can anyone tell how to play audio buffer in audio control please help me out?
I use the following code to play the sounds created with a software-synth.
The samples need to be in the range [-1.0 .. 1.0]. You should initialize context in the page init function.
// `webkitAudioContext` alone throws a ReferenceError outside WebKit; use the
// standard name with a prefixed fallback.
var context = new (window.AudioContext || window.webkitAudioContext)();

function playSound(buffer, freq, vol) // buffer, sampleRate, 0-100
{
// Copy the raw samples (range [-1.0 .. 1.0]) into a 1-channel AudioBuffer.
var mBuffer = context.createBuffer(1, buffer.length, freq);
var dataBuffer = mBuffer.getChannelData(0);
var soundBuffer = buffer;
var i, n = buffer.length;
for (i=0;i<n;i++)
dataBuffer[i] = soundBuffer[i];
var node = context.createBufferSource();
node.buffer = mBuffer;
// AudioBufferSourceNode has no .gain in the standard API — route the
// source through a GainNode to scale the volume (vol 0-100 -> 0.0-0.5).
var gainNode = context.createGain();
gainNode.gain.value = 0.5 * vol/100.0;
node.connect(gainNode);
gainNode.connect(context.destination);
// start() replaces noteOn(), which was removed from the spec.
node.start(0);
}

Categories