I'm building an audio player as a mobile app with React Native, and I'm aiming for a perfectly gapless transition from one audio track to the next, including when looping. Right now there is a heartbeat-like skip of about half a second between songs, and the same moment of silence when looping any given song.
Currently using Expo Audio player:
https://docs.expo.dev/versions/latest/sdk/audio/
Also tried:
https://www.npmjs.com/package/react-native-vlc-media-player
My Audioprovider.js, where the logic lives, looks like this:
onPlaybackStatusUpdate = async playbackStatus => {
  if (playbackStatus.isLoaded && playbackStatus.isPlaying) {
    this.updateState(this, {
      playbackPosition: playbackStatus.positionMillis,
      playbackDuration: playbackStatus.durationMillis,
    });
  }

  if (playbackStatus.isLoaded && !playbackStatus.isPlaying) {
    storeAudioForNextOpening(
      this.state.currentAudio,
      this.state.currentAudioIndex,
      playbackStatus.positionMillis
    );
  }

  let durationSeconds = playbackStatus.durationMillis / 1000;
  let positionSeconds = playbackStatus.positionMillis / 1000;

  // Fires when the track just finished, or when it is within ~2 seconds of the end.
  if (playbackStatus.didJustFinish || parseInt(durationSeconds) - 2 === parseInt(positionSeconds)) {
    if (this.state.isPlayListRunning) {
      let audio;
      const indexOnPlayList = this.state.activePlayList.audios.findIndex(
        ({ id }) => id === this.state.currentAudio.id
      );
      // When looping, stay on the same index; otherwise move on to the next track.
      const nextIndex = this.state.isLoop ? indexOnPlayList : indexOnPlayList + 1;
      audio = this.state.activePlayList.audios[nextIndex];
      if (!audio) audio = this.state.activePlayList.audios[0];

      const indexOnAllList = this.state.audioFiles.findIndex(
        ({ id }) => id === audio.id
      );

      const status = await playNext(this.state.playbackObj, audio.url);
      return this.updateState(this, {
        soundObj: status,
        isPlaying: true,
        currentAudio: audio,
        currentAudioIndex: indexOnAllList,
      });
    }
  }
};
Any ideas? Is it even possible?
Github:
https://github.com/samirhassen/OctaCoil
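One thing worth noting: if the gap only appears because the track is reloaded and restarted from JavaScript at the end of each pass, expo-av can loop a loaded sound natively, which avoids that round trip entirely. A minimal sketch, assuming the expo-av Audio.Sound API (not taken from the repo above):

// Minimal sketch with expo-av: let the native player handle the loop so the
// JS status callback never has to reload the file at the end of the track.
import { Audio } from 'expo-av';

async function playLooped(uri) {
  const { sound } = await Audio.Sound.createAsync(
    { uri },
    { shouldPlay: true, isLooping: true } // native loop, no JS reload between passes
  );
  return sound; // later: sound.setIsLoopingAsync(false) to let the playlist advance instead
}

With native looping enabled, the status-callback branch above would only need to handle advancing to the next playlist entry, not restarting the current one.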
So I found a way to create multiple peer connections, but I can't make the video work, although there is no error shown.
UPDATE: DEMO, but the demo does not allow the localStream to work, so try the index.html in your own browser.
First, let's say we have this HTML file:
<!-- This container is for the multiple videos -->
<div class="item-videos">
  <!-- The first video is the starter video -->
  <video id="video1" playsinline autoplay muted></video>
  <!-- The joined videos get appended here -->
</div>
<div>
<button id="start"> Start </button>
<button id="join"> Join </button>
<button id="hangup"> Hang Up </button>
</div>
First, I take the initial inputs for the starter in script.js:
let containers = document.querySelector('.item-videos');
const startButton = document.querySelector('#start')
const joinButton = document.querySelector("#join")
const video1 = document.querySelector('video#video1');
let localStream;
// These are the RTCPeerConnection arrays.
let pcLocals = [];
let pcRemotes = [];
const offerOptions = {
offerToReceiveAudio: 1,
offerToReceiveVideo: 1
};
const servers = {
iceServers: [
{
urls: ['stun:stun1.l.google.com:19302', 'stun:stun2.l.google.com:19302'],
},
],
iceCandidatePoolSize: 10,
};
Then let's say we start our call first, which is what creates the connection.
When we click the start button, this is the code that runs:
...
function gotStream(stream) {
console.log('Received local stream');
video1.srcObject = stream;
localStream = stream;
joinButton.disabled = false;
}
function start() {
console.log('Requesting local stream');
startButton.disabled = true;
navigator.mediaDevices
.getUserMedia({
audio: true,
video: true
})
.then(gotStream)
.catch(e => console.log('getUserMedia() error: ', e));
}
startButton.addEventListener("click",start)
Now for the join button. Let's say I have let count = 0,
and I create a video element each time I click the button.
So the code for the join button click is:
let count = 0;
joinButton.addEventListener("click",() => {
count += 1
//Creating Video Element
const addVideo = document.createElement('video')
addVideo.setAttribute('id',`video${count + 1}`)
addVideo.setAttribute('class',`try-${count + 1}`)
// Here I believe this part is where my error is: the video element may not be set up yet for the RTCPeerConnection functions.
containers.appendChild(addVideo)
const videoCall = containers.querySelectorAll('video')[count]
// Here I will create RTCPeerConnections and push it in the pcLocals and pcRemotes;
const init_localStreams = new RTCPeerConnection(servers);
const init_remoteStreams = new RTCPeerConnection(servers);
pcLocals.push(init_localStreams)
pcRemotes.push(init_remoteStreams)
console.log(pcLocals)
console.log(pcRemotes)
//Here I'm passing the stream videos in RTCPeer Arrays...
pcRemotes[count - 1].ontrack = (ev) => {
function gotRemoteStream(e,video,idx) {
if (video.srcObject !== e.streams[0]) {
video.srcObject = e.streams[0]
console.log(`pc${idx+1}: received remote stream`);
}
}
gotRemoteStream(ev,videoCall,count - 1)
}
//Here I'm passing the tracks of the video in each locals
localStream.getTracks().forEach((track) =>
{
pcLocals[count - 1].addTrack(track, localStream)
});
function onAddIceCandidateSuccess() {
console.log('AddIceCandidate success.');
}
function onAddIceCandidateError(error) {
console.log(`Failed to add ICE candidate: ${error.toString()}`);
}
function handleCandidate(candidate, dest, prefix, type) {
dest.addIceCandidate(candidate)
.then(onAddIceCandidateSuccess, onAddIceCandidateError);
console.log(`${prefix}New ${type} ICE candidate: ${candidate ? candidate.candidate : '(null)'}`);
}
function iceCallbackRemote(e,local_) {
handleCandidate(e.candidate,local_,`pc${count}: `, 'remote')
}
function iceCallbackLocal(e,remote_) {
handleCandidate(e.candidate,remote_,`pc${count}: `, 'local')
}
pcLocals[count - 1].onicecandidate = (ev) => {
iceCallbackRemote(ev,pcLocals[count - 1])
}
pcRemotes[count - 1].onicecandidate = (ev) => {
iceCallbackLocal(ev,pcRemotes[count - 1])
}
function gotDescriptionRemote(desc) {
pcRemotes[count-1].setLocalDescription(desc);
// console.log(`Answer from pc1Remote\n${desc.sdp}`);
pcLocals[count-1].setRemoteDescription(desc);
}
function gotDescriptionLocal(desc) {
pcLocals[count-1].setLocalDescription(desc);
// console.log(`Answer from pc1Remote\n${desc.sdp}`);
pcRemotes[count-1].setRemoteDescription(desc);
pcRemotes[count-1].createAnswer().then(gotDescriptionRemote,onCreateSessionDescriptionError)
}
function onCreateSessionDescriptionError(error) {
console.log(`Failed to create session description: ${error.toString()}`);
}
pcLocals[count - 1]
  .createOffer(offerOptions)
  .then(gotDescriptionLocal, onCreateSessionDescriptionError)
})
I somewhat doubt whether my video element is ready before the RTCPeerConnection operations happen. I don't know where my errors are here. I just tried to make multiple peer connections as documented in the WEBRTC TUTORIAL.
I've looked at your CodeSandbox code and found two issues:
The playback of your created video is never started
Your icecandidates are set on the wrong peerconnection
To address the first problem, you need to start playback either with autoplay or by calling addVideo.play(). For the autoplay solution, you can simply add:
addVideo.setAttribute("autoplay", true);
addVideo.setAttribute("playsinline", true);
To address the second problem, you need to change the passed peerconnections in the onicecandidate event handlers:
pcLocals[count - 1].onicecandidate = (ev) => {
//old: iceCallbackRemote(ev, pcLocals[count - 1]);
//new:
iceCallbackRemote(ev, pcRemotes[count - 1]);
};
pcRemotes[count - 1].onicecandidate = (ev) => {
//old: iceCallbackRemote(ev, pcRemotes[count - 1]);
//new:
iceCallbackLocal(ev, pcLocals[count - 1]);
};
The ICE candidates need to be exchanged, meaning candidates gathered by the local connection must be passed to the remote connection and vice versa. Before, you were adding the ICE candidates from the local connection back to the local connection itself, which is why it didn't work. This is also why the connectionState was "connecting" and never changed to "connected": the connection was never fully established and was still waiting for the ICE candidate exchange.
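If you want to verify the fix, one option (not part of the original code; the helper name below is illustrative) is to watch connectionState on both peer connections inside the join handler:

// Illustrative helper: log state changes to confirm the ICE exchange works.
function watchConnection(pc, label) {
  pc.addEventListener('connectionstatechange', () => {
    console.log(`${label}: connectionState = ${pc.connectionState}`);
    // With the candidates crossed correctly, this should reach "connected".
  });
}

// Inside the join click handler, after creating the peer connections:
watchConnection(pcLocals[count - 1], `pc${count} local`);
watchConnection(pcRemotes[count - 1], `pc${count} remote`);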
I used the code from Nikolay's answer https://jsfiddle.net/makhalin/nzw5tv1q/ in my Ionic - Angular PWA (I put it in a custom.js file and imported it in angular.json). It works great if I open it in Chrome or Edge on Android, but if I install it as a PWA it works the first time, then stops working.
Is there anything I must do to make it work as a PWA?
//have a console on mobile
const consoleOutput = document.getElementById("console");
const log = function (msg) {
consoleOutput.innerText = `${consoleOutput.innerText}\n${msg}`;
console.log(msg);
}
//Test browser support
const SUPPORTS_MEDIA_DEVICES = 'mediaDevices' in navigator;
if (SUPPORTS_MEDIA_DEVICES) {
//Get the environment camera (usually the second one)
navigator.mediaDevices.enumerateDevices().then(devices => {
const cameras = devices.filter((device) => device.kind === 'videoinput');
if (cameras.length === 0) {
log('No camera found on this device.');
}
// Create stream and get video track
navigator.mediaDevices.getUserMedia({
video: {
facingMode: 'environment',
}
}).then(stream => {
const track = stream.getVideoTracks()[0];
//Create image capture object and get camera capabilities
const imageCapture = new ImageCapture(track)
imageCapture.getPhotoCapabilities().then(capabilities => {
//let there be light!
const btn = document.querySelector('.switch');
const torchSupported = !!capabilities.torch || (
'fillLightMode' in capabilities &&
capabilities.fillLightMode.length != 0 &&
capabilities.fillLightMode != 'none'
);
if (torchSupported) {
let torch = false;
btn.addEventListener('click', function (e) {
try {
track.applyConstraints({
advanced: [{
torch: (torch = !torch)
}]
});
} catch (err) {
log(err);
}
});
} else {
log("No torch found");
}
}).catch(log);
}).catch(log);
}).catch(log);
//The light will stay on as long as the track exists
}
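One guess worth trying (an assumption, not a confirmed cause): when the installed PWA is backgrounded, the camera track can be ended, and applyConstraints() on the dead track then has no effect. Re-acquiring the stream when the app becomes visible again might help; a rough sketch:

// Assumption: re-acquire the camera when the old track has ended (e.g. after the
// PWA was backgrounded), so applyConstraints() targets a live track again.
let track;

async function acquireTrack() {
  const stream = await navigator.mediaDevices.getUserMedia({
    video: { facingMode: 'environment' }
  });
  track = stream.getVideoTracks()[0];
  track.addEventListener('ended', acquireTrack); // track died, get a new one
}

document.addEventListener('visibilitychange', () => {
  if (document.visibilityState === 'visible' &&
      (!track || track.readyState === 'ended')) {
    acquireTrack().catch(log);
  }
});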
I am attempting to add silence before and after an audio file using JavaScript. My idea was to add zero values at the beginning and end of the audio data. Both of the methods I tried failed.
Method 1, playback audio via AudioContext
const context = new AudioContext()
async function createSoundArray() {
await fetch('file.mp3')
.then(response => response.arrayBuffer())
.then(arrayBuffer => context.decodeAudioData(arrayBuffer))
.then(audioBuffer => {
const data = audioBuffer.getChannelData(0) // Float32Array(100000)
editAudio(data)
});
}
createSoundArray()
function editAudio(data){
const editedData = []
for(var i = 0; i < data.length + 50000; i++){ // adds 25000 zero values (silence) to the beginning and end of the array
if( i > 25000 && i <= 125000){
editedData.push(data[i - 25000])
}else{
editedData.push(0)
}
}
console.log(editedData) // array(150000)
const Uint32 = Uint32Array.from(editedData)
const audioCtx = new AudioContext()
console.log(Uint32.buffer)// ArrayBuffer(150000)
audioCtx.decodeAudioData(Uint32.buffer, function (buf) { // error: could not decode
const playback = audioCtx.createBufferSource();
playback.buffer = buf;
playback.connect(audioCtx.destination);
audioCtx.resume()
console.log(playback.buffer)
});
}
This method resulted in the following error:
The buffer passed to decodeAudioData contains an unknown content type.
Method 2, playback audio via an audio element
const context = new AudioContext()
async function createSoundArray() {
await fetch('file.mp3')
.then(response => response.arrayBuffer())
.then(arrayBuffer => context.decodeAudioData(arrayBuffer))
.then(audioBuffer => {
const data = audioBuffer.getChannelData(0) // Float32Array(100000)
editAudio(data)
});
}
createSoundArray()
function editAudio(data){
const editedData = []
for(var i = 0; i < data.length + 50000; i++){ // adds 25000 zero values (silence) to the beginning and end of the array
if( i > 25000 && i <= 125000){
editedData.push(data[i - 25000])
}else{
editedData.push(0)
}
}
const blob = new Blob(editedData)
const audioUrl = URL.createObjectURL(blob);
const audio = new Audio(audioUrl);
audio.play() // error: no supported source found
}
This method resulted in the following error:
Failed to play audio because no supported source was found
Because both of these methods failed, I believe the added zeros are causing the data to no longer be recognized as audio. However, I don't understand why, because I am simply adding zeros, which are values that already exist inside the audio data before editing. Does anyone know what I am doing wrong or why this is not working?
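For context, the values returned by getChannelData() are raw PCM samples, not an encoded file, so neither decodeAudioData() nor an audio element can consume them directly, with or without the extra zeros, which is likely why both attempts fail. One way to pad with silence instead, sketched under the assumption that the Web Audio API is available and the source is mono (the function name and the 25000-sample pad are illustrative):

// Sketch: build a longer AudioBuffer and copy the decoded samples in at an
// offset. New buffers are zero-filled, so the padding is silence by default.
const context = new AudioContext();

async function playWithSilence(url, pad = 25000) {
  const response = await fetch(url);
  const arrayBuffer = await response.arrayBuffer();
  const decoded = await context.decodeAudioData(arrayBuffer);

  const padded = context.createBuffer(
    1,                        // one channel, matching getChannelData(0)
    decoded.length + 2 * pad, // room for leading and trailing silence
    decoded.sampleRate
  );
  // Only the middle section needs to be written.
  padded.copyToChannel(decoded.getChannelData(0), 0, pad);

  const source = context.createBufferSource();
  source.buffer = padded;
  source.connect(context.destination);
  source.start();
}

playWithSilence('file.mp3');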
I am making an application that reads and plays two audio files.
CodeSandBox
The above CodeSandBox has the following specifications.
Press the "play" button to play the audio.
The volume of each of the two audio tracks can be changed.
Problem
When playing audio, there is sometimes a delay.
However, there is not always an audio delay, and there are times when two tracks can be played back at exactly the same time.
Although not implemented in the CodeSandBox above, the application I am currently working on implements a seek bar to indicate the current playback position.
Moving the seek bar reloads the audio, and that sometimes cures the delay.
On the other hand, moving the seek bar may introduce a delay even though the two tracks had been playing at exactly the same timing.
Anyway, is there a way to play multiple audio tracks at the same time in a stable and consistent manner?
Code
let ctx,
tr1,
tr2,
tr1gain = 0,
tr2gain = 0,
start = false;
const trackList = ["track1", "track2"];
const App = () => {
useEffect(() => {
ctx = new AudioContext();
tr1 = ctx.createBufferSource();
tr2 = ctx.createBufferSource();
tr1gain = ctx.createGain();
tr2gain = ctx.createGain();
trackList.forEach(async (item) => {
const res = await fetch("/" + item + ".mp3");
const arrayBuffer = await res.arrayBuffer();
const audioBuffer = await ctx.decodeAudioData(arrayBuffer);
item === "track1"
? (tr1.buffer = audioBuffer)
: (tr2.buffer = audioBuffer);
});
tr1.connect(tr1gain);
tr1gain.connect(ctx.destination);
tr2.connect(tr2gain);
tr2gain.connect(ctx.destination);
return () => ctx.close();
}, []);
const [playing, setPlaying] = useState(false);
const playAudio = () => {
if (!start) {
tr1.start();
tr2.start();
start = true;
}
ctx.resume();
setPlaying(true);
};
const pauseAudio = () => {
ctx.suspend();
setPlaying(false);
};
const changeVolume = (e) => {
const target = e.target.ariaLabel;
target === "track1"
? (tr1gain.gain.value = e.target.value)
: (tr2gain.gain.value = e.target.value);
};
const Inputs = trackList.map((item, index) => (
<div key={index}>
<span>{item}</span>
<input
type="range"
onChange={changeVolume}
step="any"
max="1"
aria-label={item}
/>
</div>
));
return (
<>
<button
onClick={playing ? pauseAudio : playAudio}
style={{ display: "block" }}
>
{playing ? "pause" : "play"}
</button>
{Inputs}
</>
);
};
When calling start() without a parameter, it's the same as calling start() with the currentTime of the AudioContext as the first parameter. In your example that would look like this:
tr1.start(tr1.context.currentTime);
tr2.start(tr2.context.currentTime);
By definition, the currentTime of an AudioContext increases over time, so it's entirely possible that it advances between the two calls. Therefore a first attempt to fix the problem could be to make sure both function calls use the same value.
const currentTime = tr1.context.currentTime;
tr1.start(currentTime);
tr2.start(currentTime);
Since currentTime usually advances in steps of one render quantum, you could add an extra safety net by adding a little delay.
const currentTime = tr1.context.currentTime + 128 / tr1.context.sampleRate;
tr1.start(currentTime);
tr2.start(currentTime);
If this doesn't help you could also use an OfflineAudioContext to render your mix upfront into a single AudioBuffer.
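A rough sketch of that last option, assuming both decoded AudioBuffers share the same sample rate (the function and parameter names are illustrative, not from the question's code):

// Sketch: mix both decoded tracks into one AudioBuffer ahead of time, then
// play that single buffer, so the two tracks can never drift apart.
async function renderMix(audioBuffer1, audioBuffer2, gain1 = 1, gain2 = 1) {
  const length = Math.max(audioBuffer1.length, audioBuffer2.length);
  const offline = new OfflineAudioContext(2, length, audioBuffer1.sampleRate);

  [[audioBuffer1, gain1], [audioBuffer2, gain2]].forEach(([buffer, gain]) => {
    const source = offline.createBufferSource();
    source.buffer = buffer;
    const gainNode = offline.createGain();
    gainNode.gain.value = gain;
    source.connect(gainNode).connect(offline.destination);
    source.start(0);
  });

  return offline.startRendering(); // resolves with the mixed AudioBuffer
}

Note that with this approach the gain values are baked in at render time, so changing a volume slider would require re-rendering the mix.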
I am writing a front end to a game engine in Javascript. The engine runs on the server and sends pictures and sounds to the web browser through 'SignalR'. I am using the React framework.
As the game runs the server sends small sound samples in WAVE format, passed into this component through the AudioPlayerProps.
I am having two main issues with the sound. The first is that the sound sounds 'disjointed'.
And the second is that after a time the sound just stops playing. I can see sound being queued in the audio queue, but the 'playNextAudioTrack' method is not being called. There are no errors in the console to explain this.
If this is not the best way to provide sound for a game front end, please let me know.
Also, if you want to see any more code, please let me know. This is a huge multi-tiered project, so I am only showing what I think you need to see.
Right now I am testing in Chrome. At this stage I need to turn on the dev tools to get past the 'user didn't interact with the page so you can't play any sound' issue. I will sort that out in due course.
import * as React from "react";
import { useEffect, useState } from "react";
export interface AudioPlayerProps {
data: string;
}
export const AudioPlayer = function (props: AudioPlayerProps): JSX.Element {
const [audioQueue, setAudioQueue] = useState<string[]>([])
useEffect(
() => {
if (props.data != undefined) {
audioQueue.push(props.data);
}
}, [props.data]);
const playNextAudioTrack = () => {
if (audioQueue.length > 0) {
const audioBase64 = audioQueue.pop();
const newAudio = new Audio(`data:audio/wav;base64,${audioBase64}`)
newAudio.play().then(playNextAudioTrack).catch(
(error) => {
setTimeout(playNextAudioTrack, 10);
}
)
}
else {
setTimeout(playNextAudioTrack, 10);
}
}
useEffect(playNextAudioTrack, []);
return null;
}
I solved my own problem. Here is the TypeScript class I wrote to handle chunked audio in the browser.
I am not a JavaScript expert, so there may be faults.
EDIT: After running it in 15 minute lots several times, it failed a couple of times at about the 10 minute mark. Still needs some work.
// mostly from https://gist.github.com/revolunet/e620e2c532b7144c62768a36b8b96da2
// Modified to play chunked audio for games
import { setInterval } from "timers";
//
const MaxScheduled = 10;
const MaxQueueLength = 2000;
const MinScheduledToStopDraining = 5;
export class WebAudioStreamer {
constructor() {
this.isDraining = false;
this.isWorking = false;
this.audioStack = [];
this.nextTime = 0;
this.numberScheduled = 0;
setInterval(() => {
if (this.audioStack.length && !this.isWorking) {
this.scheduleBuffers(this);
}
}, 0);
}
context: AudioContext;
audioStack: AudioBuffer[];
nextTime: number;
numberScheduled: number;
isDraining: boolean;
isWorking: boolean;
pushOntoAudioStack(encodedBytes: number[]) {
if (this.context == undefined) {
this.context = new (window.AudioContext)();
}
const encodedBuffer = new Uint8ClampedArray(encodedBytes).buffer;
const streamer: WebAudioStreamer = this;
if (this.audioStack.length > MaxQueueLength) {
this.audioStack = [];
}
streamer.context.decodeAudioData(encodedBuffer, function (decodedBuffer) {
streamer.audioStack.push(decodedBuffer);
}
);
}
scheduleBuffers(streamer: WebAudioStreamer) {
streamer.isWorking = true;
if (streamer.context == undefined) {
streamer.context = new (window.AudioContext)();
}
if (streamer.isDraining && streamer.numberScheduled <= MinScheduledToStopDraining) {
streamer.isDraining = false;
}
while (streamer.audioStack.length && !streamer.isDraining) {
var buffer = streamer.audioStack.shift();
var source = streamer.context.createBufferSource();
source.buffer = buffer;
source.connect(streamer.context.destination);
if (streamer.nextTime == 0)
streamer.nextTime = streamer.context.currentTime + 0.01; // add a small latency buffer (10 ms here) so scheduling works across systems - tune this if you like
source.start(streamer.nextTime);
streamer.nextTime += source.buffer.duration; // Make the next buffer wait the length of the last buffer before being played
streamer.numberScheduled++;
source.onended = function () {
streamer.numberScheduled--;
}
if (streamer.numberScheduled == MaxScheduled) {
streamer.isDraining = true;
}
};
streamer.isWorking = false;
}
}
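For reference, wiring the class up might look something like this; the hubConnection object and the "ReceiveAudio" method name are illustrative assumptions about the SignalR side, not part of the class above:

// Illustrative usage: feed each chunk from the server into the streamer,
// which decodes it and schedules it back to back after the previous chunk.
const streamer = new WebAudioStreamer();

hubConnection.on("ReceiveAudio", (encodedBytes: number[]) => {
  streamer.pushOntoAudioStack(encodedBytes);
});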