I am new to RecordRTC.js. I have a simple application that records audio and video and saves the video. It works fine if I record using headphones, but if I remove the headphones and try to record, it creates some terrible noise. Sometimes, if I refresh the page, it does not make the noise, but if I plug in the headphones, remove them, and press the record button, the noise starts again.
Here is my code to start recording:
function captureUserMedia(mediaConstraints, successCallback, errorCallback) {
    navigator.mediaDevices.getUserMedia(mediaConstraints)
        .then(successCallback)
        .catch(errorCallback);
}

function onMediaSuccess(stream) {
    streamMedia = stream;
    var videoPreview = document.getElementById('webrtcVideo');
    var videoFile = !!navigator.mozGetUserMedia ? 'video.gif' : 'video.webm';
    videoPreview.src = window.URL.createObjectURL(stream);
    videoPreview.play();
    recordVideo = RecordRTC(stream, {
        type: 'video'
    });
    recordVideo.startRecording();
}

function onMediaError(e) {
    console.error('media error', e);
}

/**
 * This function will be called from html on click of record button.
 */
function startRecording() {
    captureUserMedia(mediaConstraints, onMediaSuccess, onMediaError);
}
var mediaConstraints = {
    video: true,
    audio: {
        mandatory: {
            echoCancellation: false,
            googAutoGainControl: false,
            googNoiseSuppression: false,
            googHighpassFilter: false
        },
        optional: [{
            googAudioMirroring: false
        }]
    }
};
I'm trying to create a Chrome extension (Manifest V3) that can record the screen. The flow is:
Showing a record button in the popup
Clicking the record button opens an HTML page in the background
The HTML page sends a request to the background to show the desktop selector for the active tab
The user selects the window/tab and recording starts
Here is the code (https://stackblitz.com/edit/web-platform-mxfsyx?file=index.html):
Record button in the popup:
document.querySelector('#startFromBackgroundPage')
    .addEventListener('click', function(event) {
        chrome.runtime.sendMessage(
            { event: 'open-bg-page' },
            function(response) { console.log(response); });
    });
Background page
chrome.runtime.onMessage.addListener(function(message, sender, sendResponse) {
    if (message.event === 'open-bg-page') {
        chrome.tabs.create({
            url: 'index.html',
            selected: false,
        });
    }
    if (message.event === 'record') {
        chrome.tabs.query({ currentWindow: true, active: true }, function(tabs) {
            chrome.desktopCapture.chooseDesktopMedia(
                ["screen", "window"],
                tabs[0],
                function(id, options) {
                    chrome.runtime.sendMessage({ 'mediaId': id, options: options });
                });
        });
    }
});
index.html/app.js
// Send message as soon as the page is loaded.
setTimeout(() => {
    chrome.runtime.sendMessage(
        { event: 'record' },
        function(response) { console.log(response); });
}, 500);

chrome.runtime.onMessage.addListener(function(message, sender, sendResponse) {
    console.log('on message', message);
    if (message.mediaId) {
        setTimeout(() => {
            onAccessApproved(message.mediaId, message.options);
        }, 100);
    }
});

// Launch getUserMedia() based on the selected media id.
function onAccessApproved(id, options) {
    if (!id) {
        console.log('Access rejected.');
        return;
    }
    var audioConstraint = {
        mandatory: {
            chromeMediaSource: 'desktop',
            chromeMediaSourceId: id
        }
    };
    if (!options.canRequestAudioTrack)
        audioConstraint = false;
    navigator.getUserMedia({
        audio: audioConstraint,
        video: {
            mandatory: {
                chromeMediaSource: 'desktop',
                chromeMediaSourceId: id,
                maxWidth: screen.width,
                maxHeight: screen.height
            }
        }
    }, gotStream, getUserMediaError);
}

function getUserMediaError(error) {
    document.querySelector('#message').innerHTML = error;
    console.log('navigator.getUserMedia() error: ', error);
}
But it's throwing an error: NotAllowedError: Invalid state.
It works fine if I replace tabs[0] with sender.tab in background.js, but then it shows the desktop/window chooser on the background page, which I don't want. I want to record the current active tab. I am handling this in the background page so that the recording continues even if the page is refreshed or closed.
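For completeness, the gotStream callback referenced above is not shown in the question; a minimal sketch of what it might do (hypothetical: preview the stream and record it with MediaRecorder) is:

var recorder;

function gotStream(stream) {
    // Preview the captured desktop stream (assumes a <video id="preview"> element).
    var preview = document.querySelector('#preview');
    if (preview) preview.srcObject = stream;

    // Record it; webm is broadly supported for screen capture in Chrome.
    recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
    var chunks = [];
    recorder.ondataavailable = function(e) { chunks.push(e.data); };
    recorder.onstop = function() {
        var blob = new Blob(chunks, { type: 'video/webm' });
        console.log('recorded', blob.size, 'bytes');
    };
    recorder.start();
}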
I am trying to run my game inside the Telegram in-app browser.
Below is my code so far:
bot.action('wheeloffortune', ctx => {
    bot.telegram.sendGame(ctx.chat.id, 'wheeloffortune');
});

bot.on("callback_query", function(query) {
    let gameurl = "https://dazzling-ritchie-f3ad20.netlify.app/?id=" + query.id;
    bot.answerCallbackQuery({
        callback_query_id: query.id,
        url: gameurl
    });
});

bot.on("inline_query", function(iq) {
    bot.answerInlineQuery(iq.id, [{ type: "game", id: "0", game_short_name: 'wheeloffortune' }]);
});
What I expect: after clicking the 'Play wheeloffortune' button, the game should open in the webview.
What actually happens: the loading image keeps rendering indefinitely.
Any advice is appreciated.
According to the Telegraf game example, you can simply use:
bot.gameQuery((ctx) => {
    let queryId = ctx.callbackQuery.id;
    let gameUrl = "https://dazzling-ritchie-f3ad20.netlify.app/?id=" + queryId;
    ctx.answerGameQuery(gameUrl);
});
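For context, a minimal end-to-end sketch of how the pieces might fit together (assuming Telegraf v4 and a bot token in BOT_TOKEN; the game short name and URL are the ones from the question):

const { Telegraf } = require('telegraf');
const bot = new Telegraf(process.env.BOT_TOKEN);

// Send the game when the action button is pressed.
bot.action('wheeloffortune', (ctx) => ctx.telegram.sendGame(ctx.chat.id, 'wheeloffortune'));

// gameQuery only fires for callback queries that carry a game_short_name,
// so it replaces the manual "callback_query" handler from the question.
bot.gameQuery((ctx) => {
    let gameUrl = "https://dazzling-ritchie-f3ad20.netlify.app/?id=" + ctx.callbackQuery.id;
    return ctx.answerGameQuery(gameUrl);
});

bot.launch();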
I have my Angular view file like below.
<!DOCTYPE html>
<video id="myVideo" class="video-js vjs-default-skin"></video>
<script>
    var dataUri;
    var videoData;
    var player = videojs("myVideo", {
        controls: true,
        width: 320,
        height: 240,
        fluid: false,
        plugins: {
            record: {
                audio: true,
                video: true,
                maxLength: 100,
                debug: true
            }
        }
    }, function() {
        // print version information at startup
        videojs.log('Using video.js', videojs.VERSION,
            'with videojs-record', videojs.getPluginVersion('record'),
            'and recordrtc', RecordRTC.version);
    });

    // error handling
    player.on('deviceError', function() {
        console.log('device error:', player.deviceErrorCode);
    });

    player.on('error', function(error) {
        console.log('error:', error);
    });

    // user clicked the record button and started recording
    player.on('startRecord', function() {
        console.log('started recording!');
    });

    // user completed recording and stream is available
    player.on('finishRecord', function() {
        console.log('player : ', player.recordedData.video.name);
        videoData = player.recordedData;
        console.log('finished recording: ', player.recordedData);
    });

    function getVideoData() {
        return videoData;
    }
</script>
<button id="record" onClick="getVideoData();" ng-model="onFileSelect()"></button>
When the player.on('finishRecord', ...) callback fires, the recorded video data is available in the player.recordedData variable. My problem is that I want to send player.recordedData to the Angular controller on the click of the button whose id is record.
If the variable is defined globally, you can use it directly in any of your controllers. Try to put your data in object.property format.
Example:
var model = { videoData: null };

player.on('finishRecord', function() {
    ...
    model.videoData = player.recordedData;
});
In the controller:
// directly use it; ensure it has data
model.videoData
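For the button click itself, a minimal sketch of the controller side (hypothetical AngularJS module and controller names; this works only because model is a shared global):

var app = angular.module('myApp', []);

app.controller('RecordCtrl', ['$scope', function($scope) {
    // Wire this to the #record button, e.g. ng-click="onFileSelect()"
    // (ng-model on a button, as in the question, will not call a function).
    $scope.onFileSelect = function() {
        if (model.videoData) {
            $scope.video = model.videoData; // recorded data from videojs-record
        }
    };
}]);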
I'm developing video calling functionality with WebRTC and facing a very strange problem.
When I make a call, everything is fine and I get a remote video stream, but when I receive a call, I get a black screen with no remote video. The strange part is that when I refresh the page, I get the remote video!
In the console, I'm getting the following:
Video constraints false
But when I refresh the page, I get the video object.
This is my video container in index.html:
<video id="video-container" autoplay="autoplay" class="video-style"></video>
Main.js:
(function() {
    var vertoHandle, vertoCallbacks, currentCall;

    document.getElementById("make-call").addEventListener("click", makeCall);
    document.getElementById("hang-up-call").addEventListener("click", hangupCall);
    document.getElementById("answer-call").addEventListener("click", answerCall);

    $.verto.init({}, bootstrap);

    function bootstrap(status) {
        vertoHandle = new jQuery.verto({
            // ID of HTML video tag where the video feed is placed.
            tag: "video-container",
            deviceParams: {
                // Asking for camera permissions and devices.
                useCamera: 'any',
                useMic: 'any',
                useSpeak: 'any',
            },
            login: '1008@127.0.0.1',
            passwd: '1234',
            socketUrl: 'wss://127.0.0.1:8082',
            ringFile: '',
            iceServers: true,
        }, vertoCallbacks);
    }

    vertoCallbacks = {
        onWSLogin: onWSLogin,
        onWSClose: onWSClose,
        onDialogState: onDialogState,
    };

    function onWSLogin(verto, success) {
        console.log('onWSLogin', success);
    }

    function onWSClose(verto, success) {
        console.log('onWSClose', success);
    }

    function onDialogState(d) {
        console.debug('onDialogState', d);
        if (!currentCall) {
            currentCall = d;
        }
        switch (d.state.name) {
            case 'trying':
                //
                break;
            case 'ringing':
                alert('Someone is calling you, answer!');
                break;
            case 'answering':
                //
                break;
            case 'active':
                //
                break;
            case 'hangup':
                //
                break;
            case 'destroy':
                //
                break;
        }
    }

    function makeCall() {
        vertoHandle.videoParams({
            minWidth: 320,
            minHeight: 240,
            maxWidth: 640,
            maxHeight: 480,
            // The minimum frame rate of the client camera; Verto will fail
            // if it's less than this.
            minFrameRate: 15,
            // The maximum frame rate to send from the camera.
            vertoBestFrameRate: 30,
        });

        currentCall = vertoHandle.newCall({
            useVideo: true,
            mirrorInput: true,
            destination_number: '3520',
            caller_id_name: 'Test Caller',
            caller_id_number: '1008',
            outGoingBandwidth: 'default',
            inComingBandwidth: 'default',
            useStereo: true,
            useMic: true,
            useSpeak: true,
            userVariables: {
                email: 'test@test.com'
            },
            dedEnc: false,
        });
    }

    function hangupCall() {
        currentCall.hangup();
    }

    function answerCall() {
        currentCall.answer();
    }
})();
What's wrong with this code?
Thanks in advance!
So after some research, I've found the solution.
I was getting the error Video constraints false because the video constraints were only set at the time of making a call, not at the time of receiving one. So I manually set the property useVideo: true after deviceParams, like so:
tag: "video-container",
deviceParams: {
    // Asking for camera permissions and devices.
    useCamera: 'any',
    useMic: 'any',
    useSpeak: 'any',
},
useVideo: true,
// other properties
Now I'm getting the video when receiving a call too.
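Putting it together, the constructor from the question with the fix applied (same values as in the question, with useVideo added):

vertoHandle = new jQuery.verto({
    tag: "video-container",
    deviceParams: {
        useCamera: 'any',
        useMic: 'any',
        useSpeak: 'any',
    },
    useVideo: true, // request video for incoming calls as well
    login: '1008@127.0.0.1',
    passwd: '1234',
    socketUrl: 'wss://127.0.0.1:8082',
    ringFile: '',
    iceServers: true,
}, vertoCallbacks);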
I am attempting to piece together a jPlayer example that plays two audio files, one right after the other.
I need full event control, so I decided not to use the playlist option, unless somebody can give me an example of controlling when the events fire, something like:
first media 'before' function
first media 'after' function
second media 'before' function
second media 'after' function
My example below uses a global variable in the ended: handler and seems pretty clumsy.
Does anybody have a better suggestion for how I might accomplish this? Here is my working example using the global variable:
$(document).ready(function() {
    $("#jquery_jplayer_1").jPlayer({
        ready: function(event) {
            console.log('ready function.');
        },
        ended: function() {
            console.log('ending function starting.');
            var player = $("#jquery_jplayer_1");
            player.jPlayer("stop");
            player.jPlayer("clearMedia");
            player.jPlayer("setMedia", {
                wav: globalParmTwo
            });
            player.jPlayer("play", 0);
            globalParmTwo = null;
        },
        loop: false, // added to remove the if condition
        supplied: "wav, oga",
        wmode: "window",
        useStateClassSkin: true,
        autoBlur: false,
        smoothPlayBar: true,
        keyEnabled: true,
        remainingDuration: true,
        toggleDuration: true
    });
});

var globalParmTwo = null;

function komPare(parmOne, parmTwo) {
    globalParmTwo = parmTwo;
    var player = $("#jquery_jplayer_1");
    player.jPlayer("stop");
    player.jPlayer("clearMedia");
    player.jPlayer("setMedia", {
        wav: parmOne
    });
    player.jPlayer("play", 0);
}
And here is my working example using the playlist, which seems like a better solution, assuming there is some way to fire specific functions at specific times:
var myPlaylist = null;

$(document).ready(function() {
    myPlaylist = new jPlayerPlaylist({
        jPlayer: "#jquery_jplayer_N",
        cssSelectorAncestor: "#jp_container_N"
    }, [], {
        playlistOptions: {
            enableRemoveControls: true
        },
        supplied: "wav, ogv",
        useStateClassSkin: true,
        autoBlur: false,
        smoothPlayBar: true,
        keyEnabled: true,
        audioFullScreen: false
    });
    $('.jp-playlist').hide();
    myPlaylist.option("autoPlay", true);
});

function komPare(firstMediaUrl, secondMediaUrl) {
    myPlaylist.setPlaylist([
        { wav: firstMediaUrl },
        { wav: secondMediaUrl }
    ]);
}
Any suggestions are very much appreciated.
UPDATE: I removed an if-condition and put in loop: false instead in the first example.
We can bind events to the playlist player and then use a global media counter. This still uses global variables, but the code is cleaner.
As always, any suggestions are most appreciated.
var myPlaylist = null;
var mediaCounter = null;

$(document).ready(function() {
    myPlaylist = new jPlayerPlaylist({
        jPlayer: "#jquery_jplayer_1"
    }, [] /* empty initial playlist */, {
        playlistOptions: {
            enableRemoveControls: true
        },
        supplied: "ogv",
        useStateClassSkin: true,
        autoBlur: false,
        smoothPlayBar: true,
        keyEnabled: true,
        audioFullScreen: false
    });

    $('#jquery_jplayer_1').bind($.jPlayer.event.ready, function(event) {
        console.log('ready.');
    });

    $('#jquery_jplayer_1').bind($.jPlayer.event.loadeddata, function(event) {
        console.log('loadeddata ' + mediaCounter++);
    });

    $('#jquery_jplayer_1').hide();
    $('.jp-playlist').hide();
    $('.jp-controls').hide();
    myPlaylist.option("autoPlay", true);
});

function komPare() {
    var playlist = [];
    for (var urlName of arguments) {
        playlist.push({ ogv: urlName });
    }
    mediaCounter = 0;
    myPlaylist.setPlaylist(playlist);
}
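To get the per-track 'before'/'after' hooks from the wish list at the top, one option is to key arrays of callbacks off the same media counter. A sketch (the hook arrays are hypothetical, and the counter increment moves from the loadeddata handler into ended):

// Hypothetical per-track hooks, indexed by the global mediaCounter.
var beforeHooks = [
    function() { console.log("first media 'before'"); },
    function() { console.log("second media 'before'"); }
];
var afterHooks = [
    function() { console.log("first media 'after'"); },
    function() { console.log("second media 'after'"); }
];

// loadeddata fires when a track is ready to play: run its 'before' hook.
$('#jquery_jplayer_1').bind($.jPlayer.event.loadeddata, function(event) {
    if (beforeHooks[mediaCounter]) beforeHooks[mediaCounter]();
});

// ended fires when a track finishes: run its 'after' hook, then advance.
$('#jquery_jplayer_1').bind($.jPlayer.event.ended, function(event) {
    if (afterHooks[mediaCounter]) afterHooks[mediaCounter]();
    mediaCounter++;
});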