I'm trying to extract metadata from video files by running ffprobe via NativeProcess. The code below works exactly as it should for a single file, but throws an error when I try to loop through a series of files.
I know the cause of the problem: AIR tries to start a new NativeProcess before the previous one has finished, and the fix has something to do with listening for air.NativeProcessExitEvent.EXIT. I just can't get it to work. Any suggestions would be greatly appreciated.
function fileOpen() {
    var directory = air.File.userDirectory;
    try
    {
        directory.browseForDirectory("Select Directory");
        directory.addEventListener(air.Event.SELECT, directorySelected);
    }
    catch (error)
    {
        air.trace("Failed:", error.message);
    }

    function directorySelected(event)
    {
        directory = event.target;
        var files = directory.getDirectoryListing();
        for (var i = 0; i < files.length; i++) {
            getMetadata(files[i].nativePath);
            // wait here for the NativeProcess to finish
        }
    }
}
function getMetadata(filePathIn) {
    if (!air.NativeProcess.isSupported)
    {
        air.trace("NativeProcess not supported.");
        return;
    }
    var fileIn = filePathIn.toString();
    var nativeProcessStartupInfo = new air.NativeProcessStartupInfo();
    var file = air.File.applicationStorageDirectory.resolvePath("ffprobe");
    nativeProcessStartupInfo.executable = file;
    var args = new air.Vector["<String>"]();
    args.push("-sexagesimal", "-show_format", "-loglevel", "quiet", "-show_streams", "-print_format", "json", filePathIn);
    nativeProcessStartupInfo.arguments = args;
    // process is global so the handlers below can read its streams;
    // listeners are attached before start() so no output is missed
    process = new air.NativeProcess();
    process.addEventListener(air.ProgressEvent.STANDARD_OUTPUT_DATA, onOutputData);
    process.addEventListener(air.ProgressEvent.STANDARD_ERROR_DATA, onErrorData);
    process.addEventListener(air.NativeProcessExitEvent.EXIT, onExit);
    process.addEventListener(air.IOErrorEvent.STANDARD_OUTPUT_IO_ERROR, onIOError);
    process.addEventListener(air.IOErrorEvent.STANDARD_ERROR_IO_ERROR, onIOError);
    process.start(nativeProcessStartupInfo);
}
function onOutputData()
{
    var fileMetadataJSON = process.standardOutput.readUTFBytes(process.standardOutput.bytesAvailable);
    air.trace(fileMetadataJSON);
}

function onErrorData(event)
{
    air.trace("ERROR -", process.standardError.readUTFBytes(process.standardError.bytesAvailable));
}

function onExit(event)
{
    air.trace("Process exited with ", event.exitCode);
}

function onIOError(event)
{
    air.trace(event.toString());
}
Here's an outline that should give you an idea of what to do:
In your directorySelected() method, promote the local variable files (an Array) to a member variable. This will make the list of files that was selected available so that other methods can access it.
Remove the for loop in your directorySelected() method. Instead of running the loop and trying to run the native process here, call a new method, let's call it dequeueFileAndRunNativeProcess().
This new dequeueFileAndRunNativeProcess() method should check the length of the files array and, if it is greater than 0, remove the next file from the array (shift() takes the first, pop() the last) and execute the native process on it.
In your onExit() method, call the dequeueFileAndRunNativeProcess() method again.
The idea here is to move the code that runs the native process into a method you can trigger from two places: once immediately after the user finishes browsing for files, and then again each time the native process finishes its work. Processing ends when the files array has no more files in it. A rough sketch follows the outline.
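A minimal sketch of that outline, written against the same HTML/JS AIR aliases as the question (the member variable files and the helper name dequeueFileAndRunNativeProcess are just the placeholders from the steps above, not code from the original post):

var files = [];   // member variable holding the files selected by the user
var process;      // current NativeProcess

function directorySelected(event)
{
    var directory = event.target;
    files = directory.getDirectoryListing();
    dequeueFileAndRunNativeProcess();    // kick off the first file
}

function dequeueFileAndRunNativeProcess()
{
    if (files.length > 0)
    {
        var file = files.shift();        // take the next file off the queue
        getMetadata(file.nativePath);    // starts the NativeProcess as before
    }
}

function onExit(event)
{
    air.trace("Process exited with ", event.exitCode);
    dequeueFileAndRunNativeProcess();    // only now start the next file
}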
I'm building an API for a gallery, so I created a method that copies an image from the database.
Now I want to append a number to the name of the copied image. For example:
-original image name: image
-copy image name: image(1)
-2nd copy image name: image(2)
How can I add the number to the copied image's name automatically?
'use strict'

let imageObject = require('../models/image-object');
let fs = require('fs');
let path = require('path');

let gallery_controllers = {
    copyImage: function(req, res) {
        // get the id param of the image to copy
        let imageId = req.params.id;
        if (imageId == null) return res.status(404).send({message: "no ID defined"});

        // look up the requested image in the database
        imageObject.findById(imageId, (err, image) => {
            if (err) return res.status(500).send({message: 'error while fetching the image'});
            if (!image) return res.status(404).send({message: 'image not found'});
            if (image) {
                // set up a new model object
                let imageCopied = new imageObject();
                imageCopied.name = image.name;
                imageCopied.image = image.image;
                // save the copied image to the database
                imageCopied.save((err, image_copied) => {
                    if (err) return res.status(500).send({message: "error 500"});
                    if (!image_copied) return res.status(404).send({message: "error 404"});
                    return res.status(200).send({
                        image: image_copied
                    });
                });
            }
        });
    },
};
Here's a function that looks in the directory passed to it for files named file(nnn), where nnn is some sequence of digits, and returns the full path of the next one in the sequence (the one after the highest number that already exists).
The function pre-creates a placeholder file by that name to avoid concurrency issues when multiple asynchronous operations call it at around the same time (which could conflict if it only returned the filename without creating the file). To further guard against conflicts, it creates the placeholder in a mode that fails if the file already exists (so only one invocation will ever create that particular file), and if it hits such a conflict (someone else grabbed the next available name first) it automatically retries with a new number. All of this is to avoid subtle race conditions when creating the next filename in the sequence.
Once the caller has the unique filename this resolves to, they are expected to overwrite the placeholder contents with their own contents.
// pass the directory to look in
// pass the base file name so it will look for the next in sequence, as in "file(3)"
// returns the full path of the unique placeholder file it has created
// the caller is then responsible for that file
// calling it multiple times will create a new placeholder file each time
const fs = require('fs');
const path = require('path');

async function findNextName(dir, base) {
    let cntr = 0;
    const cntr_max = 5;
    const regex = new RegExp(`^${base}\\((\\d+)\\)$`);

    async function run() {
        const files = await fs.promises.readdir(dir);
        let highest = 0;
        for (let f of files) {
            let matches = f.match(regex);
            if (matches) {
                let num = parseInt(matches[1]);
                if (num > highest) {
                    highest = num;
                }
            }
        }
        let name = `${base}(${highest + 1})`;
        // create a placeholder file with this name to avoid concurrency issues
        // with another request also trying to use the same next file
        try {
            // write the file, failing if it already exists due to a concurrency issue
            const fullPath = path.resolve(path.join(dir, name));
            await fs.promises.writeFile(fullPath, "placeholder", { flag: "wx" });
            return fullPath;
        } catch (e) {
            // if this fails because of a potential concurrency issue, then try again
            // up to cntr_max times to avoid looping forever on a persistent error
            if (++cntr < cntr_max) {
                return run();
            } else {
                throw e;
            }
        }
    }

    return run();
}
You could call it like this:
findNextName(".", "file").then(filename=> {
console.log(filename);
}).catch(err => {
console.log(err);
});
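If the copies only live in the database (as in the controller above) rather than on disk, the same numbering idea can be applied to the name field directly. A rough sketch along those lines, assuming name holds plain strings like "image" and "image(2)"; the nextCopyName helper is hypothetical and not part of the original code:

// Sketch: pick the next "name(n)" for a copy, based on what is already stored.
function nextCopyName(baseName, callback) {
    // escape regex metacharacters in the original name
    let escaped = baseName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    let regex = new RegExp(`^${escaped}\\((\\d+)\\)$`);

    imageObject.find({ name: regex }, (err, copies) => {
        if (err) return callback(err);
        let highest = 0;
        copies.forEach(copy => {
            let match = copy.name.match(regex);
            if (match && parseInt(match[1]) > highest) highest = parseInt(match[1]);
        });
        callback(null, `${baseName}(${highest + 1})`);
    });
}

// inside copyImage, instead of imageCopied.name = image.name:
// nextCopyName(image.name, (err, name) => { imageCopied.name = name; /* then save */ });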
On iOS I'm trying to upload videos to my own website, and to do so I use the FileReader.readAsArrayBuffer method with a chunkSize of 1048576.
I upload chunks when the onprogress event fires, and all of this works perfectly except for bigger files. When trying to upload a file of 1.33 GB I get an out-of-memory exception when calling the readAsArrayBuffer method.
I guess this is because it tries to reserve memory for the complete file, which shouldn't be necessary. Is there a way to read a binary chunk from a file without reserving memory for the complete file? Or are there other solutions?
Thanks!
I fixed it myself today by changing the plugin code. This is the original code:
FileReader.prototype.readAsArrayBuffer = function (file) {
    if (initRead(this, file)) {
        return this._realReader.readAsArrayBuffer(file);
    }

    var totalSize = file.end - file.start;
    readSuccessCallback.bind(this)('readAsArrayBuffer', null, file.start, totalSize, function (r) {
        var resultArray = (this._progress === 0 ? new Uint8Array(totalSize) : new Uint8Array(this._result));
        resultArray.set(new Uint8Array(r), this._progress);
        this._result = resultArray.buffer;
    }.bind(this));
};
and since progress is always 0 at the start, it always reserves the entire file size. I added a property READ_CHUNKED (because I still have other existing code that uses this method and expects it to work as it did, and I have to check that everything else keeps working) and changed the above to this:
FileReader.prototype.readAsArrayBuffer = function (file) {
    if (initRead(this, file)) {
        return this._realReader.readAsArrayBuffer(file);
    }

    var totalSize = file.end - file.start;
    readSuccessCallback.bind(this)('readAsArrayBuffer', null, file.start, totalSize, function (r) {
        var resultArray;
        if (!this.READ_CHUNKED) {
            resultArray = new Uint8Array(totalSize);
            resultArray.set(new Uint8Array(r), this._progress);
        } else {
            var newSize = FileReader.READ_CHUNK_SIZE;
            if ((totalSize - this._progress) < FileReader.READ_CHUNK_SIZE) {
                newSize = (totalSize - this._progress);
            }
            resultArray = new Uint8Array(newSize);
            resultArray.set(new Uint8Array(r), 0);
        }
        this._result = resultArray.buffer;
    }.bind(this));
};
When the READ_CHUNKED property is true, it only returns the chunks and doesn't reserve memory for the entire file; when it is false it works like it used to.
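For reference, a rough usage sketch of the patched reader (not from the original post: the uploadChunk function and the fileEntry variable are placeholders, and it assumes the patch above exposes the latest chunk through reader.result during onprogress, as described):

var reader = new FileReader();
reader.READ_CHUNKED = true;        // property added by the patch above
reader.onprogress = function () {
    // with READ_CHUNKED set, result holds only the most recent chunk
    uploadChunk(reader.result);    // placeholder: send this chunk to the server
};
reader.onload = function () {
    console.log('finished reading the file');
};
// fileEntry obtained elsewhere, e.g. via resolveLocalFileSystemURL
fileEntry.file(function (file) {
    reader.readAsArrayBuffer(file);
});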
I've never used GitHub (except for pulling code), so I'm not uploading this for now; I might look into it in the near future.
I am trying to create posts using a for loop, but when I look at the Parse database only the last object of my array gets stored. This is the code I wrote:
var Reggione = Parse.Object.extend("Reggione");
var creaReggione = new Reggione();
var selectobject = $('#searcharea')[0];

for (var i = 2; i < selectobject.length; i++) {
    creaReggione.set("name", selectobject.options[i].text);
    creaReggione.save();
}
Thanks, Bye.
Do this by creating an array of new objects, then saving them together...
var newObjects = [];
for (var i = 2; i < selectobject.length; i++) {
    // create a fresh object for each option, otherwise every array entry
    // points to the same instance and only the last name survives
    var reggione = new Reggione();
    reggione.set("name", selectobject.options[i].text);
    newObjects.push(reggione);
    // ...
}
Parse.Object.saveAll(newObjects);
Remember, if you want something to happen after saveAll completes (like calling response.success() if you're in Cloud Code), then you should use the promise it returns, as follows...
Parse.Object.saveAll(newObjects).then(function(result) {
    response.success(result);
}, function(error) {
    response.error(error);
});
In extension to danh's answer, the reason this does not work is that only one transaction can happen at a time from the JS client to Parse.
Therefore, in your loop the first call to .save() is made and the object is saved to Parse asynchronously; while that is happening the loop continues to run and skips over your other save calls, and those objects are NOT queued to be saved. As Danh pointed out, you must use Parse's batch operations to save multiple objects to the server in one go. To do this you can:
var newObjects = [];
for (var i = 2; i < selectobject.length; i++) {
    // again, create a new object on each pass
    var reggione = new Reggione();
    reggione.set("name", selectobject.options[i].text);
    newObjects.push(reggione);
    // ...
}
Parse.Object.saveAll(newObjects);
Hope this helps. I'd also recommend taking a look at the callback options on Parse's save methods to get more details on what happened (you can use the success and error callbacks here to make debugging a little easier).
An example of this would be to extend the previous call with:
Parse.Object.saveAll(newObjects, {
    success: function(messages) {
        console.log("The objects were successfully saved...");
    },
    error: function(error) {
        console.log("An error occurred when saving the messages array: %s", error.message);
    }
});
I hope this is of some help to you
I'm using the Gumroad-API npm package to fetch data from an external service (Gumroad). Unfortunately, it uses a .then() construct, which can get a little unwieldy, as you will see below.
This is my Meteor method:
Meteor.methods({
    fetchGumroadData: () => {
        const Gumroad = Meteor.npmRequire('gumroad-api');
        let gumroad = new Gumroad({ token: Meteor.settings.gumroadAccessKey });

        let before = "2099-12-04";
        let after = "2014-12-04";
        let page = 1;
        let sales = [];

        // Recursively defined to continue fetching the next page if it exists
        let doThisAfterResponse = (response) => {
            sales.push(response.sales);
            if (response.next_page_url) {
                page = page + 1;
                gumroad.listSales(after, before, page).then(doThisAfterResponse);
            } else {
                let finalArray = R.unnest(sales);
                console.log('result array length: ' + finalArray.length);
                Meteor.call('insertSales', finalArray);
                console.log('FINISHED');
            }
        };

        gumroad.listSales(after, before, page).then(doThisAfterResponse); // run
    }
});
Since the NPM package exposes the Gumroad API using something like this:
gumroad.listSales(after, before, page).then(callback)
I decided to do it recursively in order to grab all pages of data.
Let me try to re-cap what is happening here:
The journey starts on the last line of the code shown above.
The initial page is fetched, and doThisAfterResponse() is run for the first time.
We first dump the returned data into our sales array, and then we check if the response has given us a link to the next page (as an indication as to whether or not we're on the final page).
If so, we increment our page count and we make the API call again with the same function to handle the response again.
If not, this means we're at our final page. Now it's time to format the data using R.unnest and finally insert the finalArray of data into our database.
But a funny thing happens here. The entire execution halts at the Meteor.call() and I don't even get an error output to the server logs.
I even tried switching out the Meteor.call() for a simple: Sales.insert({text: 'testing'}) but the exact same behaviour is observed.
What I really need to do is to fetch the information and then store it into the database on the server. How can I make that happen?
EDIT: Please also see this other (much more simplified) SO question I made:
Calling a Meteor Method inside a Promise Callback [Halting w/o Error]
I ended up ditching the NPM package and writing my own API call. I could never figure out how to make my call inside the .then(). Here's the code:
fetchGumroadData: () => {
    let sales = [];

    const fetchData = (page = 1) => {
        let options = {
            data: {
                access_token: Meteor.settings.gumroadAccessKey,
                before: '2099-12-04',
                after: '2014-12-04',
                page: page,
            }
        };

        HTTP.call('GET', 'https://api.gumroad.com/v2/sales', options, (err, res) => {
            if (err) { // API call failed
                console.log(err);
                throw err;
            } else { // API call successful
                sales.push(...res.data.sales);
                res.data.next_page_url ? fetchData(page + 1) : Meteor.call('addSalesFromAPI', sales);
            }
        });
    };

    fetchData(); // run the function to fetch data recursively
}
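The addSalesFromAPI method called above isn't shown in the post. A minimal sketch of what it might look like, assuming a Sales Mongo collection like the one referenced earlier in the question (the method body and lack of validation are assumptions, not the author's code):

// Hypothetical server-side method; adjust the collection and schema to your own app.
Meteor.methods({
    addSalesFromAPI: (salesArray) => {
        salesArray.forEach((sale) => {
            Sales.insert(sale);
        });
    },
});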
I am just getting started with coding for Firefox OS and am trying to get a list of files in a directory.
The idea is to find the name of each file and add it to an array (which works), but I want to return the populated array, and this is where I come unstuck. The array gets populated during the function (I can get it to spit out file names from it), but when I return it to another function it appears to be empty.
Here is the function in question:
function getImageFromDevice() {
    var imageHolder = new Array();
    var pics = navigator.getDeviceStorage('pictures');

    // Let's browse all the images available
    var cursor = pics.enumerate();
    var imageList = new Array();
    var count = 0;

    cursor.onsuccess = function () {
        var file = this.result;
        console.log("File found: " + file.name);
        count = count + 1;

        // Once we found a file we check if there are other results
        if (!this.done) {
            imageHolder[count] = file.name;
            // Then we move to the next result, which calls the cursor
            // success handler with the next file as result.
            this.continue();
        }
        console.log("file in array: " + imageHolder[count]);
        // this shows the filename
    }

    cursor.onerror = function () {
        console.warn("No file found: " + this.error);
    }

    return imageHolder;
}
Thanks for your help!
Enumerating over pictures is an asynchronous call. Essentially what is happening in your code is this:
You initiate an empty array.
You tell Firefox OS to look for pictures on the device.
Then, in cursor.onsuccess, you tell Firefox OS to append to the array you created WHEN it gets a file back. The important thing here is that this does not happen right away; it happens at some point in the future.
Then you return the empty array you created. It's empty because the onsuccess function hasn't actually run yet.
At some point in the future the onsuccess function will be called. One way to wait until the array is fully populated would be to add a check afterwards:
if (!this.done) {
    imageHolder[count] = file.name;
    this.continue();
}
else {
    // do something with the fully populated array
}
But then of course your code has to go inside the getImageFromDevice function. You can also pass a callback function into the getImageFromDevice function.
See Getting a better understanding of callback functions in JavaScript
The problem is with the asynchronous nature of the calls you are using.
You are returning (and probably using) the value of imageHolder while it's still empty; the "onsuccess" calls are deferred and happen later in time, whereas your function returns immediately with the (still empty) imageHolder value.
In this case you should be doing something along these lines:
function getImageFromDevice(callback) {
    ...
    cursor.onsuccess = function () {
        ...
        if (!this.done) {
            // next picture
            imageHolder[count] = file.name;
            this.continue();
        } else {
            // no more pictures, return with the results
            console.log("operation finished:");
            callback(imageHolder);
        }
    }
}
Or use Promises in your code to accomplish the same.
Use the above, e.g., like this:
getImageFromDevice(function(result) {
    console.log(result.length + " pictures found!");
});
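For the Promise variant mentioned above, a minimal sketch built on the same DeviceStorage cursor (not part of the original answer):

function getImageFromDevice() {
    return new Promise(function (resolve, reject) {
        var imageHolder = [];
        var cursor = navigator.getDeviceStorage('pictures').enumerate();
        cursor.onsuccess = function () {
            if (!this.done) {
                imageHolder.push(this.result.name);
                this.continue();
            } else {
                resolve(imageHolder);   // all pictures enumerated
            }
        };
        cursor.onerror = function () {
            reject(this.error);
        };
    });
}

getImageFromDevice().then(function (result) {
    console.log(result.length + " pictures found!");
});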