I am pretty new to this area and I started with Firebase Cloud Functions two days ago.
Sorry, I am still a student, so I might not fully understand some of the documentation.
I am trying to figure out how a parameter is passed from my client-side JavaScript to a Firebase Cloud Function.
My cloud function:
exports.OCR = functions.https.onCall((req) => {
  const vision = require('@google-cloud/vision');

  // Creates a client
  const client = new vision.ImageAnnotatorClient();
  console.log(req);

  // Performs document text detection on the image file
  client
    .documentTextDetection(req)
    .then((results) => {
      console.log("Entered");
      console.log(req);
      const fullTextAnnotation = results[0].fullTextAnnotation;
      console.log(fullTextAnnotation.text);
      return results[0].fullTextAnnotation.text;
    })
    .catch(err => {
      console.error('ERROR:', err);
      return "error";
    });
})
I am using Firebase Cloud Functions and the Google Vision API.
I tried to pass the parameter like this.
My client-side code:
document.getElementById("fileInput").click();
var file = document.getElementById("fileInput");
var fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', function (e) {
  var file = e.target.files[0];
  // Do something with the image file.
  var tmppath = URL.createObjectURL(file);
  console.log(file);
  console.log(tmppath);
  //var url = "https://firebasestorage.googleapis.com/v0/b/recette-f3ef5.appspot.com/o/FB1.gif?alt=media&token=28727220-181c-440e-87ae-4808b5c9ba28";
  OCR(file)
    .then(function(result) {
      console.log(result);
    }).catch(function(err) {
      console.log(err);
    });
});
It did not work; I always get a null return when I trigger the function.
So my question is: how can I pass the file (from the HTML input tag) to my cloud function?
P.S.: when I run the code locally with node the_code.js, it works.
According to the Google Cloud Node.js library documentation, the documentTextDetection function should receive a JS object like this:
var image = {
  source: {imageUri: 'gs://path/to/image.jpg'}
};

vision.documentTextDetection(image).then(response => {
  // doThingsWith(response);
}).catch(err => {
  console.error(err);
});
The file you are passing to the OCR function probably has a different structure than the one defined in the documentation.
There are a couple of variants of this object:
If the key is source, the value should be another object containing imageUri or filename as a key and a string as a value.
If the key is content, the value should be a Buffer (see the sketch at the end of this answer).
So your code should look something like this:
console.log(tmppath);
//var url = "https://firebasestorage.googleapis.com/v0/b/recette-f3ef5.appspot.com/o/FB1.gif?alt=media&token=28727220-181c-440e-87ae-4808b5c9ba28";
image = {source: {imageUri: 'https://firebasestorage.googleapis.com/v0/b/recette-f3ef5.appspot.com/o/FB1.gif?alt=media&token=28727220-181c-440e-87ae-4808b5c9ba28'}}
OCR(image)
Please provide the complete error messages and a description of what file actually contains.
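If you would rather send the file contents themselves instead of a URL, here is a minimal sketch of the content variant mentioned above, assuming the client reads the file and sends it as a base64 string through httpsCallable (the data.imageBase64 field name is illustrative, not from your code):

exports.OCR = functions.https.onCall((data) => {
  const vision = require('@google-cloud/vision');
  const client = new vision.ImageAnnotatorClient();

  // Rebuild a Buffer from the base64 string sent by the client
  // (data.imageBase64 is an assumed field name).
  const image = { content: Buffer.from(data.imageBase64, 'base64') };

  // Return the promise so the callable function waits for the result
  // and sends the text back to the client instead of null.
  return client
    .documentTextDetection(image)
    .then(results => results[0].fullTextAnnotation.text)
    .catch(err => {
      console.error('ERROR:', err);
      return 'error';
    });
});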
I use Watson Assistant v1.
My problem is that every time I make a call to my Node.js code, where I return the context in order to keep a coherent conversation, the context is only updated once and I get stuck in one node of the conversation.
This is my code:
client.on('message', message => {
  // general variables
  var carpetaIndividual = <../../../>
  var cuerpoMensaje = <....>
  var emisorMensaje = <....>

  // detect if context exists
  if (fs.existsSync(carpetaIndividual+'/contexto.json')) {
    var watsonContexto = require(carpetaIndividual+'/contexto.json');
    var variableContexto = watsonContexto;
  } else {
    var variableContexto = {}
  }

  // connection with Watson Assistant
  assistant.message(
    {
      input: { text: cuerpoMensaje },
      workspaceId: '<>',
      context: variableContexto,
    })
    .then(response => {
      let messageWatson = response.result.output.text[0];
      let contextoWatson = response.result.context;
      console.log('Chatbot: ' + messageWatson);

      // Save and create JSON file for context
      fs.writeFile(carpetaIndividual+'/contexto.json', JSON.stringify(contextoWatson), 'utf8', function (err) {
        if (err) {
          console.error(err);
        }
      });

      // Send messages to my application
      client.sendMessage(emisorMensaje, messageWatson)
    })
    .catch(err => {
      console.log(err);
    });
});

client.initialize();
The contexto.json file is updated, but when it is read, the code only picks up the first version of contexto.json and not the later updates.
This will be because you are using require to read the .json file. For all subsequent requires of an already-required file, the data is cached and reused.
You will need to use fs.readFile (or fs.readFileSync) and JSON.parse:
// detect if context exists
if (fs.existsSync(carpetaIndividual+'/contexto.json')) {
  var watsonContexto = fs.readFileSync(carpetaIndividual+'/contexto.json');
  // Converting to JSON
  var variableContexto = JSON.parse(watsonContexto);
} else {
  var variableContexto = {}
}
There is another subtle problem with your code: you are relying on your asynchronous call to fs.writeFile completing before you read the file. This will be the case most of the time, but since you don't wait for fs.writeFile to complete, there is a chance that you try to read the file before it has been written.
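If that becomes an issue, a minimal sketch of one option is to write the context synchronously, so that a later fs.readFileSync is guaranteed to see the updated data:

// Write the updated context synchronously so that the next read
// of contexto.json is guaranteed to see this version.
fs.writeFileSync(
  carpetaIndividual + '/contexto.json',
  JSON.stringify(contextoWatson),
  'utf8'
);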
I am trying to loop through all the images in my folder, convert each one to base64 and send it to MongoDB.
I started with one image and it worked fine.
var filename = '1500.jpg';
var binarydata = fs.readFileSync(filename);
var converted = new Buffer(binarydata).toString("base64");
console.log(converted);
The above code gives me the base64 for one file.
I tried changing the code so that it loops through all the files in my directory and gives me the base64 for each file.
Here is what I wrote, but it did not work:
var variantfolder = './variantimages';

fs.readdir(variantfolder, function(err, files){
  if (err) {
    console.log(err);
  }
  else {
    fs.readFileSync(files, function(err, res){
      if (err){console.log('err')} else {
        var converted = new Buffer(res).toString("base64");
        var onevariant = {
          "imagename":files,
          "imagebase64":converted
        }
        var newvariant = new Variant(onevariant)
        newvariant.save(err, newvar){
          if (err) {
            console.log('err');
          }
          else {
            console.log('saved to mongo');
          }
        }
      }
    })
  }
})
I suspect the problem is related to calling the functions in the wrong way.
Check the inputs and outputs of the functions you are using.
The fs.readdir() function callback is passed 2 parameters, an error and an array of file names.
The fs.readFileSync() function takes the parameters path and options. It returns the file contents and doesn't take a callback; the callback version is fs.readFile().
So in your code you are passing an array of file names into the file path parameter, which will not work.
You can also pass base64 as the encoding when reading the file and you won't have to convert it after.
I expect you will want something more along these lines (add your own error handling as required):
fs.readdir(variantfolder, (err, fileNames) => {
fileNames.forEach((fileName) => {
fs.readFile(`${variantfolder}/${fileName}`, 'base64', (err, base64Data) => {
// Do your thing with the file data.
});
});
});
Note that you can use the async, sync or promise (fs.promises) version of the fs functions depending on what is most suitable for your code.
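For example, a promise-based sketch of the same loop using fs.promises and async/await might look like this (the Variant model and its save call are assumed from your question):

const fs = require('fs').promises;
const path = require('path');

const variantfolder = './variantimages';

async function saveVariants() {
  const fileNames = await fs.readdir(variantfolder);
  for (const fileName of fileNames) {
    // Read each file directly as a base64-encoded string
    const base64Data = await fs.readFile(path.join(variantfolder, fileName), 'base64');

    // Variant is the Mongoose model from the question
    const newvariant = new Variant({
      imagename: fileName,
      imagebase64: base64Data
    });
    await newvariant.save();
    console.log('saved to mongo');
  }
}

saveVariants().catch(err => console.log(err));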
I'm using an Azure function to perform OCR on an image received on a Blob Trigger. I want to store the results in an output blob and I'm not sure how to proceed.
var abbyyclient = require('nodejs-ocr');
var client = new abbyyclient('ocrTest', 'ocrpassword', 'http://cloud.ocrsdk.com');

module.exports = function (context, inputBlob, resultBlob) {
  function ocrComplete(err, results) {
    if( !err ) {
      //context.log(results.toString());
      context.bindings.resultBlob = results;
    }
  }

  let apiParameters = {
    language: 'English',
    exportFormat: 'docx'
  };

  client.processImage(apiParameters, inputBlob, ocrComplete);
  context.done();
};
The code above is what I'm using in the Azure Blob trigger function, but the OCR result comes back as a buffer instead of a Word document even though I've specified the export format. I am a complete beginner at this, can someone please help me out? Thanks.
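One thing worth noting about the code above: context.done() is called before the asynchronous processImage callback has fired, so the output binding may not be populated when the function completes. A minimal sketch of deferring completion until the callback runs, keeping the nodejs-ocr client usage from the question, could look like this:

var abbyyclient = require('nodejs-ocr');
var client = new abbyyclient('ocrTest', 'ocrpassword', 'http://cloud.ocrsdk.com');

module.exports = function (context, inputBlob) {
  function ocrComplete(err, results) {
    if (err) {
      context.log('OCR error: ' + err);
    } else {
      // Assign the OCR output to the blob output binding named resultBlob
      context.bindings.resultBlob = results;
    }
    // Signal completion only after the asynchronous work has finished
    context.done();
  }

  var apiParameters = {
    language: 'English',
    exportFormat: 'docx'
  };
  client.processImage(apiParameters, inputBlob, ocrComplete);
};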
I have written a function to upload a local file (using the Cordova File plugin) to my Firebase Storage. It looks like there is no API to iterate all the files in Storage (based on other SO threads), so I decided to write the download location to my Realtime Database.
At the moment, authentication is OFF in my Realtime Database and Storage for testing. The file is uploaded correctly and I can see it in Storage, but I don't see any database entry. Can someone help with what is going wrong?
// upload trip data to firebase. currently a public bucket
cloudUpload(prg) {
  console.log("cloud upload");
  //this.presentLoader("loading...");
  let storageRef = firebase.storage().ref();
  console.log("storage ref is " + storageRef);
  this.file.readAsArrayBuffer(this.file.dataDirectory, this.logFile)
    .then(succ => {
      console.log("File read");
      console.log(succ);
      let blob = new Blob([succ], {type: "text/plain"});
      console.log("Blob created");
      let name = "file-" + Date() + ".txt";
      let uploadUrl = storageRef.child(`tripdata/${name}`);
      let uploadTask = uploadUrl.put(blob);
      uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED,
        (snapshot) => {
          let progress = Math.round((snapshot.bytesTransferred / snapshot.totalBytes) * 100);
          prg.val = progress;
        },
        (error) => {
          console.log("Firebase put error " + error);
          setTimeout(() => { prg.val = -1; }, 500);
          this.presentToast("upload error", "error");
        },
        () => {
          prg.val = 100;
          setTimeout(() => { prg.val = -1; }, 500);
          // write download URL to realtime DB so we can iterate it later
          // there is no API in storage today to iterate
          let downloadURL = uploadTask.snapshot.downloadURL;
          console.log("Download url is " + downloadURL);
          let key = 'tripDataIndex/' + name;
          console.log("key=" + key);
          firebase.database().ref(key)
            .set({'url': downloadURL, 'uploadedon': Date()}) // nothing created
            .catch(err => { console.log("ERROR " + err); this.presentToast("error creating index", "error"); });
          this.presentToast("upload complete");
        }
      );
    })
    .catch(err => { console.log("Cordova Read Error " + err); });
}
It seems the problem was in the key value. My "name" was:
let name = "file-"+Date()+".txt";
Date() produces a string containing spaces, parentheses, etc., which conflict with the Realtime Database key naming rules -- using a different key name worked perfectly!
What is very odd, however, is that it did not throw an error. I added a .catch at the end like Frank suggested, but it never went to the catch handler.
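For reference, a minimal sketch of building a name and key without the problematic characters (Realtime Database keys cannot contain '.', '#', '$', '[', ']' or '/'):

// Use a numeric timestamp instead of the Date() string, which contains
// spaces, parentheses and colons.
let name = "file-" + Date.now() + ".txt";              // e.g. "file-1589981234567.txt"

// Also replace the '.' from the extension, since '.' is not allowed
// in Realtime Database keys.
let key = 'tripDataIndex/' + name.replace(/\./g, '_'); // "tripDataIndex/file-1589981234567_txt"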
I have uploaded a PDF via a MEAN stack web application using fs. I want to extract certain fields from the PDF and display them in the web app. I have looked at a couple of npm packages like pdf.js and pdf2json, but I can't figure out the documentation and the JavaScript callbacks used in the examples. Please help!
I hope I can help answer your question. pdf2json can be used to parse a PDF and extract the text. There are a couple of steps needed to get it working. I have adapted the example from https://github.com/modesty/pdf2json.
The setup is to install pdf2json in the node app, and also underscore. The example page doesn't explain the need to define your own callback functions, and it uses self instead of this to register them. With the appropriate changes, the code to extract all the text from the PDF will look something like this:
// Get the dependencies that have already been installed
// to ./node_modules with `npm install <dep>` in the root directory
// of your app
var _ = require('underscore'),
    PDFParser = require('pdf2json');

var pdfParser = new PDFParser();

// Create a function to handle the pdf once it has been parsed.
// In this case we cycle through all the pages, extract
// all the text blocks and print them to console.
// If you do `console.log(JSON.stringify(pdf))` you will
// see how the parsed pdf is composed. Drill down into it
// to find the data you are looking for.
var _onPDFBinDataReady = function (pdf) {
  console.log('Loaded pdf:\n');
  for (var i in pdf.data.Pages) {
    var page = pdf.data.Pages[i];
    for (var j in page.Texts) {
      var text = page.Texts[j];
      console.log(text.R[0].T);
    }
  }
};

// Create an error handling function
var _onPDFBinDataError = function (error) {
  console.log(error);
};

// Use underscore to bind the data ready function to the pdfParser
// so that when the data ready event is emitted your function will
// be called. As opposed to the example, I have used `this` instead
// of `self` since self had no meaning in this context
pdfParser.on('pdfParser_dataReady', _.bind(_onPDFBinDataReady, this));

// Register error handling function
pdfParser.on('pdfParser_dataError', _.bind(_onPDFBinDataError, this));

// Construct the file path of the pdf
var pdfFilePath = 'test3.pdf';

// Load the pdf. When it is loaded your data ready function will be called.
pdfParser.loadPDF(pdfFilePath);
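Note that pdf2json stores the text runs URI-encoded, so depending on your PDF you may want to decode them when printing, e.g. in the inner loop above:

// text.R[0].T is URI-encoded by pdf2json, so decode it before use
console.log(decodeURIComponent(text.R[0].T));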
I am running the code from my server-side controller:
// Dependencies used by this controller (fs, underscore and pdf2json
// are assumed to be installed as above)
var fs = require('fs'),
    _ = require('underscore'),
    PDFParser = require('pdf2json');

module.exports = (function() {
  return {
    add: function(req, res) {
      var tmp_path = req.files.pdf.path;
      var target_path = './uploads/' + req.files.pdf.name;
      fs.rename(tmp_path, target_path, function(err) {
        if (err) throw err;
        // delete the temporary file, so that the explicitly set temporary upload dir does not get filled with unwanted files
        fs.unlink(tmp_path, function(err) {
          if (err) throw err;
          // edit here pdf parser
          res.redirect('#/');
        });
      })
    },

    show: function(req, res) {
      var pdfParser = new PDFParser();

      var _onPDFBinDataReady = function (pdf) {
        console.log('Loaded pdf:\n');
        for (var i in pdf.data.Pages) {
          var page = pdf.data.Pages[i];
          // console.log(page.Texts);
          for (var j in page.Texts) {
            var text = page.Texts[j];
            // console.log(text.R[0].T);
          }
        }
        console.log(JSON.stringify(pdf));
      };

      // Create an error handling function
      var _onPDFBinDataError = function (error) {
        console.log(error);
      };

      pdfParser.on('pdfParser_dataReady', _.bind(_onPDFBinDataReady, this));
      // Register error handling function
      pdfParser.on('pdfParser_dataError', _.bind(_onPDFBinDataError, this));

      // Construct the file path of the pdf
      var pdfFilePath = './uploads/Invoice_template.pdf';
      // Load the pdf. When it is loaded your data ready function will be called.
      pdfParser.loadPDF(pdfFilePath);
    }
    // end controller
  };
})();