I'm trying to write some text on a simple JPG or PNG with Node.js and JIMP, but I'm having trouble making it work.
The picture comes from a Telegram bot, gets merged with another picture using canvas, and then I need to write some simple text on it.
Here is my code:
const Jimp = require("jimp");
var imageCaption = 'WRITE THIS ON PICTURE';
var loadedImage;
const image = await Jimp.read(finalCanvas)
.then(function (image) {
loadedImage = image;
return Jimp.loadFont(Jimp.FONT_SANS_16_BLACK);
})
.then(function (font) {
loadedImage.print(font, 10, 10, imageCaption)
.write(finalCanvas);
})
.catch(function (err) {
console.error(err);
});
I keep getting an error saying that no matching constructor overloading was found.
I also had trouble getting JIMP to read my local files.
The complete error I get:
Error: No matching constructor overloading was found. Please see the docs for how to call the Jimp constructor.
at Jimp.throwError (/home/smp0/ifsbadge/node_modules/@jimp/utils/dist/index.js:33:13)
at new Jimp (/home/smp0/ifsbadge/node_modules/@jimp/core/dist/index.js:412:85)
at _construct (/home/smp0/ifsbadge/node_modules/@babel/runtime/helpers/construct.js:19:21)
at /home/smp0/ifsbadge/node_modules/@jimp/core/dist/index.js:926:32
at new Promise (<anonymous>)
at Function.Jimp.read (/home/smp0/ifsbadge/node_modules/@jimp/core/dist/index.js:925:10)
at TelegramBot.<anonymous> (/home/smp0/ifsbadge/index.js:51:32)
at processTicksAndRejections (internal/process/task_queues.js:93:5) {
methodName: 'constructor'
}
Complete context:
var needle = require('needle');
const Telegram = require('node-telegram-bot-api')
const { createCanvas, loadImage, ImageData } = require('canvas')
var Jimp = require("jimp");
var fs = require('fs');
const factions = {}
token="1234:BLABLA"
const bot = new Telegram(token, { polling: true })
bot.on('message', async (msg) => {
if (msg.photo) {
if (factions[msg.chat.id]) {
console.log(`Generating badge for ${msg.from.first_name} (${msg.from.username})...`)
bot.sendChatAction(msg.chat.id, 'upload_photo').catch(console.error)
const pictureCanvas = createCanvas(559, 772)
const pictureCtx = pictureCanvas.getContext('2d')
const { file_path } = await bot.getFile(msg.photo[msg.photo.length - 1].file_id)
const picture = await loadImage(`https://api.telegram.org/file/bot${token}/${file_path}`)
// PICTURE CALCULATIONS
pheight = picture.height
pwidth = picture.width
aspectratiow = (pwidth/pheight)
aspectratioh = (pheight/pwidth)
oheight = pheight*aspectratioh
owidth = (pwidth) / (pwidth/559)
newheight = 559*pheight/pwidth
var scale = Math.min(559/pwidth, 772/pheight);
var posx = (559 / 2) - (559 / 2) * scale;
var posy = (772 / 2) - (pheight / 2) * scale;
// END OF CALCULATIONS
// MERGING TWO PICTURES
pictureCtx.drawImage(picture, 10 , posy, 559, newheight)
const finalCanvas = createCanvas(559, 772)
const finalCtx = finalCanvas.getContext('2d')
const frame = await loadImage(`./frames/${factions[msg.chat.id]}.png`)
finalCtx.drawImage(pictureCanvas, 0, 0, 559, 772)
finalCtx.drawImage(frame, 0, 0, 559, 772)
factions[msg.chat.id] = null
// END OF MERGING PICTURES
//APPLYING TEXT ON PICTURE
const Jimp = require("jimp");
var imageCaption = 'WRITE THIS ON PICTURE';
var loadedImage;
const image = await Jimp.read(finalCanvas)
.then(function (image) {
loadedImage = image;
return Jimp.loadFont(Jimp.FONT_SANS_16_BLACK);
})
.then(function (font) {
loadedImage.print(font, 10, 10, imageCaption)
.write(finalCanvas);
})
.catch(function (err) {
console.error(err);
});
//END OF APPLYING TEXT ON PICTURE
bot.sendPhoto(msg.chat.id, finalCanvas.toBuffer('image/jpeg', { quality: 1 }))
} else {
bot.sendMessage(msg.chat.id, 'Write /enl1 /enl2 /enl3 or /res1 /res2 /res3 or /xf1 /xf2 !').catch(console.log)
}
}
})
bot.onText(/\/start/, async (msg) => {
bot.sendMessage(msg.chat.id, "Welcome! Select your badge Write /enl1 /enl2 /enl3 o /res1 /res2 /res3 o /xf1 /xf2 !").catch(console.log)
})
bot.onText(/\/(enl1|enl2|enl3|res1|res2|res3|xf1|xf2)/, async (msg, match) => {
factions[msg.chat.id] = match[1]
bot.sendMessage(msg.chat.id, 'Good! Now send me your picture').catch(console.log)
})
Sources on GitHub: https://github.com/pedrofracassi/badgemaker and https://github.com/FerdinandoLM/IngressFSBadgeMaker
From what I can see, finalCanvas is an instance of Canvas. I don't think Jimp accepts a Canvas instance, but Canvas#toBuffer() might be what you want, since Jimp does accept a Buffer: Jimp.read(finalCanvas.toBuffer())
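A minimal sketch of that approach, reusing the variables from the question (finalCanvas, imageCaption, bot, msg) and sending a buffer back to Telegram instead of writing to disk:
const Jimp = require("jimp");

// node-canvas -> Buffer; Jimp.read() accepts a Buffer
const buffer = finalCanvas.toBuffer("image/png");
const image = await Jimp.read(buffer);
const font = await Jimp.loadFont(Jimp.FONT_SANS_16_BLACK);
image.print(font, 10, 10, imageCaption);

// Get the captioned image back as a Buffer and send it directly
const outBuffer = await image.getBufferAsync(Jimp.MIME_JPEG);
await bot.sendPhoto(msg.chat.id, outBuffer);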
Both http.get and response.pipe(writeStream) are asynchronous. At the time you call Jimp.read, the file either isn't on the file system yet or hasn't been completely written to disk.
To run it after the file is written, listen for the write stream's finish event:
file.on('finish', async () => {
await Jimp.read(...);
});
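A fuller sketch of that flow, with placeholder paths and URL (assuming the image is downloaded over HTTPS to a temporary file before Jimp reads it):
// Hedged sketch: download to a temporary file, wait for the write stream's
// 'finish' event, then hand the path to Jimp. Paths and URL are placeholders.
const https = require('https');
const fs = require('fs');
const Jimp = require('jimp');

const file = fs.createWriteStream('/tmp/photo.jpg');
https.get('https://example.com/photo.jpg', (response) => {
  response.pipe(file);
});
file.on('finish', async () => {
  // The file is now fully written to disk, so Jimp can read it safely.
  const image = await Jimp.read('/tmp/photo.jpg');
  // ... load a font, print the caption, write the result, etc.
});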
Related
I am very new to Electron and I have been developing with context isolation set to false and node integration set to true. I am now changing my application to the default window settings (context isolation set to true and node integration set to false), but I am having trouble making an HTTPS GET request from the main process, because I cannot figure out how to pass parameters through.
Essentially, all I want is to be able to use Node's https module to do an API call and get the data back to the renderer.
Sorry for the simple error, I am very new to Node.js and Electron.
I will link the relevant parts of my code below:
preload.js
const {contextBridge, ipcRenderer} = require('electron');
let indexBridge = {
getHttpReq: async (addressInput,startDateEpoch,endDateEpoch) => {
var result = await ipcRenderer.invoke("getHttpReq")
}
}
contextBridge.exposeInMainWorld("indexBridge", indexBridge);
main.js
const { app, BrowserWindow, ipcMain } = require('electron');
const path = require("path");
const https = require('https');
const createWindow = () =>{
const win = new BrowserWindow({
height: 625,
width: 1000,
webPreferences: {
//nodeIntegration: true,
//contextIsolation: false,
preload: path.join(__dirname, 'preload.js'),
}
});
win.loadFile("index.html");
win.webContents.openDevTools();
};
ipcMain.handle("getHttpReq", (originalWallet,startDateEpoch,endDateEpoch) =>{
console.log("Here are the parameters. Original wallet:" + originalWallet+" start epoch: "+startDateEpoch+" end epoch: "+endDateEpoch);
const request = https.request()
return new Promise ((resolve, reject)=>{
https.get('https://public-api.solscan.io/account/exportTransactions?account=' + originalWallet + '&type=soltransfer&fromTime=' + startDateEpoch + '&toTime=' + endDateEpoch, (resp) => {
let data = '';
// A chunk of data has been received.
resp.on('data', (chunk) => {
data += chunk;
});
//Print status code to console
console.log("Status code from solscan" + resp.statusCode);
// The whole response has been received. Print out the result.
resp.on('end', () => {
if (resp.statusCode != 200) {
console.log("Error details from Solscan: " + data);
}
//console.log("Raw api data: \n\n" + data);
//ORIGINAL:
//var finalArrayToPass = csvToArray(data,originalWallet);
console.log(data);
resolve(data);
});
}).on("error", (err) => {
reject({message: "rejected!"});
console.log("Error: " + err.message);
});
});
})
renderer.js
document.getElementById("myButton").addEventListener("click", processInputWallet);
function getHttpReq(addressInput,startDateEpoch,endDateEpoch){
window.indexBridge.getHttpReq(addressInput,startDateEpoch,endDateEpoch);
}
async function processInputWallet() {
//Get input values from HTML input
var addressInput = document.getElementById("solAddress").value;
var startDate = new Date(document.getElementById("startDate").value);
var endDate = new Date(document.getElementById("endDate").value);
//Converts traditional date input to epoch which is recognised by the Solscan API
var startDateEpoch = startDate.getTime() / 1000;
var endDateEpoch = endDate.getTime() / 1000;
var result = getHttpReq(addressInput,startDateEpoch,endDateEpoch);
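For reference, the usual pattern for this (a sketch, not the final code) is to pass the arguments through ipcRenderer.invoke and to remember that ipcMain.handle receives the IPC event as its first parameter, with the invoke arguments after it; whatever the handler returns (or resolves to) is what invoke resolves with:
// preload.js (sketch): forward the arguments and return the result
const { contextBridge, ipcRenderer } = require('electron');
contextBridge.exposeInMainWorld('indexBridge', {
  getHttpReq: (addressInput, startDateEpoch, endDateEpoch) =>
    ipcRenderer.invoke('getHttpReq', addressInput, startDateEpoch, endDateEpoch),
});

// main.js (sketch): the first handler argument is the IpcMainInvokeEvent;
// the arguments passed to invoke() follow it
ipcMain.handle('getHttpReq', (event, originalWallet, startDateEpoch, endDateEpoch) => {
  return new Promise((resolve, reject) => {
    // ... https.get(...) as in the question, then resolve(data) / reject(err)
  });
});

// renderer.js (sketch): await the bridged call to get the data back
const result = await window.indexBridge.getHttpReq(addressInput, startDateEpoch, endDateEpoch);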
I'm trying to validate images before they're sent to the DALL·E API. The API rejects images with certain properties, so I need to validate:
file type
image dimensions
file size
whether the image has an alpha channel
What is the best way to do this?
This is a solution that works for me, although I would guess there are more efficient ways to accomplish it:
Validate items 1-3:
// Takes an imageBase64 url (we create locally, within javascript, from a file) and checks the dimensions once rendered.
function checkImageDims(imageBase64: string) {
const img = new Image();
img.src = imageBase64;
const promise = new Promise<string | null>((resolve, reject) => {
img.onload = () => {
const { width, height } = img;
if (height !== width) {
resolve(
`Image needs to be square. Current dimensions are ${width}x${height}`
);
} else {
resolve(null);
}
};
// Attach onerror outside onload so failed loads reject instead of hanging
img.onerror = reject;
});
return promise;
}
// I am using AWS Amplify with S3. This gets the URL to the image:
const getS3Url = await Storage.get(`myFolder/${s3ObjectName}`);
// Once we have the URL
const fetched = await fetch(getS3Url);
const blobbed = await fetched.blob();
const imageBase64 = URL.createObjectURL(blobbed);
const dimensionsError = await checkImageDims(imageBase64);
if (dimensionsError) return dimensionsError;
console.log({
size: blobbed.size,
type: blobbed.type,
});
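From there, the type and size checks are plain comparisons against whatever the API currently accepts; the limits below are assumptions (a square PNG under 4 MB is what the image endpoints documented at the time I wrote this), so check the current docs:
// Sketch: validate file type and size; MAX_BYTES is an assumed limit
const MAX_BYTES = 4 * 1024 * 1024; // assumed 4 MB cap
if (blobbed.type !== "image/png") {
  return "Image must be a PNG";
}
if (blobbed.size > MAX_BYTES) {
  return `Image must be under 4 MB, got ${(blobbed.size / 1024 / 1024).toFixed(1)} MB`;
}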
Validate item 4 (alpha)
// Checks image for alpha channel (transparency) https://stackoverflow.com/a/41302302/11664580
function checkForAlphaChannel(buffer: ArrayBuffer) {
const view = new DataView(buffer);
// is a PNG?
if (view.getUint32(0) === 0x89504e47 && view.getUint32(4) === 0x0d0a1a0a) {
// The color type lives in the IHDR chunk: after the 8-byte PNG signature
// and 8 bytes of chunk length + name, bit depth sits at +8 and the color
// type at +9 within the IHDR data
const type = view.getUint8(8 + 8 + 9);
return type === 4 || type === 6; // grayscale + alpha or RGB + alpha
}
return false;
}
const arrayBuffer = await blobbed.arrayBuffer();
const checkAlpha = checkForAlphaChannel(arrayBuffer);
console.log({checkAlpha})
Credit https://stackoverflow.com/a/41302302/11664580 for the Alpha validation.
For SEO optimization I'm attempting to reduce the size of the files the user tries to upload (I know I could simply enforce a size limit, but I'm not doing that for UX reasons). I'm doing it in the front end because I want to use the pre-signed URL method (AWS S3).
process(event: any, imageInputElement: any, maxWidth: number): any {
return new Promise<any>((resolve, reject) => {
try {
const file = event.target.files[0]
console.log('🚀 ~ file: index.vue ~ line 143 ~ process ~ file', file)
const fileSize = file.size
if (fileSize < 100000) return
if (!file) return
const reader = new FileReader()
reader.readAsDataURL(file)
reader.onload = function (event: any) {
const src = event.target.result
const canvas = document.createElement('canvas') as any
const imgElement = document.createElement('img') as any
imgElement.src = src
imageInputElement.src = event.target?.result
console.log(maxWidth)
imageInputElement.onload = function (e: any) {
const scaleSize = maxWidth / e.target.width
canvas.width = maxWidth
canvas.height = e.target.height * scaleSize
const ctx = canvas.getContext('2d')
ctx.drawImage(e.target, 0, 0, canvas.width, canvas.height)
const compressPer = (data: number) => {
const result = 10000000 / data
if (Math.trunc(result) >= 100) {
return 100
} else if (Math.trunc(result) < 1) {
return 1
} else {
return Math.trunc(result)
}
}
const srcEncoded = ctx.canvas.toDataURL(
e.target,
'image/jpeg',
compressPer(fileSize)
)
const result = new File([srcEncoded], `${file.name}`, {
type: 'image/jpeg',
})
console.log(
'🚀 ~ file: index.vue ~ line 186 ~ process ~ result',
result
)
resolve(result)
}
}
} catch (error: any) {
reject(error)
}
})
},
This function gets called every time the user changes a file input.
event: the default change event, which includes the file itself.
imageInputElement: the element I want to render the new file in.
maxWidth: the width I pass to the function to specify the maximum width.
The actual problem: the file becomes visible in the browser and gets uploaded to the S3 bucket, but it is corrupted when I download it again.
Instead of
const file = event.target.files[0]
I should have used
var blobBin = atob(srcEncoded.split(',')[1]);
var array = [];
for(let i = 0; i < blobBin.length; i++) {
array.push(blobBin.charCodeAt(i));
}
const result:any = new Blob([new Uint8Array(array)], {type: 'image/png'});
I got my answer from here.
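As a side note (not part of the fix above), canvas.toBlob() produces the binary directly and avoids the data-URL/atob round-trip; a rough sketch of that variant inside the same onload handler:
// Sketch: let the canvas hand back a Blob directly. Note the quality
// argument is a 0-1 factor here, not a percentage.
canvas.toBlob(
  (blob) => {
    const result = new File([blob], file.name, { type: 'image/jpeg' })
    resolve(result)
  },
  'image/jpeg',
  0.7 // assumed quality factor
)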
So I'm using TensorFlow.js, and Python for training the models. Now I'm working on the website so that doctors can upload an MRI image and get a prediction. Here's my code:
<script>
async function LoadModels(){
model = undefined;
model = await tf.loadLayersModel("http://127.0.0.1:5500/modelsBrain/modelBrain.json");
const image = document.getElementById("image");
const image1 = tf.browser.fromPixels(image);
const image2 = tf.reshape(image1, [1,200,200,3]);
const prediction = model.predict(image2);
const softmaxPred = prediction.softmax().dataSync();
alert(softmaxPred);
let top5 = Array.from(softmaxPred)
.map(function (p, i) {
return {
probability: p,
className: TARGET_CLASSES_BRAIN[i]
};
}).sort(function (a, b) {
return b.probability - a.probability;
}).slice(0, 4);
const pred = [[]];
top5.forEach(function (p) {
pred.push(p.className, p.probability);
alert(p.className + ' ' + p.probability);
});
}
const fileInput = document.getElementById("file-input");
const image = document.getElementById("image");
function getImage() {
if(!fileInput.files[0])
throw new Error("Image not found");
const file = fileInput.files[0];
const reader = new FileReader();
reader.onload = function (event) {
const dataUrl = event.target.result;
const imageElement = new Image();
imageElement.src = dataUrl;
imageElement.onload = async function () {
image.setAttribute("src", this.src);
image.setAttribute("height", this.height);
image.setAttribute("width", this.width);
await LoadModels();
};
};
reader.readAsDataURL(file);
}
fileInput.addEventListener("change", getImage);
</script>
This error does not occur on every (!) Live Server launch. I am confused; what could the problem be?
The CONTEXT_LOST_WEBGL error is 99% due to low GPU memory - what kind of hardware do you have available? Alternatively, you can try the WASM backend, which runs computation on the CPU and doesn't require GPU resources.
By the way, you're not deallocating your tensors anywhere, so if you're running this in a loop for multiple inputs you have a massive memory leak. But if the error occurs on the first input already, your GPU simply isn't good enough for this model.
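A hedged sketch of both suggestions, using tf.tidy() to free intermediate tensors and optionally switching to the WASM backend (which requires the @tensorflow/tfjs-backend-wasm package or script to be loaded first):
// Optional: CPU-based backend, no WebGL memory involved
await tf.setBackend('wasm');
await tf.ready();

// Run inference inside tf.tidy() so the intermediate tensors
// (fromPixels, reshape, predict, softmax) are disposed automatically.
const softmaxPred = tf.tidy(() => {
  const image1 = tf.browser.fromPixels(image);
  const image2 = tf.reshape(image1, [1, 200, 200, 3]);
  const prediction = model.predict(image2);
  return prediction.softmax().dataSync(); // plain Float32Array, safe to keep
});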
Here's my code:
// process-image.js (web-worker)
self.addEventListener('message', ev => {
const {id,file} = ev.data;
const reader = new FileReader();
reader.onload = ev => {
const imageFile = new Image(); // <-- error is here
imageFile.src = ev.target.result;
imageFile.onload = () => {
const fit = makeFit(imageFile);
self.postMessage({
id,
file: {
src: ev.target.result,
width: fit.width,
height: fit.height,
}
})
}
};
reader.readAsDataURL(file);
});
This was working fine in the main UI thread, but apparently I don't have access to Image inside of a web-worker. The specific error is:
Uncaught ReferenceError: Image is not defined
at FileReader.reader.onload (process-image.js:12)
Is there another way get the width and height of an image?
I'd like to support as many file types as possible, but just JPG is good enough for now if there's some sort of library that can do this and will run in a web-worker.
Here's the relevant bit from the UI thread:
// some-react-component.js
componentDidMount() {
this.worker = new ImageWorker();
this.worker.addEventListener('message', ev => {
const {id, file} = ev.data;
this.setState(({files}) => {
files = files.update(id, img => ({
...img,
...file,
}))
const withWidths = files.filter(f => f.width);
const avgImgWidth = withWidths.reduce((acc, img) => acc + img.width, 0) / withWidths.size;
return {files, avgImgWidth};
});
});
}
onDrop = ev => {
ev.preventDefault();
Array.prototype.forEach.call(ev.dataTransfer.files, file => {
const id = shortid();
this.worker.postMessage({id, file});
const img = {
id,
width: null,
height: null,
src: null,
name: file.name,
}
this.setState(({files}) => ({files: files.set(id, img)}));
});
}
Only thing worth noting here is that id is just a random UUID, and file is a File. I'm passing the whole thing to the web-worker so that I can do all the processing there.
I think there might be a simpler solution:
https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/createImageBitmap
I managed to get the size without using FileReader at all:
http://jsfiddle.net/hL1Ljkmv/2/
var response = `self.addEventListener('message', async ev => {
const {id,file} = ev.data;
console.log('received data');
let image = await self.createImageBitmap(file);
console.log(image);
});`;
const blob = new Blob([response], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
const input = document.querySelector('#file-input');
input.addEventListener('change', (e) => {
worker.postMessage({file: input.files[0], id: 1})
});
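The ImageBitmap already carries the dimensions, so the worker can post them straight back; a small sketch continuing the snippet above:
// Inside the worker's message handler, after createImageBitmap(file):
const image = await self.createImageBitmap(file);
self.postMessage({ id, width: image.width, height: image.height });
image.close(); // free the bitmap once the dimensions have been read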
I managed to get the width and height of most JPEGs by following this spec.
self.addEventListener('message', ev => {
const {id, file} = ev.data;
const reader = new FileReader();
reader.onload = ev => {
const view = new DataView(reader.result);
let offset = 0;
let header = view.getUint16(offset, false);
if(header !== 0xFFD8) {
throw new Error(`Not a JPEG`);
}
offset += 2;
for(; ;) {
header = view.getUint16(offset, false);
offset += 2;
let length = view.getUint16(offset, false);
if(header === 0xFFC0) break;
offset += length;
}
const height = view.getUint16(offset + 3);
const width = view.getUint16(offset + 5);
const fit = makeFit({width, height});
self.postMessage({
id,
file: {
src: URL.createObjectURL(file),
width: fit.width,
height: fit.height,
}
})
};
reader.readAsArrayBuffer(file);
});
This code is flakey as heck, but surprisingly it works. I'm still looking for a more robust solution.
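One way to make it a little less flaky (a sketch based on the JPEG spec, not tested against the code above): baseline files use the 0xFFC0 SOF0 marker, but progressive and other variants use other SOFn markers, so the scan can accept the whole SOF range instead of just 0xFFC0:
// Sketch: treat any SOF0-SOF15 marker (0xFFC0-0xFFCF) as the frame header,
// except 0xFFC4 (DHT), 0xFFC8 (JPG) and 0xFFCC (DAC), which are not frames.
function isSOF(marker) {
  return marker >= 0xFFC0 && marker <= 0xFFCF &&
         marker !== 0xFFC4 && marker !== 0xFFC8 && marker !== 0xFFCC;
}
// ...then in the loop: if (isSOF(header)) break;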