I'm trying to pass a JPG that I have stored in a table as a BLOB to the following JavaScript code:
class ImageLoader {
constructor(imageWidth, imageHeight) {
this.canvas = document.createElement('canvas');
this.canvas.width = imageWidth;
this.canvas.height = imageHeight;
this.ctx = this.canvas.getContext('2d');
}
async getImageData(url) {
await this.loadImageAsync(url);
const imageData = this.ctx.getImageData(0, 0, this.canvas.width, this.canvas.height);
return imageData;
}
loadImageAsync(url) {
return new Promise((resolve, reject) => {
this.loadImageCb(url, () => {
resolve();
});
});
}
loadImageCb(url, cb) {
loadImage(
url,
img => {
if (img.type === 'error') {
throw `Could not load image: ${url}`;
} else {
// load image data onto input canvas
this.ctx.drawImage(img, 0, 0)
//console.log(`image was loaded`);
window.setTimeout(() => { cb(); }, 0);
}
},
{
maxWidth: this.canvas.width,
maxHeight: this.canvas.height,
cover: true,
crop: true,
canvas: true,
crossOrigin: 'Anonymous'
}
);
}
}
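For reference, here is a minimal usage sketch of the class above. It assumes the blueimp-load-image library is available globally as loadImage (which loadImageCb already relies on) and that imageUrl is a placeholder for whatever URL or blob reference ends up being passed in:
// Hypothetical usage; `imageUrl` stands in for the actual image URL.
const loader = new ImageLoader(224, 224);
loader.getImageData(imageUrl).then(imageData => {
    // imageData is a canvas ImageData object: width * height pixels, 4 RGBA bytes each
    console.log(imageData.width, imageData.height, imageData.data.length);
});
Note that, as written, the promise in loadImageAsync never rejects: the throw happens inside the loadImage callback, so a failed load will not surface through getImageData.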
I've tried APEX_UTIL.GET_BLOB_FILE_SRC, but then the JavaScript code throws the error "Could not load image".
I then tried to use the following procedure:
create or replace PROCEDURE get_file (p_file_name IN VARCHAR2) IS
l_blob_content UPLOADED_IMAGES.blob_content%TYPE;
l_mime_type UPLOADED_IMAGES.mime_type%TYPE;
BEGIN
SELECT blob_content,
mime_type
INTO l_blob_content,
l_mime_type
FROM UPLOADED_IMAGES
WHERE filename = p_file_name;
sys.HTP.init;
sys.OWA_UTIL.mime_header(l_mime_type, FALSE);
sys.HTP.p('Content-Length: ' || DBMS_LOB.getlength(l_blob_content));
sys.HTP.p('Content-Disposition: filename="' || p_file_name || '"');
sys.OWA_UTIL.http_header_close;
sys.WPG_DOCLOAD.download_file(l_blob_content);
apex_application.stop_apex_engine;
EXCEPTION
WHEN apex_application.e_stop_apex_engine
THEN RAISE;
WHEN OTHERS THEN
HTP.p('Whoops');
END;
Still the same error "Could not load image" and a new error "Whoops Status:204 No Content X-Content-Type-Options:nosniff X-Xss-Protection:1; mode=block Referrer-Policy:strict-origin Cache-Control:no-store Pragma:no-cache Expires:Sun, 27 Jul 1997 13:00:00 GMT X-Frame-Options:SAMEORIGIN".
So I'm not sure what I'm doing wrong or how to better go about doing this right.
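One way to narrow this down (a debugging sketch, not APEX-specific advice) is to request the exact URL handed to the image loader and log what actually comes back; a 204, a redirect to a login page, or a non-image Content-Type would all produce "Could not load image". Here imageUrl is a placeholder for the URL built from APEX_UTIL.GET_BLOB_FILE_SRC or the get_file procedure:
// Debugging sketch: inspect the response behind the image URL. `imageUrl` is hypothetical.
fetch(imageUrl)
    .then(response => {
        console.log('status:', response.status);
        console.log('content-type:', response.headers.get('Content-Type'));
        return response.blob();
    })
    .then(blob => console.log('payload size (bytes):', blob.size))
    .catch(err => console.error('request failed:', err));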
Update:
I've now tried "blob2clobbase64", but that doesn't seem to do anything:
declare
l_image_clob clob;
l_image_blob blob;
begin
select BLOB_CONTENT
into l_image_blob
from UPLOADED_IMAGES
where FILENAME = :P1_IMAGES;
l_image_clob := apex_web_service.blob2clobbase64(
p_blob => l_image_blob
);
apex_json.open_object;
apex_json.write('imageBase64', l_image_clob );
apex_json.close_object;
end;
The code I'm using for image loading:
const imageSize = 224;
const imageBLOB = new Image();
// Load image.
apex.server.process(
"get_file",
{},
{
success: async function(data) {
imageBLOB.src = 'data:image/jpeg;base64,' + data.imageBase64;
const imageLoader = new ImageLoader(imageSize, imageSize);
const imageData = await imageLoader.getImageData(imageBLOB);
}
}
);
I got it to work. I ended up using the "blob2clobbase64" process, dropped the ImageLoader class, and instead used getImageData from the canvas directly. The PL/SQL is the same but the JavaScript is different:
const imageSize = 224;
var newImageData;
function returnImageData(imageData){
return newImageData = imageData;
}
async function runImageExample(){
apex.server.process(
"get_image", {}, {
success: async function(data) {
var canvas = document.createElement("canvas");
canvas.width = imageSize;
canvas.height = imageSize;
var ctx = canvas.getContext("2d");
var imageBLOB = new Image();
imageBLOB.crossOrigin = "Anonymous";
imageBLOB.onload = function() {
ctx.drawImage(imageBLOB, 0, 0, imageSize, imageSize);
var imgData = ctx.getImageData(0, 0, imageSize, imageSize);
returnImageData(imgData.data);
};
imageBLOB.src = 'data:image/jpeg;base64,' + data.imageBase64;
}
}
);
}
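If you want to avoid the newImageData global, the same logic can be wrapped so the caller gets a promise for the pixel data; apex.server.process already returns a promise-like object. A sketch under the same assumptions as above (process named get_image, JSON attribute imageBase64, square input of imageSize pixels):
// Sketch: resolve with the RGBA pixel data instead of writing to a global.
function getImagePixels(imageSize) {
    return apex.server.process("get_image", {}).then(function(data) {
        return new Promise(function(resolve, reject) {
            var canvas = document.createElement("canvas");
            canvas.width = imageSize;
            canvas.height = imageSize;
            var ctx = canvas.getContext("2d");
            var img = new Image();
            img.onload = function() {
                ctx.drawImage(img, 0, 0, imageSize, imageSize);
                resolve(ctx.getImageData(0, 0, imageSize, imageSize).data);
            };
            img.onerror = reject;
            img.src = 'data:image/jpeg;base64,' + data.imageBase64;
        });
    });
}
// Usage: getImagePixels(224).then(function(pixels) { console.log(pixels.length); });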
I'm using the face-api.js JavaScript API to develop a web app where the user uploads a picture and we want to detect the faces in it.
This is my HTML code:
<input type="file" id="user_pic" accept="image/x-png,image/gif,image/jpeg">
<img src="images/250x250.webp" id="preview" alt="">
<canvas id="canvas"></canvas>
<script src="scripts/tfjs.js"></script>
<script src="scripts/face-api.min.js"></script>
<script src="scripts/index.js"></script>
and the following code is what I wrote for the face detection:
const model = tf.loadLayersModel('./web_model/vgg_model.json')
const user_pic = document.getElementById('user_pic')
const preview = document.getElementById('preview')
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
window.onload = function() {
canvas.width = preview.width;
canvas.height = preview.height;
ctx.drawImage(preview, 0, 0);
};
preview.onclick = () => user_pic.click()
const MODEL_URL = '../faceapi_models'
Promise.all([
faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL),
faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),])
.then((val) => {
console.log(val)
})
.catch((err) => {
console.log(err)
})
user_pic.addEventListener('change', () => {
const reader = new FileReader()
reader.onload = (e) => {
const img = new Image();
img.onload = function() {
canvas.width = img.width;
canvas.height = img.height;
ctx.drawImage(img, 0, 0);
};
img.src = e.target.result;
}
reader.readAsDataURL(user_pic.files[0]);
detectFaces(user_pic.files[0])
})
async function detectFaces(input) {
let imgURL = URL.createObjectURL(input)
const imgElement = new Image()
imgElement.src = imgURL
const results = await faceapi.detectAllFaces(imgElement)
// .withFaceLandmarks()
// .withFaceExpressions()
.then(results => {
if (Array.isArray(results) && results.forEach) {
results.forEach(result => {
console.log(result)
const { x, y, width, height } = result.box;
ctx.lineWidth = 3;
ctx.strokeRect(x, y, width, height);
});
} else {
console.error('Results is not an array or does not have a forEach function.');
}
});
}
So far I have only used the face-api.min.js file and everything works fine: after selecting an image file, the face is recognized. But as soon as I added the tfjs.js file to use const model = tf.loadLayersModel('./web_model/vgg_model.json'), I got the following new errors:
Uncaught (in promise) Error: SsdMobilenetv1 - load model before inference
And after a few seconds this error is shown in the console:
Uncaught (in promise) TypeError: Og(...).platform.isTypedArray is not a function
What is the problem?
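The first error suggests detection runs before the model downloads finish: loadFromUri and tf.loadLayersModel both return promises, so nothing that does inference should run until they resolve. A hedged sketch with the same model paths as above:
// Sketch: finish loading every model before wiring up detection.
let vggModel;
async function loadModels() {
    await Promise.all([
        faceapi.nets.ssdMobilenetv1.loadFromUri('../faceapi_models'),
        faceapi.nets.faceRecognitionNet.loadFromUri('../faceapi_models'),
        faceapi.nets.faceLandmark68Net.loadFromUri('../faceapi_models'),
    ]);
    vggModel = await tf.loadLayersModel('./web_model/vgg_model.json');
}
// Only attach the change listener / call detectFaces after this resolves:
// loadModels().then(() => console.log('models ready'));
The second error (platform.isTypedArray is not a function) usually points to a version mismatch between the standalone tfjs.js you added and the TensorFlow.js build that face-api.min.js was compiled against; loading a tfjs version matching the one face-api.js expects (or relying on the copy it bundles) is worth checking.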
I want to check the image's alpha channel to see whether it has a background: reject the image if it does, allow it if it doesn't. But when I upload an image using the changefile constant, the hasAlpha function doesn't show the 'error' alert when the image has a background.
Function for checking if the image has a transparent background:
export function hasAlpha(file) {
return new Promise((resolve, reject) => {
let hasAlpha = false;
const canvas = document.querySelector("canvas");
const ctx = canvas.getContext("2d");
const img = new Image();
img.crossOrigin = "Anonymous";
img.onerror = reject;
img.onload = function () {
canvas.width = img.width;
canvas.height = img.height;
ctx.drawImage(img, 0, 0);
const imgData = ctx.getImageData(0, 0, canvas.width, canvas.height).data;
for (let j = 0; j < imgData.length; j += 4) {
if (imgData[j + 3] < 255) {
hasAlpha = true;
break;
}
}
resolve(hasAlpha);
};
img.src = URL.createObjectURL(file);
});
}
Where the image is uploaded:
const changefile = async (e) => {
if (e.target.id === "mainImg") {
1;
let file = e.target.files[0] ? e.target.files[0] : "";
if (file) {
let extension = file.name.substr(file.name.lastIndexOf(".") + 1);
if (validExtensions.includes(extension)) {
setTempImg(URL.createObjectURL(file));
setstate({
...state,
image: file
});
if (hasAlpha(URL.createObjectURL(file))) {
alert(hasAlpha(URL.createObjectURL(file)));
} else {
alert("error");
}
} else {
setstate({
...state,
image: ""
});
}
} else {
setstate({
...state,
image: ""
});
setTempImg("");
}
}
};
I am here from your Google Docs bug. I have already sent you a proposal. I tested this code; if it doesn't work in your project, it means another bug exists in your React project. To solve those bugs I would need to see your whole React component code.
function hasAlpha(file) {
return new Promise((resolve, reject) => {
const img = new Image()
// create image from file
img.src = URL.createObjectURL(file)
img.onerror = reject
img.onload = () => {
// create canvas
const canvas = document.createElement('canvas')
const ctx = canvas.getContext('2d')
canvas.width = img.width
canvas.height = img.height
ctx.drawImage(img, 0, 0)
// get image data
const data = ctx.getImageData(0, 0, canvas.width, canvas.height)
// check if image has any transparent background
const hasTransparent = [...data.data].some((value, index) => {
return index % 4 === 3 && value < 255
})
return hasTransparent ? resolve(true) : resolve(false)
}
})}
You have to wait until hasAlpha() resolves or rejects. So you should call await hasAlpha(file) and wrap the entire call in a try/catch. If the promise is rejected, you can handle it in the catch block.
try {
if(await hasAlpha(file)) {
// promise resolved, image is transparent
} else {
// promise resolved, image is not transparent
}
} catch (e) {
// promise rejected
}
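Applied to the change handler from the question, that looks roughly like the sketch below: changefile becomes async, hasAlpha receives the File object itself (not an object URL), and the extension check from the original is left out for brevity:
// Sketch: await hasAlpha(file) inside the upload handler.
const changefile = async (e) => {
    const file = e.target.files[0];
    if (!file) return;
    try {
        const transparent = await hasAlpha(file);
        if (transparent) {
            // transparent background: accept the image
            setTempImg(URL.createObjectURL(file));
            setstate({ ...state, image: file });
        } else {
            // opaque background: reject it
            alert("error");
        }
    } catch (err) {
        // the image could not be decoded at all
        alert("error");
    }
};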
I am trying to use jQuery/JavaScript with ZXing.NET to decode a PDF417 barcode from a video source.
Here is my HTML:
<video id="video" width="800" height="800"></video>
<canvas id="canvas" width="800" height="800"></canvas>
And the jQuery for the camera, plus the code that calls a .NET method to decode the barcode:
var video = document.getElementById('video');
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
const hdConstraints = {
video: { width: { min: 1280 }, height: { min: 720 } }
};
navigator.mediaDevices.getUserMedia({ video: true }).then(function (stream) {
video.srcObject = stream;
video.play();
});
}
$("#video").on("playing", function () {
setInterval(function () { scanBarcode() }, 500);
});
function scanBarcode() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var canvas_context = canvas.getContext('2d');
canvas_context.drawImage(video, 0, 0, 640, 480);
var image = document.getElementById("canvas").toDataURL("image/png");
image = image.replace('data:image/png;base64,', '');
$.post("Home/OnScan", { imageData: image }, function (data, status) {
console.log(data);
});
}
As you can see, I am getting the image from the canvas and passing it to my .NET method.
And here is my .NET method to decode the PDF417 barcode:
public JsonResult OnScan(string imageData)
{
BitmapImage bitmapImage = new BitmapImage();
byte[] byteBuffer = Convert.FromBase64String(imageData);
Bitmap bmp;
using (var ms = new MemoryStream(byteBuffer))
{
bmp = new Bitmap(ms);
}
BarcodeReader reader = new BarcodeReader();
DecodingOptions options = new DecodingOptions
{
TryHarder = true,
PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 }
};
reader.Options = options;
var result = reader.Decode(bmp);
return Json(result.Text, JsonRequestBehavior.AllowGet);
}
Now this still does not work, but I remembered that when I first did this in Xamarin.Forms it also did not work until I added the CameraResolutionSelector option:
var options = new MobileBarcodeScanningOptions
{
TryHarder = true,
CameraResolutionSelector = HandleCameraResolutionSelectorDelegate,
PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 },
};
Here is the HandleCameraResolutionSelectorDelegate method:
public CameraResolution HandleCameraResolutionSelectorDelegate(List<CameraResolution> availableResolutions)
{
//Don't know if this will ever be null or empty
if (availableResolutions == null || availableResolutions.Count < 1)
return new CameraResolution() { Width = 800, Height = 600 };
//Debugging revealed that the last element in the list
//expresses the highest resolution. This could probably be more thorough.
return availableResolutions[availableResolutions.Count - 1];
}
So I am starting to think it is the resolution of the camera that is causing my barcode not to scan. On another note, when I change BarcodeFormat to QR_CODE and scan a QR code it works, but not with a PDF417 barcode. What am I doing wrong?
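Before blaming the camera itself, it may be worth capturing the frame at the stream's native resolution rather than the fixed 640x480 drawn above, since PDF417 needs much more horizontal detail than a QR code. A sketch of scanBarcode that sizes the canvas from the video (same Home/OnScan endpoint as above):
// Sketch: post the frame at the resolution the camera actually delivers.
function scanBarcode() {
    var video = document.getElementById('video');
    var canvas = document.getElementById('canvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    var ctx = canvas.getContext('2d');
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    var image = canvas.toDataURL("image/png").replace('data:image/png;base64,', '');
    $.post("Home/OnScan", { imageData: image }, function (data, status) {
        console.log(data);
    });
}
Note also that hdConstraints is defined above but never passed in; calling navigator.mediaDevices.getUserMedia(hdConstraints) is the browser-side counterpart of the CameraResolutionSelector idea.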
I have some instances like the one in this issue where, despite an apparently good image reconstruction, ZXing can't decode as expected, and I'm not competent enough to figure out why.
Try setting PureBarcode = true; that should resolve the issue.
DecodingOptions options = new DecodingOptions
{
TryHarder = true,
PossibleFormats = new List<BarcodeFormat> { BarcodeFormat.PDF_417 },
PureBarcode = true,
AutoRotate = true,
TryInverted = true,
CameraResolutionSelector = HandleCameraResolutionSelectorDelegate
};
CameraResolution HandleCameraResolutionSelectorDelegate(List<CameraResolution> availableResolutions)
{
if (availableResolutions == null || availableResolutions.Count < 1)
return new CameraResolution () { Width = 800, Height = 600 };
return availableResolutions [availableResolutions.Count - 1];
}
Hi, I'm currently developing an Ionic app with Firebase.
I'm trying to upload multiple files with resizing.
The weird thing is that when I call the resize method the input image is different each time, but once the upload finishes it ends up uploading duplicated images (the last image of the array).
I console.logged the dataURL every time it resizes and found that the data URL is always the same.
The following code uploads multiple files:
multipleUpload: function(key, folder, files, targetWidth) {
var q = $q.defer();
var ct = Date.now();
var urls = [];
var recursive = function (n, args) {
var arg = args[n];
ImageService.resize(arg.file, targetWidth)
.then(function(file) {
upload(ct + '' + n + key, folder + '/' + key, file, CONFIG.MESSAGE.FILE_UPLOAD + (n + 1) + '번 파일')
.then(function(url) {
urls.push(url);
if (++n < args.length) {
recursive(n, args);
} else {
q.resolve(urls);
}
}, function(error) {
q.reject(error);
});
})
}
recursive(0, files);
return q.promise;
},
The following code is the resizing method:
resize: function(file, targetWidth) {
var q = $q.defer();
// Resizing Image
var img = new Image();
img.setAttribute('crossOrigin', 'anonymous');
img.onload = function(){
var canvas = document.createElement("canvas"),
ctx = canvas.getContext("2d");
canvas.width = targetWidth;
canvas.height = canvas.width * (img.height / img.width);
ctx.drawImage(this, 0, 0, canvas.width, canvas.height);
// Data URL to BLOB
var dataURL = canvas.toDataURL();
console.log(dataURL); // Returns same dataURL all the time.
dataURLtoBlob(dataURL, Date.now())
.then(function(blob) {
q.resolve(blob);
});
};
img.src = file;
return q.promise;
},
Firstly, I found it weird that it worked fine on iOS but not on Android.
I started digging into the plugin options and disabled allow-edit; now it works fine. I believe it is an issue with the plugin.
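For anyone hitting the same thing, the fix lives in the camera call rather than the resize code. A hedged sketch of cordova-plugin-camera options with editing disabled (adjust to however your Ionic project invokes the camera):
// Sketch: disable the native edit/crop step so each capture returns its own file.
navigator.camera.getPicture(
    function (fileUri) {
        // hand fileUri on to resize()/multipleUpload() as before
    },
    function (err) {
        console.error(err);
    },
    {
        quality: 80,
        destinationType: Camera.DestinationType.FILE_URI,
        sourceType: Camera.PictureSourceType.PHOTOLIBRARY,
        allowEdit: false, // the option disabled above
        correctOrientation: true
    }
);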
We are building an application where the user can upload images, but I need to resize the images before I can upload them to the server. The input for the file is an input[type=file], and we check whether the image is valid. If it is valid, we continue with the following request:
Upload.upload({url: url, data: {file: input.x}, headers: {'Authorization': token}});
The input variable is the value of the input field in the HTML. This code works great when we don't need to resize the image. Unfortunately, that would only be the case in a perfect world. After searching for a while for others with the same problem, I seem to have found the solution using the following function:
var resize = function(size, url, callback) {
var temp = new Image();
temp.onload = function() {
var target = { width: temp.width, height: temp.height, aspect: temp.width / temp.height };
if (temp.width > temp.height) {
size = Math.min(temp.width, size);
target.width = size; target.height = size / target.aspect;
} else {
size = Math.min(temp.height, size);
target.height = size; target.width = size * target.aspect;
}
var canvas = document.createElement('canvas');
canvas.width = target.width; canvas.height = target.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(this, 0, 0, temp.width, temp.height, 0, 0, target.width, target.height);
callback(canvas.toDataURL(url.split(',')[0].split(':')[1].split(';')[0]));
}; temp.src = url;
}
This appears to be working great. The resize function needs a Base64 string, so I wrapped the upload method inside the following:
var reader = new FileReader();
reader.onload = function(e) {
resize(1000, e.target.result, function(done) {
console.log(done);
Upload.upload({url: url, data: {file: done}, headers: {'Authorization': UserService.getToken()}});
});
}
reader.readAsDataURL(file);
This way I pass a Base64 string containing the image, and even this seems to be working like a charm. But the biggest problem is that the server expects a real file and not a Base64 string, so the upload itself fails.
I found the function in "Convert Data URI to File then append to FormData", which should fix this, but it doesn't. After adding the function from that link, I changed the method to this:
var reader = new FileReader();
reader.onload = function(e) {
resize(1000, e.target.result, function(done) {
console.log(done);
Upload.upload({url: url, data: {file: dataURItoBlob(done)}, headers: {'Authorization': UserService.getToken()}});
});
}
reader.readAsDataURL(file);
Does any of you know how I could change the Base64 string back into a file? I found tons of posts explaining how to show it in the browser, but none solving my problem of uploading it as a file.
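For reference, here is a minimal sketch of such a data-URI-to-Blob helper (essentially the function from the linked answer); the update below ends up inlining the same idea into resize:
// Sketch: convert a canvas.toDataURL() result into a Blob.
function dataURItoBlob(dataURI) {
    var parts = dataURI.split(',');
    var mime = parts[0].split(':')[1].split(';')[0];
    var bytes = parts[0].indexOf('base64') >= 0 ? atob(parts[1]) : decodeURIComponent(parts[1]);
    var ia = new Uint8Array(bytes.length);
    for (var i = 0; i < bytes.length; i++) {
        ia[i] = bytes.charCodeAt(i);
    }
    return new Blob([ia], { type: mime });
}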
Sorry for the question, but it seems the dataURItoBlob function works fine; there just was no original filename inside the blob, so I couldn't upload it right away. I changed the resize function so it performs both actions at once, and this is the result:
var resize = function(size, url, name, callback) {
var temp = new Image();
temp.onload = function() {
var target = { width: temp.width, height: temp.height, aspect: temp.width / temp.height };
if (temp.width > temp.height) {
size = Math.min(temp.width, size);
target.width = size; target.height = size / target.aspect;
} else {
size = Math.min(temp.height, size);
target.height = size; target.width = size * target.aspect;
}
var canvas = document.createElement('canvas');
canvas.width = target.width; canvas.height = target.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(this, 0, 0, temp.width, temp.height, 0, 0, target.width, target.height);
var type = url.split(',')[0].split(':')[1].split(';')[0];
var data = canvas.toDataURL(type);
//callback(data);
var bytes = (data.split(',')[0].indexOf('base64') >= 0) ? atob(data.split(',')[1]) : unescape(data.split(',')[1]);
var ia = new Uint8Array(bytes.length);
for(var i = 0; i < bytes.length; i++) { ia[i] = bytes.charCodeAt(i); }
var blb = new Blob([ia], {type : type});
blb.name = name;
callback(blb);
}; temp.src = url;
};
To upload the file, I do the following:
var deferred = $q.defer();
var reader = new FileReader();
reader.onload = function(e) {
resize(maxSize, e.target.result, image.name, function(done) {
deferred.resolve(Upload.upload({url: url, data: {file: done}, headers: {'Authorization': token}}));
});
}
reader.readAsDataURL(image);
return deferred.promise;
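To consume this, the caller just chains on the returned promise. A sketch, assuming the block above is wrapped in a function named resizeAndUpload(image, maxSize, url, token) (a hypothetical name, not from the original code):
// Hypothetical wrapper name; the body is the snippet above.
resizeAndUpload(image, 1000, url, token)
    .then(function (response) {
        // deferred.resolve(Upload.upload(...)) lets $q adopt the upload promise,
        // so `response` here is the result of the upload itself.
        console.log('upload finished', response);
    })
    .catch(function (err) {
        console.error('resize or upload failed', err);
    });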