Worker blocking UI thread in Chrome - javascript

I'm building a web app that uses EvaporateJS to upload large files to Amazon S3 using Multipart Uploads. I noticed an issue where every time a new chunk was started the browser would freeze for ~2 seconds. I want the user to be able to continue to use my app while the upload is in progress, and this freezing makes that a bad experience.
I used Chrome's Timeline to look into what was causing this and found that it was SparkMD5's hashing. So I've moved the entire upload process into a Worker, which I thought would fix the issue.
Well the issue is now fixed in Edge and Firefox, but Chrome still has the exact same problem.
Here's a screenshot of my Timeline:
As you can see, during the freezes my main thread is doing basically nothing, with <8ms of JavaScript running during that time. All the work is occurring in my Worker thread, and even that is only running for ~600ms or so, not the 1386ms that my frame takes.
I'm really not sure what's causing the issue, are there any gotchas with Workers that I should be aware of?
Here's the code for my Worker:
var window = self; // For Worker-unaware scripts
// Shim to make Evaporate work in a Worker
var document = {
    createElement: function() {
        var href = undefined;
        var elm = {
            set href(url) {
                var obj = new URL(url);
                elm.protocol = obj.protocol;
                elm.hostname = obj.hostname;
                elm.pathname = obj.pathname;
                elm.port = obj.port;
                elm.search = obj.search;
                elm.hash = obj.hash;
                elm.host = obj.host;
                href = url;
            },
            get href() {
                return href;
            },
            protocol: undefined,
            hostname: undefined,
            pathname: undefined,
            port: undefined,
            search: undefined,
            hash: undefined,
            host: undefined
        };
        return elm;
    }
};
importScripts("/lib/sha256/sha256.min.js");
importScripts("/lib/spark-md5/spark-md5.min.js");
importScripts("/lib/url-parse/url-parse.js");
importScripts("/lib/xmldom/xmldom.js");
importScripts("/lib/evaporate/evaporate.js");
DOMParser = self.xmldom.DOMParser;
var defaultConfig = {
    computeContentMd5: true,
    cryptoMd5Method: function(data) { return btoa(SparkMD5.ArrayBuffer.hash(data, true)); },
    cryptoHexEncodedHash256: sha256,
    awsSignatureVersion: "4",
    awsRegion: undefined,
    aws_url: "https://s3-ap-southeast-2.amazonaws.com",
    aws_key: undefined,
    customAuthMethod: function(signParams, signHeaders, stringToSign, timestamp, awsRequest) {
        return new Promise(function(resolve, reject) {
            var signingRequestId = currentSigningRequestId++;
            postMessage(["signingRequest", signingRequestId, signParams.videoId, timestamp, awsRequest.signer.canonicalRequest()]);
            queuedSigningRequests[signingRequestId] = function(signature) {
                queuedSigningRequests[signingRequestId] = undefined;
                if (signature) {
                    resolve(signature);
                } else {
                    reject();
                }
            };
        });
    },
    //logging: false,
    bucket: undefined,
    allowS3ExistenceOptimization: false,
    maxConcurrentParts: 5
};
var currentSigningRequestId = 0;
var queuedSigningRequests = [];
// The Evaporate instance, set once "init" completes; named so it isn't
// shadowed by the message event parameter "e" below
var evaporateInstance = undefined;
var filekey = undefined;
onmessage = function(e) {
    var messageType = e.data[0];
    switch (messageType) {
        case "init":
            var globalConfig = {};
            for (var k in defaultConfig) {
                globalConfig[k] = defaultConfig[k];
            }
            for (var k in e.data[1]) {
                globalConfig[k] = e.data[1][k];
            }
            var uploadConfig = e.data[2];
            Evaporate.create(globalConfig).then(function(evaporate) {
                evaporateInstance = evaporate;
                filekey = globalConfig.bucket + "/" + uploadConfig.name;
                uploadConfig.progress = function(p, stats) {
                    postMessage(["progress", p, stats]);
                };
                uploadConfig.complete = function(xhr, awsObjectKey, stats) {
                    postMessage(["complete", xhr, awsObjectKey, stats]);
                };
                uploadConfig.info = function(msg) {
                    postMessage(["info", msg]);
                };
                uploadConfig.warn = function(msg) {
                    postMessage(["warn", msg]);
                };
                uploadConfig.error = function(msg) {
                    postMessage(["error", msg]);
                };
                evaporateInstance.add(uploadConfig);
            });
            break;
        case "pause":
            evaporateInstance.pause(filekey);
            break;
        case "resume":
            evaporateInstance.resume(filekey);
            break;
        case "cancel":
            evaporateInstance.cancel(filekey);
            break;
        case "signature":
            var signingRequestId = e.data[1];
            var signature = e.data[2];
            queuedSigningRequests[signingRequestId](signature);
            break;
    }
};
Note that it relies on the calling thread to provide the AWS public key, bucket name, region, object key, and the input File object, all of which are supplied in the 'init' message. When it needs something signed, it sends a 'signingRequest' message to the parent thread, which is expected to provide the signature in a 'signature' message once it has been fetched from my API's signing endpoint.
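For reference, the main-thread counterpart implied by this protocol would look roughly like the following sketch (the worker path, signing endpoint URL, and config values are placeholders, not the app's actual code):

var worker = new Worker("/js/upload-worker.js"); // placeholder path
worker.onmessage = function(e) {
    switch (e.data[0]) {
        case "signingRequest":
            var signingRequestId = e.data[1];
            var canonicalRequest = e.data[4];
            // Fetch the signature from the signing endpoint (placeholder URL),
            // then hand it back to the Worker under the same request id.
            fetch("/api/sign", { method: "POST", body: canonicalRequest })
                .then(function(res) { return res.text(); })
                .then(function(signature) {
                    worker.postMessage(["signature", signingRequestId, signature]);
                });
            break;
        case "progress":
            // update the UI...
            break;
    }
};
// Kick off an upload: the second element overrides defaultConfig,
// the third is the per-upload config.
worker.postMessage(["init",
    { awsRegion: "ap-southeast-2", aws_key: "...", bucket: "..." },
    { name: file.name, file: file }]);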

I can't give a very good example or analyze what you are doing with only the Worker code, but I strongly suspect that the issue has to do with either the reading of the chunk on the main thread or some unexpected processing of the chunk on the main thread. Maybe post the main-thread code that calls postMessage to the Worker?
If I were debugging it right now, I'd try moving your FileReader operations into the Worker. If you don't mind the Worker blocking while it loads a chunk, you could also use FileReaderSync.
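For example, a chunk read inside the Worker could look like this (a sketch; file, chunkStart and chunkEnd stand in for whatever your part bookkeeping provides):

// Synchronous read is acceptable here because it only blocks the Worker,
// not the page's main thread.
var reader = new FileReaderSync();
var buffer = reader.readAsArrayBuffer(file.slice(chunkStart, chunkEnd));
// buffer is an ArrayBuffer, ready to hash right here in the Worker.
var md5 = btoa(SparkMD5.ArrayBuffer.hash(buffer, true));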
Post-comments update
Does generating the presigned URL require hashing the file content + metadata + a key? Hashing file content is going to take O(n) in the size of the chunk and it's possible, if the hash is the first operation that reads from the Blob, that the loading of the file content could be deferred until the hashing starts. Unless you are compelled to keep the signing in the main thread (you don't trust the worker with key material?) that would be another good thing to bring into the worker.
If moving the signing into the Worker is too much, you could have the worker do something to force the Blob to be read and/or pass the ArrayBuffer (or Uint8Array, or what have you) of file content back to the main thread for signing; this would ensure that reading the chunk does not occur on the main thread.
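If you do pass the bytes back, note that an ArrayBuffer can be handed over as a transferable so no copy is made (a sketch; the message name is made up):

// In the Worker: force the read, then transfer (not copy) the buffer out.
var buffer = new FileReaderSync().readAsArrayBuffer(blob);
postMessage(["chunkForSigning", buffer], [buffer]); // second argument is the transfer list
// From here on, buffer is neutered in the Worker; the main thread owns it.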


Intercept WebSocket messages

With ajax requests it can be done with this code:
let oldXHROpen = window.XMLHttpRequest.prototype.open;
window.lastXhr = '';
window.XMLHttpRequest.prototype.open = function(method, url, async, user, password) {
    this.addEventListener('load', function() {
        window.lastXhr = this.responseText;
    });
    return oldXHROpen.apply(this, arguments);
};
The lastXhr variable will hold the last response.
But how can this be achieved for websockets too?
@brunoff you're correct that "you would need to make this wrapper as soon as possible" so it runs before the page's own scripts; or you could just hijack the data from the MessageEvent itself:
function listen(fn) {
    fn = fn || console.log;
    let property = Object.getOwnPropertyDescriptor(MessageEvent.prototype, "data");
    const data = property.get;
    // wrapper that replaces the getter
    function lookAtMessage() {
        let socket = this.currentTarget instanceof WebSocket;
        if (!socket) {
            return data.call(this);
        }
        let msg = data.call(this);
        Object.defineProperty(this, "data", { value: msg }); // anti-loop
        fn({ data: msg, socket: this.currentTarget, event: this });
        return msg;
    }
    property.get = lookAtMessage;
    Object.defineProperty(MessageEvent.prototype, "data", property);
}
listen( ({data}) => console.log(data))
You can try pasting the code into the console on this page and then running their WebSocket example.
To intercept the messages, you will have to spy on the onmessage = fn and addEventListener("message", fn) calls.
To be able to modify onmessage, we have to override the global WebSocket in the first place. The code below intercepts the incoming messages, but in a similar way you can spy on the send method to intercept the outgoing messages (the ones sent by the client to the server).
I tested this on a page using Firebase and it works nicely, but you have to initialize it before the other scripts making sure that the websocket library (it can be socket.io, ws, etc) is using the overridden WebSocket constructor.
Spy on the Incoming Messages and Modify the Data
You can even override the data before calling the real message listener; this comes in handy if you do not have control over the page functionality and want to inject your own data into the message listener.
const OriginalWebsocket = window.WebSocket
const ProxiedWebSocket = function() {
    console.log("Intercepting web socket creation")
    const ws = new OriginalWebsocket(...arguments)
    const originalAddEventListener = ws.addEventListener
    const proxiedAddEventListener = function() {
        if (arguments[0] === "message") {
            const cb = arguments[1]
            arguments[1] = function(e) {
                // Here you can read the actual data of the incoming message,
                // and even change it before calling the real message listener
                Object.defineProperty(e, "data", { value: 'your injected data' })
                console.log("intercepted", e.data)
                return cb.apply(this, arguments)
            }
        }
        return originalAddEventListener.apply(this, arguments)
    }
    ws.addEventListener = proxiedAddEventListener
    Object.defineProperty(ws, "onmessage", {
        set(func) {
            return proxiedAddEventListener.apply(this, [
                "message",
                func,
                false
            ]);
        }
    });
    return ws;
};
window.WebSocket = ProxiedWebSocket;
If you do not need to modify the data, you can follow the second part of the answer.
Spy on the Incoming Messages Without Modifying the Data
If you want to listen for messages only, without overriding the data, things are simpler:
const OriginalWebsocket = window.WebSocket
const ProxiedWebSocket = function() {
    const ws = new OriginalWebsocket(...arguments)
    ws.addEventListener("message", function(e) {
        // Only intercept
        console.log(e.data)
    })
    return ws;
};
window.WebSocket = ProxiedWebSocket;
Spy on the Outgoing Messages
In a very similar way, you can proxy the send method, which is used to send data to the server.
const OriginalWebsocket = window.WebSocket
const ProxiedWebSocket = function() {
    const ws = new OriginalWebsocket(...arguments)
    const originalSend = ws.send
    const proxiedSend = function() {
        console.log("Intercepted outgoing ws message", arguments)
        // Optionally change the sent data
        // arguments[0] = ...
        return originalSend.apply(this, arguments)
    }
    ws.send = proxiedSend
    return ws;
};
window.WebSocket = ProxiedWebSocket;
Feel free to ask any questions if anything is unclear.
In a solution similar to yours, where window.XMLHttpRequest was replaced with a wrapped version that feeds window.lastXhr, we replace window.WebSocket with a wrapped version that feeds window.WebSocketMessages with the messages and timestamps received from all websockets created after this script runs.
window.watchedWebSockets = [];
window.WebSocketMessages = [];

function WebSocketAttachWatcher(websocket) {
    websocket.addEventListener("message", (event) => window.WebSocketMessages.push([event.data, Date.now()]));
    window.watchedWebSockets.push(websocket);
}

// here we replace WebSocket with a wrapped one that attaches listeners on creation
window.WebSocketUnchanged = window.WebSocket;
window.WebSocket = function(...args) {
    const websocket = new window.WebSocketUnchanged(...args);
    WebSocketAttachWatcher(websocket);
    return websocket;
}
Differently from your XMLHttpRequest case, the websocket may already exist. If you need guarantees that all websockets are caught, you need to install this wrapper as soon as possible. If you just can't, there's a less reliable trick to capture already-existing websockets once they send a message:
// here we detect existing websockets on their first send event... not so trustable
// (the patch goes on the original prototype, which existing sockets inherit from)
window.WebSocketSendUnchanged = window.WebSocketUnchanged.prototype.send;
window.WebSocketUnchanged.prototype.send = function(...args) {
    console.log("firstsend");
    if (!window.watchedWebSockets.includes(this))
        WebSocketAttachWatcher(this);
    this.send = window.WebSocketSendUnchanged; // avoid passing here again on next send
    window.WebSocketSendUnchanged.call(this, ...args);
}
It is not fully trustworthy, though: sockets that only receive and never send will stay unnoticed.
Intro
The question/bounty/OP is specifically asking for a reputable source.
Instead of rolling a custom solution, my proposal is to use a known, proven library: one that has been audited and forked, is in general use by the community, and is hosted on GitHub.
The second option is to roll your own (though not recommended); there are many excellent answers on how to do it involving addEventListener.
wshook
wsHook is a library (hosted on GitHub) that allows you to easily intercept and modify WebSocket requests and message events. It has been starred and forked multiple times.
Disclaimer: I don't have any relationship with the specific project.
Example:
wsHook.before = function(data, url, wsObject) {
    console.log("Sending message to " + url + " : " + data);
    return data; // per the docs below, before() must return the (possibly modified) data
}
// Make sure your program calls `wsClient.onmessage` event handler somewhere.
wsHook.after = function(messageEvent, url, wsObject) {
    console.log("Received message from " + url + " : " + messageEvent.data);
    return messageEvent;
}
From the documentation, you will find:
wsHook.before - function(data, url, wsObject): Invoked just before calling the actual WebSocket's send() method. This method must return data, which can be modified as well.
wsHook.after - function(event, url, wsObject): Invoked just after receiving the MessageEvent from the WebSocket server and before calling the WebSocket's onmessage Event Handler.
Websocket addEventListener
The WebSocket object supports .addEventListener().
Please see: Multiple Handlers for Websocket Javascript
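That is, a spy can be attached alongside the page's existing handler without clobbering it (pageHandler below stands in for the page's own listener):

// Both listeners fire for every incoming message.
socket.addEventListener("message", pageHandler);
socket.addEventListener("message", function(e) { console.log("spy:", e.data); });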
If you are using Node.js, you can use socket.io:
yarn add socket.io
After installation, you can use socket.io's middleware:
io.use(async (socket, next) => {
    try {
        const user = await fetchUser(socket);
        socket.user = user;
        next(); // let the connection proceed once the user is attached
    } catch (e) {
        next(new Error("unknown user"));
    }
});
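Connection handlers registered after this middleware can then rely on socket.user being set (a small usage sketch; fetchUser is assumed from the snippet above):

io.on("connection", (socket) => {
    console.log("user connected:", socket.user);
});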

How can I receive data on client side before calling .end() on the server side for a gRPC stream

I am currently trying to set up a server stream with the gRPC Node.js API. I want the client to receive a data event immediately whenever I write to the stream on the server side.
At the moment I don't receive anything on the client side if I only call write on the server side. However, as soon as I call the end function on the server, the client receives all the data events.
To test this I used an endless while loop for writing messages on the server side; then the client does not receive any messages (data events). If instead I use a for loop and call end afterwards, the client receives all the messages (data events) when end is called.
My .proto file:
syntax = "proto3";
message ControlMessage {
enum Control {
Undefined = 0;
Start = 1;
Stop = 2;
}
Control control = 1;
}
message ImageMessage {
enum ImageType {
Raw = 0;
Mono8 = 1;
RGB8 = 2;
}
ImageType type = 1;
int32 width = 2;
int32 height = 3;
bytes image = 4;
}
service StartImageTransmission {
rpc Start(ControlMessage) returns (stream ImageMessage);
}
On the server side I implement the start function and try to endlessly write messages to the call:
function doStart(call) {
    var imgMsg = {type: "Mono8", width: 600, height: 600, image: new ArrayBuffer(600*600)};
    //for(var i = 0; i < 10; i++) {
    while(true) {
        call.write(imgMsg);
        console.log("Message sent");
    }
    call.end();
}
I register the function as service in the server:
var server = new grpc.Server();
server.addService(protoDescriptor.StartImageTransmission.service, {Start: doStart});
On client side I generate an appropriate call and register the data and end event:
var call = client.Start({control: 0});
call.on('data', (imgMessage) => {
console.log('received image message');
});
call.read();
call.on('end', () => {console.log('end');});
I also tried writing the server side in Python. In that case the Node client instantly receives messages, not only after the stream is ended on the server side. So I guess this should also be possible for a server written with the Node API.
It seems that the problem was that the endless while loop blocks all background tasks in Node. A possible solution is to use setTimeout to create the loop. The following code worked for me:
First in the gRPC call store the call object in an array:
function doStart(call) {
    calls.push(call);
}
For sending to all clients I use a setTimeout:
function sendToAllClients() {
    calls.forEach((call) => {
        call.write(imgMsg);
    });
    setTimeout(sendToAllClients, 10);
}
setTimeout(sendToAllClients, 10);
Helpful Stack Overflow article: Why does a while loop block the event loop?
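The same yielding behavior can also be written as an async loop with a promisified timer (a sketch, equivalent to the setTimeout version above; calls and imgMsg are as defined there):

const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

async function sendToAllClients() {
    while (true) {
        calls.forEach((call) => call.write(imgMsg));
        await delay(10); // yields to the event loop so the writes can flush
    }
}
sendToAllClients();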
I was able to use uncork which comes from Node.js's Writable.
Here is an example. Pseudocode, but pulled from across a working implementation:
import * as grpc from '@grpc/grpc-js';
import * as proto from './src/proto/generated/organizations'; // via protoc w/ ts-proto

const OrganizationsGrpcServer: proto.OrganizationsServer = {
    async getMany(call: ServerWritableStream<proto.Empty, proto.OrganizationCollection>) {
        call.write(proto.OrganizationCollection.fromJSON({ value: [{}] }));
        call.uncork();
        // do some blocking stuff
        call.write(proto.OrganizationCollection.fromJSON({ value: [{}] }));
        call.uncork();
        // call.end(), or client.close() below, at some point?
    },
    ping(call, callback) {
        callback(null);
    }
};

const client = new proto.OrganizationsClient('127.0.0.1:5000', grpc.credentials.createInsecure());
const stream = client.getMany(null);
stream.on('data', data => {
    // this cb should run twice
});

export default OrganizationsGrpcServer;
export default OrganizationsGrpcServer;
//.proto
service Organizations {
    rpc GetMany (google.protobuf.Empty) returns (stream OrganizationCollection) {}
}

message OrganizationCollection {
    repeated Organization value = 1;
}
Versions:
@grpc/grpc-js 1.4.4
@grpc/proto-loader 0.6.7
ts-proto 1.92.1
npm 8.1.4
node 17

Service Worker Respond To Fetch after getting data from another worker

I am using service workers to intercept requests for me and provide the responses to the fetch requests by communicating with a Web worker (also created from the same parent page).
I have used message channels for direct communication between the worker and service worker. Here is a simple POC I have written:
var otherPort, parentPort;
var dummyObj;

var DummyHandler = function() {
    this.onmessage = null;
    var selfRef = this;
    this.callHandler = function(arg) {
        if (typeof selfRef.onmessage === "function") {
            selfRef.onmessage(arg);
        } else {
            console.error("Message Handler not set");
        }
    };
};

function msgFromW(evt) {
    console.log(evt.data);
    dummyObj.callHandler(evt);
}

self.addEventListener("message", function(evt) {
    var data = evt.data;
    if (data.msg === "connect") {
        otherPort = evt.ports[1];
        otherPort.onmessage = msgFromW;
        parentPort = evt.ports[0];
        parentPort.postMessage({"msg": "connect"});
    }
});

self.addEventListener("fetch", function(event) {
    var url = event.request.url;
    var urlObj = new URL(url);
    if (!isToBeIntercepted(url)) {
        return fetch(event.request);
    }
    url = decodeURI(url);
    var key = processURL(url).toLowerCase();
    console.log("Fetch For: " + key);
    event.respondWith(new Promise(function(resolve, reject) {
        dummyObj = new DummyHandler();
        dummyObj.onmessage = function(e) {
            if (e.data.error) {
                reject(e.data.error);
            } else {
                var content = e.data.data;
                var blob = new Blob([content]);
                resolve(new Response(blob));
            }
        };
        otherPort.postMessage({"msg": "content", param: key});
    }));
});
Roles of the ports:
otherPort: Communication with worker
parentPort: Communication with parent page
In the worker, I have a database say this:
var dataBase = {
    "file1.txt": "This is File1",
    "file2.txt": "This is File2"
};
The worker just serves the correct data according to the key sent by the service worker. In reality these will be very large files.
The problem I am facing is the following:
Since I am using a global dummyObj, the older dummyObj (and hence the older onmessage) is lost, and only the latest request is responded to with the received data.
In fact, file2.txt gets "This is File1", because the latest dummyObj is for file2.txt but the worker sends the data for file1.txt first.
I tried creating the iframes directly, and all the requests inside them are intercepted:
<html>
<head></head>
<body>
    <iframe src="tointercept/file1.txt"></iframe>
    <iframe src="tointercept/file2.txt"></iframe>
</body>
</html>
Here is what I get as output:
One approach could be to write all the files that could be fetched into IndexedDB in the worker, before creating the iframe, and then fetch them from IndexedDB in the Service Worker. But I don't want to save all the resources in IDB, so this approach is not what I want.
Does anybody know a way to accomplish what I am trying to do in some other way? Or is there a fix to what I am doing.
Please Help!
UPDATE
I have got this to work by queuing the dummyObjs in a global queue instead of having a single global object. On receiving a response from the worker in msgFromW, I pop an element from the queue and call its callHandler function.
But I am not sure if this is a reliable solution, as it assumes that everything will occur in order. Is that assumption correct?
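The queueing change described in the update might look roughly like this (a sketch; it assumes the worker answers requests strictly in the order they were posted):

var pendingHandlers = []; // FIFO queue instead of the single global dummyObj

function msgFromW(evt) {
    var handler = pendingHandlers.shift(); // oldest outstanding request
    handler(evt);
}

// Inside the fetch handler:
event.respondWith(new Promise(function(resolve, reject) {
    pendingHandlers.push(function(e) {
        if (e.data.error) reject(e.data.error);
        else resolve(new Response(new Blob([e.data.data])));
    });
    otherPort.postMessage({"msg": "content", param: key});
}));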
I'd recommend wrapping your message passing between the service worker and the web worker in promises, and then pass a promise that resolves with the data from the web worker to fetchEvent.respondWith().
The promise-worker library can automate this promise-wrapping for you, or you could do it by hand, using this example as a guide.
If you were using promise-worker, your code would look something like:
var promiseWorker = new PromiseWorker(/* your web worker */);

self.addEventListener('fetch', function(fetchEvent) {
    if (/* some optional check to see if you want to handle this event */) {
        fetchEvent.respondWith(promiseWorker.postMessage(/* file name */));
    }
});
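If you'd rather not add a dependency, a hand-rolled version of the same idea might look like this (a sketch; it tags each request with an id and assumes the web worker echoes that id back in its reply, so ordering no longer matters):

var nextId = 0;
var pending = {}; // id -> {resolve, reject}, so replies may arrive in any order

otherPort.onmessage = function(e) {
    var callbacks = pending[e.data.id];
    delete pending[e.data.id];
    if (e.data.error) callbacks.reject(e.data.error);
    else callbacks.resolve(new Response(new Blob([e.data.data])));
};

function requestFromWorker(key) {
    return new Promise(function(resolve, reject) {
        var id = nextId++;
        pending[id] = { resolve: resolve, reject: reject };
        otherPort.postMessage({ msg: "content", param: key, id: id });
    });
}

// In the fetch handler:
// fetchEvent.respondWith(requestFromWorker(key));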

httpChannel.redirectTo() infinite load

I'm working on a Firefox extension for the first time, and thanks to the documentation, it's going pretty fast.
I have a problem, however: I want to redirect users if they go to certain domains.
const {Cc, Ci, Cr, Cu} = require("chrome");
const buttons = require('sdk/ui/button/action');
const tabs = require("sdk/tabs");

var httpRequestObserver =
{
    observe: function(subject, topic, data)
    {
        if (topic == "http-on-modify-request") {
            var httpChannel = subject.QueryInterface(Ci.nsIHttpChannel);
            var eTLDService = Cc["@mozilla.org/network/effective-tld-service;1"].getService(Ci.nsIEffectiveTLDService);
            var suffix = eTLDService.getPublicSuffixFromHost(httpChannel.originalURI.host);
            var regexp = new RegExp('google\\.' + suffix, 'i');
            if (regexp.test(httpChannel.originalURI.host)) {
                Cu.import("resource://gre/modules/Services.jsm");
                httpChannel.redirectTo(Services.io.newURI("http://test.tld", null, null));
            }
        }
    },
    get observerService() {
        return Cc["@mozilla.org/observer-service;1"].getService(Ci.nsIObserverService);
    },
    register: function()
    {
        this.observerService.addObserver(this, "http-on-modify-request", false);
    },
    unregister: function()
    {
        this.observerService.removeObserver(this, "http-on-modify-request");
    }
};
httpRequestObserver.register();
I'm trying to do a little POC, but the page seems to load indefinitely.
Do you know what I am doing wrong?
Don't test the originalURI! It will stay the same even after a redirect
/**
 * The original URI used to construct the channel. This is used in
 * the case of a redirect or URI "resolution" (e.g. resolving a
 * resource: URI to a file: URI) so that the original pre-redirect
 * URI can still be obtained. ...
 */
So you redirect, which creates a new channel with the same originalURI but a different URI, and your test triggers again and again and again... causing the infinite redirection loop (redirecting via this API is also not subject to the usual redirection limit).
Instead test the .URI of a channel, which gives the current URI.
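Applied to your observer, the check would become something like this (a sketch of just the changed lines):

// Test the channel's current URI, not the pre-redirect one.
var host = httpChannel.URI.host;
var suffix = eTLDService.getPublicSuffixFromHost(host);
if (new RegExp('google\\.' + suffix, 'i').test(host)) {
    httpChannel.redirectTo(Services.io.newURI("http://test.tld", null, null));
}
// After the redirect, URI.host is test.tld, the test fails, and the loop stops.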

Copy file from addon to profile folder

I'm trying to copy a sqlite database from the data folder in my extension directory to the profile folder, in order to use it.
So for now, I'm trying this:
const {Cc, Ci, Cu} = require("chrome");
const {NetUtils} = Cu.import("resource://gre/modules/NetUtil.jsm");
const data = require('sdk/self').data;
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/FileUtils.jsm");

var file = Cc["@mozilla.org/file/directory_service;1"].
           getService(Ci.nsIProperties).
           get("TmpD", Ci.nsIFile);
file.append("searchEngines.sqlite");
file.createUnique(Ci.nsIFile.NORMAL_FILE_TYPE, 0666);

// Then, we need an output stream to our output file.
var ostream = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(Ci.nsIFileOutputStream);
ostream.init(file, -1, -1, 0);

// Finally, we need an input stream to take data from.
var iStreamData = NetUtil.ioService.newChannel(data.url("searchEngines.sqlite"), null, null).open();
let istream = Cc["@mozilla.org/io/string-input-stream;1"].createInstance(Ci.nsIStringInputStream);
istream.setData(iStreamData, iStreamData.length);

NetUtil.asyncCopy(istream, ostream, function(aResult) {
    console.log(aResult); // returns 0
});

console.log(FileUtils.getFile("ProfD", ["searchEngines.sqlite"]).exists()); // returns false
let dbConn = Services.storage.openDatabase(file);
The file doesn't seem to exist, since the console.log(...exists()) returns false, and it doesn't appear to be populated (the console.log(aResult) returns 0).
Where is my mistake, and is there a better way to do that?
Besides the fact that it uses sync I/O (opening the channel with .open instead of .asyncOpen), the NetUtil.asyncCopy operation is still async, meaning the code

NetUtil.asyncCopy(istream, ostream, function(aResult) {
    console.log(aResult); // returns 0
});
console.log(FileUtils.getFile("ProfD", ["searchEngines.sqlite"]).exists()); // returns false
let dbConn = Services.storage.openDatabase(file);

will try to open the file before the copy has likely finished!
However, file.exists() would likely be true, because you already opened the file for writing; it's just that the file is still blank because the data copy isn't done (or even started) yet. (Actually, your check returns false because you're checking searchEngines.sqlite in ProfD and not TmpD; if you correct that, the previous statement applies.)
You can only use the file when/after your callback to .asyncCopy is done, e.g.
NetUtil.asyncCopy(istream, ostream, function(aResult) {
    console.log(aResult);
    console.log(FileUtils.getFile("ProfD", ["searchEngines.sqlite"]).exists());
    let dbConn = Services.storage.openDatabase(file);
    // ...
});
PS: You might want to .asyncOpen the channel, then use NetUtil.asyncFetch and pass the resulting stream to .asyncCopy to be truly async. That is fine for smallish files, since it caches the contents in memory first.
For large files you could create a variant of the NetUtil.asyncFetch implementation that feeds the .outputStream end directly to NetUtil.asyncCopy. That is a bit more complicated, so I won't write it up in detail until somebody is truly interested in this and asks the corresponding question.
Edit, so here is how I'd write it:
// (assumes const {Cc, Ci, Cu} = require("chrome"); as in the snippet above)
const data = require('sdk/self').data;
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/NetUtil.jsm");

function copyDataURLToFile(url, file, callback) {
    NetUtil.asyncFetch(url, function(istream) {
        var ostream = Cc["@mozilla.org/network/file-output-stream;1"].
                      createInstance(Ci.nsIFileOutputStream);
        ostream.init(file, -1, -1, Ci.nsIFileOutputStream.DEFER_OPEN);
        NetUtil.asyncCopy(istream, ostream, function(result) {
            callback && callback(file, result);
        });
    });
}

var file = Services.dirsvc.get("TmpD", Ci.nsIFile);
file.append("searchEngines.sqlite");
copyDataURLToFile(data.url("searchEngines.sqlite"), file, function(file, result) {
    console.log(result);
    console.log(file.exists());
    console.log(file.fileSize);
});
Try using OS.File; it's much more straightforward.
Cu.import("resource://gre/modules/FileUtils.jsm");
Cu.import("resource://gre/modules/osfile.jsm")
var fromPath = FileUtils.getFile("ProfD", ["searchEngines.sqlite"]).path;
var toPath = FileUtils.getFile("TmpD", ["searchEngines.sqlite"]).path;;
var promise = OS.File.copy(fromPath, toPath);
var dbConn;
promise.then(
function(aStat) {
alert('success will now open connection');
dbConn = Services.storage.openDatabase(toPath);
},
function(aReason) {
console.log('promise rejected', aReason);
alert('copy failed, see console for details');
}
);
