I have an application where images need to be set on some of the pages. These images are on the server, and each image name starts with a unique ID. Here is an example of the image names:
AA_image1.jpg
AB_image1.jpg
AC_image1.jpg
AD_image1.jpg
AE_image1.jpg
I looked at what the previous developers did and how they checked for image existence. They used a JSON file that holds the id and image name. Here is an example of that code:
var images = [{
"id": "AA",
"image": "AA_image1.jpg"
},
{
"id": "AB",
"image": "AB_image1.jpg"
},
{
"id": "AC",
"image": "AC_image1.jpg"
},
{
"id": "AD",
"image": "AD_image1.jpg"
},
{
"id": "AE",
"image": "AE_image1.jpg"
}
];
var imgID = "AC";
var imgPrimary = "AC_image1.jpg";
var found = false;
var imgDefault = "default.jpg";
for (var i = 0; i < images.length; i++) {
if (images[i].id == imgID && (images[i].image).toLowerCase() == (imgID + '_image1.jpg').toLowerCase()) {
found = true;
break;
}
}
if (found === true) {
console.log(imgPrimary);
} else {
console.log(imgDefault);
}
The example above seems pretty simple, but my concern is: what if an image gets removed from the folder and the JSON file is not updated? In that case we would load an image that does not exist instead of the default image. I'm wondering if this approach would be better:
var imgID = "AC";
var imgNames = [imgID + '_image1','default'];
var imgResults = {};
for(var i = 0; i < imgNames.length; i++){
checkImage( imgNames[i] );
}
function checkImage( imgName ) {
$.ajax({
type: "GET",
async: true,
url: "images/"+imgName+".jpg",
}).done(function(message,text,jqXHR){
imgResults[imgName] = true;
}).fail(function(jqXHR, textStatus, errorThrown){
imgResults[imgName] = false;
});
}
Here is an example of imgResults after the process completed:
console.log(imgResults);
Console result:
{
"default": true,
"AG_image1": false
}
The only problem I'm experiencing with the second example is that if I try to check the result based on a key, I get undefined. Here is an example:
console.log(imgResults["AG_image1"]);
This is the result in the console:
undefined
Between the two, I'm not sure which one is better and more secure. If anyone has suggestions, please let me know.
This is probably the shortest possible code to handle missing images in JS. imagePad is a reference to the DOM element in which you want to show the image.
var img = new Image();
img.addEventListener('error', function (e) {
// Image not found, show the default
this.src = iconBase + 'gen.png';
});
img.addEventListener('load', function () {
imagePad.appendChild(img);
});
img.src = 'the_given_src';
AJAX requests are asynchronous by default
The reason you're unable to read the imgResults object at that point is that AJAX requests are asynchronous, and the requests have not completed when you try to access the results. You need to wait for the requests to complete before continuing.
The reason console.log appears to show the results is that console.log is lazy and doesn't evaluate the object until you expand it in dev tools; console.dir will show you an empty object at that point.
Furthermore, as you have multiple requests that you want to wait for, you'll want to create an array of promises where each promise corresponds to the load/failure of each request, then use Promise.all to wait for all the promises to complete before operating on the results.
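For illustration, here is a minimal sketch of the asker's second example rewritten that way; it assumes the same images/ folder layout and that jQuery is still used for the requests, so the results are only read inside the Promise.all callback:
var imgID = "AC";
var imgNames = [imgID + '_image1', 'default'];

// Wrap each jQuery request in a native promise that resolves to true/false
// instead of rejecting, so Promise.all never short-circuits on a missing image.
var checks = imgNames.map(function (imgName) {
  return Promise.resolve(
    $.ajax({ type: "GET", url: "images/" + imgName + ".jpg" })
  ).then(
    function () { return true; },   // request succeeded, the image exists
    function () { return false; }   // request failed, the image is missing
  );
});

Promise.all(checks).then(function (flags) {
  var imgResults = {};
  imgNames.forEach(function (imgName, i) { imgResults[imgName] = flags[i]; });

  console.log(imgResults);                    // e.g. { "AC_image1": true, "default": true }
  console.log(imgResults[imgID + '_image1']); // defined here, because all requests have finished
});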
Verifying that a resource exists
To verify that a resource exists, you can use a HEAD request instead of a GET request, so that the entire image isn't downloaded for no reason. There's also no need to depend on a massive library such as jQuery for AJAX requests, as XMLHttpRequest is very well supported these days.
class ResourceValidator extends EventTarget {
constructor(target) {
super()
this.target = target
this._ready = false
this._valid = false
this.validate()
}
validate() {
const request = new XMLHttpRequest()
request.addEventListener('load', event => {
// a missing resource typically comes back as a 404, which still fires 'load',
// so check the status code before treating the resource as valid
this._ready = true
this._valid = request.status >= 200 && request.status < 300
this.dispatchEvent(new Event('ready'))
})
request.addEventListener('error', event => {
this._ready = true
this.dispatchEvent(new Event('ready'))
})
request.open('HEAD', this.target)
request.send()
}
get ready() {
return new Promise(resolve => {
if(this._ready === true) resolve(true)
else this.addEventListener('ready', _ => resolve(true))
})
}
get valid() {
return new Promise(resolve => {
if(this._ready === true) resolve(this._valid)
else this.addEventListener('ready', _ => resolve(this._valid))
})
}
}
async function validateImageSources(sources) {
const results = {}
const promises = []
for(let source of sources) {
const validator = new ResourceValidator(source)
const promise = validator.valid
promise.then(valid => results[source] = valid)
promises.push(promise)
}
await Promise.all(promises)
return results
}
validateImageSources([
'https://picsum.photos/200',
'https://nosuchaddress.io/image.png'
]).then(results => {
console.log(results)
console.log(results['https://picsum.photos/200'])
})
I am trying to write a Chrome extension that can interact with a page that is a React app. I am able to manipulate the DOM by using popup.js.
Here is my popup.js
document.querySelector("#submit").addEventListener("click", async () => {
let [tab] = await chrome.tabs.query({ active: true, currentWindow: true });
console.log(tab);
chrome.scripting.executeScript(
{
target: { tabId: tab.id },
files: ["scripts/script.js"],
},
(result) => {
console.log("injection result :", result);
}
);
});
With the help of script.js I can reach any sort of element except React's state.
I have found a Q/A over here which helps to find __reactInternal$; here is the code.
function FindReact(dom, traverseUp = 0) {
const key = Object.keys(dom).find((key) => {
return (
key.startsWith("__reactFiber$") || // react 17+
key.startsWith("__reactInternalInstance$")
); // react <17
});
const domFiber = dom[key];
if (domFiber == null) return key;
// react <16
if (domFiber._currentElement) {
let compFiber = domFiber._currentElement._owner;
for (let i = 0; i < traverseUp; i++) {
compFiber = compFiber._currentElement._owner;
}
return compFiber._instance;
}
// react 16+
const GetCompFiber = (fiber) => {
//return fiber._debugOwner; // this also works, but is __DEV__ only
let parentFiber = fiber.return;
while (typeof parentFiber.type == "string") {
parentFiber = parentFiber.return;
}
return parentFiber;
};
let compFiber = GetCompFiber(domFiber);
for (let i = 0; i < traverseUp; i++) {
compFiber = GetCompFiber(compFiber);
}
return compFiber.stateNode;
}
Even though the code above works well in the console, I can't use it in my script.js. What could be the reason? How can I access React's state with the help of a Chrome extension?
Note: I don't want to use React DevTools; I am trying to write an end-user extension. Thanks in advance for your answers.
You can access it with
$r.state
$r refers to the component currently selected in the React Developer Tools "Components" panel, so this works in the DevTools console. To view state data, this plugin will come in handy: https://chrome.google.com/webstore/detail/react-developer-tools/fmkadmapgofadopljbjfkapdkoienihi
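A rough sketch of what that looks like in practice, assuming the extension is installed and a component is selected in that panel:
// In the DevTools console, $r points at the component currently selected
// in the React Developer Tools "Components" panel:
console.log($r.state); // state of the selected class component
console.log($r.props); // its props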
I am trying to fetch JSON data from the WordPress Developer Reference site. I need to search a keyword without knowing whether it's a function, class, hook, or method, which is part of the URL I need to fetch, so I'm using Promise.all to cycle through all possible URLs. It works: if the response status is above 299 it throws the error immediately, and if the response is ok it continues to the next .then. Fine, but occasionally the response is ok while the JSON it returns is just an empty array. So I need to check whether the JSON data is an empty array, which I can't seem to do in the first part; I can only check in the second part as far as I know. And if it throws the error, it doesn't continue trying the other URLs. Any suggestions?
var keyword = 'AtomParser';
const refs = ['function', 'hook', 'class', 'method'];
// Store the promises
let promises = [];
// Cycle through each type until we find one we're looking for
for (let t = 0; t < refs.length; t++) {
const url =
'https://developer.wordpress.org/wp-json/wp/v2/wp-parser-' +
refs[t] +
'?search=' +
keyword;
// console.log(url);
promises.push(fetch(url));
}
Promise.all(promises)
.then(function(response) {
console.log(response[0]);
// Get the status
console.log('Status code: ' + response[0].status);
if (response[0].status <= 299) {
// The API call was successful!
return response[0].json();
} else {
throw new Error('Broken link status code: ' + response[0].status);
}
})
.then(function(data) {
// This is the parsed JSON data from our response
console.log(data);
// Make sure we have data
if (data.length == 0) {
throw new Error('Empty Array');
}
// ref
const reference = data[0];
// Only continue if not null or empty
if (reference !== null && reference !== undefined && data.length > 0) {
// Success
// Return what I want from the reference
}
})
.catch(function handleError(error) {
console.log('Error' + error);
});
Is there some way to get the JSON data in the first part so I can check whether it's an empty array while I'm checking the response status?
I would recommend encapsulating the success / failure logic for individual requests, then you can determine all the resolved and rejected responses based on the result of that encapsulation.
For example
const checkKeyword = async (ref, keyword) => {
const params = new URLSearchParams({ search: keyword });
const res = await fetch(
`https://developer.wordpress.org/wp-json/wp/v2/wp-parser-${encodeURIComponent(
ref
)}?${params}`
);
if (!res.ok) {
throw new Error(`${res.status}: ${await res.text()}`);
}
const data = await res.json();
if (data.length === 0) {
throw new Error(`Empty results for '${ref}'`);
}
return { ref, data };
};
Now you can use something like Promise.any() or Promise.allSettled() to find the first successful request or all successful requests, respectively
const keyword = "AtomParser";
const refs = ["function", "hook", "class", "method"];
const promises = refs.map((ref) => checkKeyword(ref, keyword));
// First success
Promise.any(promises)
.then(({ ref, data }) => {
console.log(ref, data);
})
.catch(console.error);
// All successes
Promise.allSettled(promises)
.then((responses) =>
responses.reduce(
(arr, { status, value }) =>
status === "fulfilled" ? [...arr, value] : arr,
[]
)
)
.then((results) => {
// results has all the successful responses
});
For whatever reason I couldn't get Phil's answer to work, so I ended up doing the following, which works fine for me. (This is for a Discord bot, in case you're wondering what the other stuff is all about.)
var keyword = 'AtomParser';
const refs = ['function', 'hook', 'class', 'method'];
// Store the successful result or error
let final: any[] = [];
let finalError = '';
// Cycle through each type until we find one we're looking for
for (let t = 0; t < refs.length; t++) {
const url =
'https://developer.wordpress.org/wp-json/wp/v2/wp-parser-' +
refs[t] +
'?search=' +
keyword;
console.log(url);
// Try to fetch it
await fetch(url)
.then(function (response) {
console.log(response);
// Get the status
console.log('Status code: ' + response.status);
if (response.status > 299) {
finalError = '`' + refs[t] + '` does not exist.';
throw new Error(finalError);
} else {
// The API call was successful!
return response.json();
}
})
.then(function (data) {
// This is the parsed JSON data from our response
console.log(data);
// Make sure we have data
if (data.length == 0) {
finalError = "Sorry, I couldn't find `" + keyword + '`';
throw new Error(finalError);
}
// Only continue if not null or empty
if (data[0] !== null && data[0] !== undefined && data.length > 0) {
for (let d = 0; d < data.length; d++) {
// Add it to the final array
final.push(data[d]);
}
}
})
.catch(function handleError(error) {
console.log(error);
});
}
if (final.length > 0) {
for (let f = 0; f < final.length; f++) {
// ref
const reference = final[f];
// Get the link
const link = reference.link;
// Get the title
var title = reference.title.rendered;
title = title.replace('&gt;', '>'); // decode the HTML entity in the title
// Get the excerpt
var excerpt = reference.excerpt.rendered;
excerpt = excerpt.replace('<p>', '');
excerpt = excerpt.replace('</p>', '');
excerpt = excerpt.replace('<b>', '**');
excerpt = excerpt.replace('</b>', '**');
console.log(excerpt);
message.reply(
new discord.Embed({
title: `${title}`,
url: link,
description: `${excerpt}\n\n`,
footer: {
text: `WordPress Developer Code Reference\nhttps://developer.wordpress.org/`,
},
})
);
}
} else if (finalError != '') {
message.reply(finalError);
} else {
message.reply('Something went wrong...');
}
wp module
Phil's answer puts you on the right track, but I want to expand on some of his ideas. Use of URLSearchParams is great, but you can improve by using the high-level URL API and forgo encodeURIComponent and constructing search params manually. Notice I'm putting this code in its own wp module so I can separate concerns more easily. We don't want all of this code leaking into your main program.
// wp.js
import { fetch } from "whatwg-fetch" // or your chosen implementation
const baseURL = "https://developer.wordpress.org"
async function search1(path, query) {
const u = new URL(path, baseURL)
u.searchParams.set("search", query)
const result = await fetch(u)
if (!result.ok) throw Error(`Search failed (${result.status}): ${u}`)
return result.json()
}
search1 searches one path, but we can write search to search all the necessary paths. I don't think there's any reason to get fancy with each path here, so just write them out -
// wp.js (continued)
function search(query) {
const endpoints = [
"/wp-json/wp/v2/wp-parser-function",
"/wp-json/wp/v2/wp-parser-hook",
"/wp-json/wp/v2/wp-parser-class",
"/wp-json/wp/v2/wp-parser-method"
]
return Promise
.all(endpoints.map(e => search1(e, query)))
.then(results => results.flat())
}
export { search }
main module
Notice we only exported search as search1 is internal to the wp module. Let's see how we can use it in our main module now -
// main.js
import { search } from "./wp.js"
for (const result of await search("database"))
if(result.guid.rendered)
console.log(`${result.title.rendered}\n${result.guid.rendered}\n`)
In this example, we first search for "database" -
wp_should_replace_insecure_home_url()
https://developer.wordpress.org/reference/functions/wp_should_replace_insecure_home_url/
wp_delete_signup_on_user_delete()
https://developer.wordpress.org/reference/functions/wp_delete_signup_on_user_delete/
get_post_datetime()
https://developer.wordpress.org/reference/functions/get_post_datetime/
wp_ajax_health_check_get_sizes()
https://developer.wordpress.org/reference/functions/wp_ajax_health_check_get_sizes/
wp_should_replace_insecure_home_url
https://developer.wordpress.org/reference/hooks/wp_should_replace_insecure_home_url/
comments_pre_query
https://developer.wordpress.org/reference/hooks/comments_pre_query/
users_pre_query
https://developer.wordpress.org/reference/hooks/users_pre_query/
WP_Object_Cache
http://developer.wordpress.org/reference/classes/wp_object_cache/
wpdb
http://developer.wordpress.org/reference/classes/wpdb/
WP_REST_Menu_Items_Controller::prepare_item_for_database()
https://developer.wordpress.org/reference/classes/wp_rest_menu_items_controller/prepare_item_for_database/
WP_REST_Global_Styles_Controller::prepare_item_for_database()
https://developer.wordpress.org/reference/classes/wp_rest_global_styles_controller/prepare_item_for_database/
WP_REST_Menus_Controller::prepare_item_for_database()
https://developer.wordpress.org/reference/classes/wp_rest_menus_controller/prepare_item_for_database/
WP_REST_Templates_Controller::prepare_item_for_database()
https://developer.wordpress.org/reference/classes/wp_rest_templates_controller/prepare_item_for_database/
WP_REST_Application_Passwords_Controller::prepare_item_for_database()
https://developer.wordpress.org/reference/classes/wp_rest_application_passwords_controller/prepare_item_for_database/
wpdb::db_server_info()
https://developer.wordpress.org/reference/classes/wpdb/db_server_info/
WP_REST_Attachments_Controller::insert_attachment()
https://developer.wordpress.org/reference/classes/wp_rest_attachments_controller/insert_attachment/
WP_Debug_Data::get_database_size()
https://developer.wordpress.org/reference/classes/wp_debug_data/get_database_size/
WP_REST_Meta_Fields::update_multi_meta_value()
https://developer.wordpress.org/method/wp_rest_meta_fields/update_multi_meta_value/
another search example
Now let's search for "image" -
for (const result of await search("image"))
if(result.guid.rendered)
console.log(`${result.title.rendered}\n${result.guid.rendered}\n`)
get_adjacent_image_link()
https://developer.wordpress.org/reference/functions/get_adjacent_image_link/
get_next_image_link()
https://developer.wordpress.org/reference/functions/get_next_image_link/
get_previous_image_link()
https://developer.wordpress.org/reference/functions/get_previous_image_link/
wp_robots_max_image_preview_large()
https://developer.wordpress.org/reference/functions/wp_robots_max_image_preview_large/
wp_getimagesize()
https://developer.wordpress.org/reference/functions/wp_getimagesize/
is_gd_image()
https://developer.wordpress.org/reference/functions/is_gd_image/
wp_show_heic_upload_error()
https://developer.wordpress.org/reference/functions/wp_show_heic_upload_error/
wp_image_src_get_dimensions()
https://developer.wordpress.org/reference/functions/wp_image_src_get_dimensions/
wp_image_file_matches_image_meta()
https://developer.wordpress.org/reference/functions/wp_image_file_matches_image_meta/
_wp_check_existing_file_names()
https://developer.wordpress.org/reference/functions/_wp_check_existing_file_names/
edit_custom_thumbnail_sizes
https://developer.wordpress.org/reference/hooks/edit_custom_thumbnail_sizes/
get_header_image_tag_attributes
https://developer.wordpress.org/reference/hooks/get_header_image_tag_attributes/
image_editor_output_format
https://developer.wordpress.org/reference/hooks/image_editor_output_format/
wp_image_src_get_dimensions
https://developer.wordpress.org/reference/hooks/wp_image_src_get_dimensions/
wp_get_attachment_image
https://developer.wordpress.org/reference/hooks/wp_get_attachment_image/
image_sideload_extensions
https://developer.wordpress.org/reference/hooks/image_sideload_extensions/
wp_edited_image_metadata
https://developer.wordpress.org/reference/hooks/wp_edited_image_metadata/
wp_img_tag_add_loading_attr
https://developer.wordpress.org/reference/hooks/wp_img_tag_add_loading_attr/
wp_image_file_matches_image_meta
https://developer.wordpress.org/reference/hooks/wp_image_file_matches_image_meta/
get_custom_logo_image_attributes
https://developer.wordpress.org/reference/hooks/get_custom_logo_image_attributes/
Custom_Image_Header
http://developer.wordpress.org/reference/classes/custom_image_header/
WP_Image_Editor_Imagick
http://developer.wordpress.org/reference/classes/wp_image_editor_imagick/
WP_Embed
http://developer.wordpress.org/reference/classes/wp_embed/
WP_Image_Editor
http://developer.wordpress.org/reference/classes/wp_image_editor/
WP_Customize_Background_Image_Setting
http://developer.wordpress.org/reference/classes/wp_customize_background_image_setting/
WP_Customize_Header_Image_Setting
http://developer.wordpress.org/reference/classes/wp_customize_header_image_setting/
WP_Image_Editor_GD
http://developer.wordpress.org/reference/classes/wp_image_editor_gd/
WP_Customize_Header_Image_Control
http://developer.wordpress.org/reference/classes/wp_customize_header_image_control/
WP_REST_Server::add_image_to_index()
https://developer.wordpress.org/reference/classes/wp_rest_server/add_image_to_index/
WP_REST_URL_Details_Controller::get_image()
https://developer.wordpress.org/reference/classes/wp_rest_url_details_controller/get_image/
WP_Image_Editor::get_default_quality()
https://developer.wordpress.org/reference/classes/wp_image_editor/get_default_quality/
WP_Theme_JSON::get_blocks_metadata()
https://developer.wordpress.org/reference/classes/wp_theme_json/get_blocks_metadata/
WP_Image_Editor_Imagick::pdf_load_source()
https://developer.wordpress.org/reference/classes/wp_image_editor_imagick/pdf_load_source/
WP_Image_Editor_Imagick::write_image()
https://developer.wordpress.org/reference/classes/wp_image_editor_imagick/write_image/
WP_Image_Editor_Imagick::maybe_exif_rotate()
https://developer.wordpress.org/reference/classes/wp_image_editor_imagick/maybe_exif_rotate/
WP_Image_Editor_Imagick::make_subsize()
https://developer.wordpress.org/reference/classes/wp_image_editor_imagick/make_subsize/
WP_Image_Editor_GD::make_subsize()
https://developer.wordpress.org/reference/classes/wp_image_editor_gd/make_subsize/
empty search result
Searching for "zzz" will yield no results -
for (const result of await search("zzz"))
if(result.guid.rendered)
console.log(`${result.title.rendered}\n${result.guid.rendered}\n`)
<empty result>
async onSubmit(formValue) {
this.isSubmitted = true;
if(this.selectedImageArray.length > 0) { // 4 images in this array
for (let index = 0; index < this.selectedImageArray.length; index++) { // Loop through this image array
await new Promise(resolve => {
setTimeout(()=> {
console.log('This is iteration ' + index);
var filePath = `images/tours/${this.selectedImageArray[index].name.split('.').slice(0,-1).join('.')}_${new Date().getTime()}`;
const fileRef = this.storage.ref(filePath);
this.storage.upload(filePath, this.selectedImageArray[index]).snapshotChanges().pipe(
finalize(() => {
fileRef.getDownloadURL().subscribe((url) => {
formValue[`imageUrl${index+1}`] = url;
console.log(url);
});
})
).subscribe()
resolve();
}, 3000);
});
}
console.log('After loop execution');
// this.value(formValue);
}
}
After submitting, the code downloads and prints 3 URLs, then prints 'After loop execution', and only then prints the 4th one; I don't understand why (see the console output).
What I want is for the code to execute in sequence: only after all the images have been uploaded and their download URLs retrieved should execution continue past the loop.
I wrote another version of this that hopefully works as you expect it to.
First we create an array of all the storage upload snapshot observables.
Then we use concat() to run them all in sequence. (If you change concat() to merge(), they will all go at once.)
Then we use mergeMap to jump over to getDownloadURL.
Then in the subscribe we add the url to the formValue.
Finally, in the finalize we set the class property "value" equal to the formValue.
onSubmit(formValue) {
const snapshotObservables = this.selectedImageArray.map(selectedImage => { // 4 images in this array
const filePath = `images/tours/${selectedImage.name.split('.').slice(0, -1).join('.')}_${new Date().getTime()}`;
// wrap filePath in of() so combineLatest pairs each upload snapshot with its path
return combineLatest(this.storage.upload(filePath, selectedImage).snapshotChanges(), of(filePath));
});
let index = 0; // track which image's URL this is, since subscribe() does not provide an index
concat(...snapshotObservables).pipe(
mergeMap(([snapshot, filePath]) => {
const fileRef = this.storage.ref(filePath);
return fileRef.getDownloadURL();
}),
finalize(() => {
this.value(formValue);
})
).subscribe(url => {
formValue[`imageUrl${index + 1}`] = url;
index += 1;
});
}
I wrote a new function for multiple file upload
public multipleFileUpload(event, isEncodeNeeded?: Boolean):Array<any> {
if(!isEncodeNeeded){
isEncodeNeeded=false;
}
let fileList = [];
for (let index = 0; index < event.target.files.length; index++) {
let returnData = {};
let file: File = event.target.files[index];
let myReader: FileReader = new FileReader();
returnData['documentName'] = event.target.files[index]['name'];
returnData['documentType'] = event.target.files[index]['type'];
myReader.addEventListener("load", function (e) {
if (myReader.readyState == 2) {
returnData['document'] = isEncodeNeeded ? btoa(e.target['result']) : e.target['result'];
}
});
myReader.readAsBinaryString(file);
fileList.push(returnData);
}
return fileList;
}
In this function, event is the change event of the file input, and isEncodeNeeded indicates whether conversion is needed. If it is true, the file contents are converted to base64 format.
The output format is
[{
"document": documentbyte,
"documentName": document name,
"documentType": file format
}]
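A rough usage sketch (the input element and the onFilesSelected handler name are hypothetical): wire the function to a file input's change event and pass true when base64 output is wanted. Keep in mind that FileReader loads asynchronously, so the document field of each entry is only populated once that file's load event has fired.
// Hypothetical change handler for an <input type="file" multiple> element,
// assuming it lives on the same class that defines multipleFileUpload().
onFilesSelected(event) {
  // documentName and documentType are available immediately;
  // document is only populated once each FileReader fires its load event.
  const files = this.multipleFileUpload(event, true); // true => base64-encode the contents
  console.log(files);
}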
I've got a special producer-consumer problem in RxJS: the producer slowly produces elements, and a consumer requests elements and often has to wait for the producer. This can be achieved by zipping the producer and the request stream:
var produce = getProduceStream();
var request = getRequestStream();
var consume = Rx.Observable.zipArray(produce, request).pluck(0);
Sometimes a request gets aborted. A produced element should only be consumed after a request that has not been aborted:
produce: -------------p1-------------------------p2--------->
request: --r1--------------r2---------------r3-------------->
abort: ------a(r1)------------------a(?)------------------>
consume: ------------------c(p1, r2)-------------c(p2, r3)-->
The first request r1 would consume the first produced element p1, but r1 gets aborted by a(r1) before it can consume p1. p1 is produced and gets consumed c(p1, r2) on the second request r2. The second abort a(?) is ignored, because no unanswered request happened before it. The third request r3 has to wait on the next produced element p2 and is not aborted until p2 is produced. Thus, p2 is consumed c(p2, r3) immediately after it is produced.
How can I achieve this in RxJS?
Edit:
I created an example with a QUnit test on jsbin. You can edit the function createConsume(produce, request, abort) to try/test your solution.
The example contains the function definition of the previously accepted answer.
This (core idea minus details) passes your JSBin test:
var consume = request
.zip(abort.merge(produce), (r,x) => [r,x])
.filter(([r,x]) => isNotAbort(x))
.map(([r,p]) => p);
And the JSBin code.
I can't quite wrap my brain around how to do it with existing operators. Here's how to do it with Observable.create():
return Rx.Observable.create(function (observer) {
var rsub = new Rx.SingleAssignmentDisposable();
var asub = new Rx.SingleAssignmentDisposable();
var psub = new Rx.SingleAssignmentDisposable();
var sub = new Rx.CompositeDisposable(rsub, asub, psub);
var rq = [];
var pq = [];
var completeCount = 0;
var complete = function () {
if (++completeCount === 2) {
observer.onCompleted();
}
};
var consume = function () {
if (pq.length && rq.length) {
var p = pq.shift();
var r = rq.shift();
observer.onNext('p' + p);
}
};
rsub.setDisposable(request.subscribe(
function (r) {
rq.push(r);
consume();
},
function (e) { observer.onError(e); },
complete));
asub.setDisposable(abort.subscribe(
function (a) {
rq.shift();
},
function (e) { observer.onError(e); }
));
psub.setDisposable(produce.subscribe(
function (p) {
pq.push(p);
consume();
},
function (e) { observer.onError(e); },
complete));
return sub;
});
http://jsbin.com/zurepesijo/1/
This solution ignores aborts that don't follow an unanswered request:
const {merge} = Rx.Observable;
Rx.Observable.prototype.wrapValue = function(wrapper) {
wrapper = (wrapper || {});
return this.map(function (value) {
wrapper.value = value;
return wrapper;
});
};
function createConsume(produce, request, abort) {
return merge(
produce.wrapValue({type: 'produce'}),
request.wrapValue({type: 'request'}),
abort.wrapValue({type: 'abort'})
)
.scan(
[false, []],
([isRequest, products], e) => {
// if last time the request was answered
if (isRequest && products.length) {
// remove consumed product
products.shift();
// mark request as answered
isRequest = false;
}
if (e.type === 'produce') {
// save product to consume later
products.push(e.value);
} else {
// if evaluated to false, e.type === 'abort'
isRequest = (e.type === 'request');
}
return [isRequest, products];
}
)
.filter( ([isRequest, products]) => (isRequest && products.length) )
.map( ([isRequest, products]) => products[0] ); // consume
}
Code in newest test on JSBin.
I'm trying to figure out how to roll back only a folder node that wasn't successfully moved. The code below is an example of what I'm trying to do. The problem comes when you have selected a couple of folders and moved them into another folder: if one of the directories fails to be moved, I want to be able to roll it back to its original parent.
Unfortunately $.jstree.rollback(data.rlbk); rolls back all of the folders that were selected to their previous locations.
$("#tree").jstree({...}).bind("move_node.jstree", function (e, data) {
// process all selected nodes directory
data.rslt.o.each(function (i) {
// Send request.
var move = $.parseJSON($.ajax({
url: "./jstree.php",
type: 'post',
async: false,
data: {
operation: "move_dir",
....
}
}).responseText);
// When everything's ok, the reponseText will be {success: true}
// In all other cases it won't exist at all.
if(move.success == undefined){
// Here I want to rollback the CURRENT failed node.
// $.jstree.rollback(data.rlbk); will rollback all
// of the directories that have been moved.
}
});
});
Is there a way for this to be done?
I've looked at using jstree before, but haven't used it in my code. As a result, the code may not be correct, but the concepts should be.
Based on your code, it appears that you're performing the move operation on the server side and you want the tree to be updated to reflect the results.
Based on the jsTree documentation, it looks as though you cannot commit node updates and roll back to the last commit.
Instead of rolling back only the changes that you don't want, you can roll back the tree (all changes) and perform the moves afterward.
In order to better understand the code below, you may want to read it (or create a copy) without the lines where "wasTriggeredByCode" is set or referenced in the condition for an "if" statement.
$("#tree").jstree({...}).bind("move_node.jstree", function (e, data) {
var jsTree = $(this);
var successes = [];
// Becomes true when function was triggered by code that updates jsTree to
// reflect nodes that were successfully moved on the server
var wasTriggeredByCode = false;
// process all selected nodes directory
data.rslt.o.each(function (i) {
// I'm not certain that this is how the node is referenced
var node = $(this);
wasTriggeredByCode = (wasTriggeredByCode || node.data('redoing'));
// Don't perform server changes when event was triggered from code
if (wasTriggeredByCode) {
return;
}
// Send request.
var move = $.parseJSON($.ajax({
url: "./jstree.php",
type: 'post',
async: false,
data: {
operation: "move_dir",
....
}
}).responseText);
if(move.success){
successes.push(node);
}
});
// Don't continue when event was triggered from code
if (wasTriggeredByCode) {
return;
}
// Roll back the tree here
jsTree.rollback(data.rlbk);
// Move the nodes
for (var i=0; i < successes.length; i++) {
var node = successes[i];
// According to the documentation this will trigger the move event,
// which will result in infinite recursion. To avoid this you'll need
// to set a flag or indicate that you're redoing the move.
node.data('redoing', true);
jsTree.move_node(node, ...);
// Remove the flag so that additional moves aren't ignored
node.removeData('redoing');
}
});
I thought about having something like an "onbeforenodemove" event in jstree, something like this:
$("#tree").jstree({...}).bind("before_move_node.jstree", function (e, data) {...}
So I looked inside the jstree.js file (version jsTree 3.1.1) and searched for the declaration of the original "move_node.jstree" handler. I found it declared starting at line 3689:
move_node: function (obj, par, pos, callback, is_loaded, skip_redraw, origin) {...}
This function contains the following line at the end of its body:
this.trigger('move_node', { "node" : obj, "parent" : new_par.id, "position" : pos, "old_parent" : old_par, "old_position" : old_pos, 'is_multi' : (old_ins && old_ins._id && old_ins._id !== this._id), 'is_foreign' : (!old_ins || !old_ins._id), 'old_instance' : old_ins, 'new_instance' : this });
The above line actually calls your callback declared using .bind("move_node.jstree").
So at the beginning of this function body, I added this:
var before_data = { "node": obj, "parent": new_par.id, "position": pos, "old_parent": old_par, "old_position": old_pos, 'is_multi': (old_ins && old_ins._id && old_ins._id !== this._id), 'is_foreign': (!old_ins || !old_ins._id), 'old_instance': old_ins, 'new_instance': this, cancelled: false };
this.trigger('before_move_node', before_data);
if (before_data.cancelled) {
return false;
}
Mind "cancelled": false at the end of before_data assigned value.
Also mind inserting the above after new_par, etc. values are assigned.
The code (jsTree instantiation) on my page now looks like this:
$('#tree')
.jstree({
core: {...},
plugins: [...]
})
.bind('before_move_node.jstree', function (e, data) {
if (...) {
data.cancelled = true;
}
})
The data object passed to 'before_move_node.jstree' contains the same values that you receive in the standard 'move_node.jstree' data argument, so you have everything you need to decide whether to cancel the move or let it go. If you decide to cancel, just set the additional 'cancelled' property to true. The entire move will then not happen.
As the documentation says (https://github.com/vakata/jstree/wiki#more-on-configuration), you can check the more.core property.
Example
$('#jstree1').jstree({
core: {
check_callback: async (operation, node, node_parent, node_position, more) => {
switch (true) {
case operation === 'move_node':
let canmove = true
const dropped = more.core === true // not dragging anymore...
if (dropped) {
// before move..
const success = await yourHttpRequest()
if (!success) {
canmove = false
}
} else {
canmove = yourCheckHere()
}
return canmove
}
}
}
})
Example 2
document.addEventListener("DOMContentLoaded", function () {
const bootstrap = (() => {
myTree.mySetup()
})
const myTree = {
mySetup: () => {
$('#jstree1').jstree({
core: {
check_callback: (operation, node, node_parent, node_position, more) => {
switch (true) {
case operation === 'move_node':
return myTree.myGates.canMove(node, node_parent, node_position, more)
}
// deny by default
return false
}
},
plugins: ['dnd']
})
.on('move_node.jstree', (node, parent, position, old_parent, old_position, is_multi, old_instance, new_instance) => {
myTree.myHandlers.onMove({
node, parent, position, old_parent, old_position, is_multi, old_instance, new_instance
})
})
},
myGates: {
canMove: async (node, node_parent, node_position, more) => {
// use let, since canmove is reassigned below
let canmove = true
const dropped = more.core === true
if (dropped) {
// onBeforeMove is async, so await its result
const success = await myTree.myHandlers.onBeforeMove({
node, node_parent, node_position, more
})
if (!success) {
canmove = false
}
} else {
canmove = yourCheckHere()
}
return canmove
}
},
myHandlers: {
onBeforeMove: async () => {
// try to update the node in database
const success = await yourHttpRequestHere()
return success
},
onMove: () => {
// node moved in the ui
// do other stuff...
},
}
}
bootstrap()
})