I'm new to JavaScript, so bear with me if what I'm asking is not "how you do it in JavaScript". Advice on other approaches is welcome.
I have a class named State and I need to serialize objects of that class using JSON.stringify(). The next step is to deserialize them back into objects of that class. However, my class uses setters and getters.
The problem I'm facing is that after I deserialize those objects, the setters and getters seem to be gone. I just cannot figure out how to properly turn serialized objects back into objects of that class so that they behave exactly like objects created directly with new.
In another language I would cast those objects into State objects. I cannot find a JavaScript mechanism that seems to work that way.
The code looks as follows:
class State {
constructor(href) {
this.url = href;
}
set url(href) {
this._url = new URL(href);
this.demoParam = this._url.searchParams.get("demoParam");
}
get url() {
return this._url;
}
set demoParam(value) {
let param = parseInt(value, 10);
if(isNaN(param)) {
param = 2;
}
console.log("Setting 'demoParam' to value " + param);
this._demoParam = param;
}
get demoParam() {
return this._demoParam;
}
toJSON() {
let stateObject = {};
const prototypes = Object.getPrototypeOf(this);
for(const key of Object.getOwnPropertyNames(prototypes)) {
const descriptor = Object.getOwnPropertyDescriptor(prototypes, key);
if(descriptor && typeof descriptor.get === 'function') {
stateObject[key] = this[key];
}
}
return stateObject;
}
}
let originalState = new State(window.location.href);
let newState1 = JSON.parse(JSON.stringify(originalState));
newState1.demoParam = 12;
let newState2 = Object.create(State.prototype, Object.getOwnPropertyDescriptors(JSON.parse(JSON.stringify(originalState))));
newState2.demoParam = 13;
let newState3 = Object.assign(new State(window.location.href), JSON.parse(JSON.stringify(originalState)));
newState3.demoParam = 14;
let newState4 = Object.setPrototypeOf(JSON.parse(JSON.stringify(originalState)), State.prototype);
newState4.demoParam = 15;
I would expect that every time I set the demoParam property of a newStateX object I'd see a console log message. However, I only see it twice, i.e. once for each new State(window.location.href) statement.
I have used the answer to this question; however, it does not work as expected.
When you serialize an object, you trigger the toString or toJSON method of your class instance and end up with just a "dumb" JSON representation of your enumerable attributes.
If you want to recreate an instance that behaves like it did prior to serialization, you will need to set an extra key/value pair in your toJSON function, like ___internalType: 'state', and then later use e.g. a switch statement to recreate your specific class with new MyClass(serializedData), passing in your serialized instance. Within the constructor of your class you set all the attributes you need and voilà, you have your "old" instance again.
Edit: To clarify, the reason why your console logs aren't showing up is that you are not recreating an instance of your class but just creating a new plain Object.
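A minimal sketch of that idea applied to the State class from the question (reviveState is a made-up helper name, not an established API):
// In State.toJSON(), additionally tag the payload:
//   stateObject.___internalType = 'state';

function reviveState(json) {
  const data = JSON.parse(json);
  switch (data.___internalType) {
    case 'state':
      // the constructor re-runs the url/demoParam setters
      return new State(data.url);
    default:
      return data;
  }
}

const restored = reviveState(JSON.stringify(originalState));
restored.demoParam = 12; // logs "Setting 'demoParam' to value 12" again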
You can use Object.assign to copy plain object data into an "empty" new instance of the class along the lines of this code:
function cast(o) {
if (!o._cls) return o;
var _cls = eval(o._cls);
return Object.assign(new _cls(), o);
}
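For the cast function to know which class to rebuild, the serialized data has to carry the class name (here via _cls), and the constructor has to tolerate being called without arguments. A rough sketch with a made-up Point class (not the State class from the question, whose constructor requires an href):
class Point {
  constructor(x = 0, y = 0) { this.x = x; this.y = y; }
  toJSON() { return { _cls: 'Point', x: this.x, y: this.y }; }
  norm() { return Math.hypot(this.x, this.y); }
}

const revived = cast(JSON.parse(JSON.stringify(new Point(3, 4))));
console.log(revived.norm()); // 5: methods work again because revived is a real Point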
In JavaScript I personally like to avoid using classes for my data objects. TypeScript offers some better opportunities to solve this problem; one of these is TypedJSON:
https://github.com/JohnWeisz/TypedJSON
New to Js, sorry if this is an obvious one.
I have some strings in my code that correspond to the names of variables. I'd like to put them into a function and have the function be able to make changes to the variables that have the same names as the strings.
The best example is where this 'string' is passed through from a data tag in HTML, but I have some other situations where this issue appears. Open to changing my entire approach too if the premise of my question is backwards.
<html>
<div data-location="deck" onClick="moveCards(this.dataset.location);">
</html>
var deck = ["card"];
function moveCards(location) {
  location.shift();
}
Thanks!
A script should not depend on the names of standalone variables; this can break certain engine optimizations and minification. Also, inline handlers are nearly universally considered to be pretty poor practice - consider adding an event listener properly using Javascript instead. This will also allow you to completely avoid the issue with dynamic variable names. For example:
const deck = ["card", "card", "card"];
document.querySelector('div[data-location="deck"]').addEventListener('click', () => {
deck.shift();
console.log('deck now has:', deck.length + ' elements');
});
<div data-location="deck">click</div>
I think this can technically be done using eval, but it is good practice to think more clearly about how you design this so that you only access objects you directly declare. One example of better design might be:
const container = {
  obj1: { my_method() { /* whatever this object does */ } },
  // ...
  objn: { my_method() { /* whatever this object does */ } }
};

function applyMethodToObject(object_passed) {
  container[object_passed].my_method();
}
I'm not sure I 100% follow what you're trying to do, but rather than trying to dynamically resolve variable names you might consider using keys in an object to do the lookup:
const locations = {
deck: ['card']
}
function moveCards (location) {
// if 'deck' is passed to this function, this is
// the equivalent of locations['deck'].shift();
locations[location].shift();
};
Here's a working demo:
const locations = {
deck: ['card 1', 'card 2', 'card 3', 'card 4']
};
function move (el) {
const location = el.dataset.location;
const item = locations[location];
item.shift();
updateDisplay(item);
}
// update the display so we can see the list
function updateDisplay(item) {
  document.getElementById('display').innerHTML = item.join(', ');
}
// initial list
updateDisplay(locations['deck']);
#display {
font-family: monospace;
padding: 1em;
background: #eee;
margin: 2em 0;
}
<div data-location='deck' onclick="move(this)">click to shift deck</div>
<div id="display">afda</div>
When you assign a value to an object in JavaScript, you can access it with dot or bracket notation, i.e.
foo = {};
foo.bar = "bar";
console.log(foo.bar);
console.log(foo["bar"]);
Additionally, global variables declared with var are added to the window object, meaning deck is available at window["deck"], or window[location] in your case. That means your moveCards function could do:
function moveCards(location) {
// perform sanity checks since you could change data-location="foo"
// which would then call window.foo.shift()
if (window[location]) {
window[location].shift();
}
}
That said, this probably isn't a great approach, though it's hard to say without a lot more context.
In JS, or any OOP language, polymorphism is achieved by creating different types.
For example:
class Field {...}
class DropdownField extends Field {
getValue() {
//implementation ....
}
}
Imagine I have a library, forms.js, with some methods:
class Forms {
getFieldsValues() {
let values = [];
for (let f of this.fields) {
values.push(f.getValue());
}
return values;
}
}
This gets all the field values. Notice the library doesn't care what kind of field it is.
This way, developer A creates the library and developer B can make new fields, e.g. AutocompleterField.
He can add methods in AutocompleterField without changing the library code (Forms.js).
If I use a functional programming approach in JS, how can I achieve this?
If I don't have methods on the objects, I can use case statements, but this violates the principle. Something like this:
if (field.type == 'DropdownField')...
else if (field.type == 'Autocompleter')..
If developer B adds a new type, he has to change the library code.
So is there any good way to solve this issue in JavaScript without using object-oriented programming?
I know JS isn't exactly OOP nor FP, but anyway.
Thanks
JavaScript being a multi-purpose language, you can of course solve this in different ways. When switching to functional programming, the answer is really simple: use functions! The problem with your example is this: it is so stripped down that you can do exactly what it does with just three lines:
// getValue :: DOMNode -> String
const getValue = field => field.value;
// readForm :: Array DOMNode -> Array String
const readForm = formFields => formFields.map(getValue);
readForm(Array.from(document.querySelectorAll('input, textarea, select')));
// -> ['Value1', 'Value2', ... 'ValueN']
The critical thing is: How is Field::getValue() implemented, what does it return? Or more precisely: How does DropdownField::getValue() differ from AutocompleteField::getValue() and for example NumberField::getValue()? Do all of them just return the value? Do they return a pair of name and value? Do they even need to be different?
The question is therefore: do your Field classes and their subclasses differ because of the way their getValue() methods work, or do they rather differ because of other functionality they have? For example, the "autocomplete" functionality of a text field isn't (or shouldn't be) tied to the way the value is taken from it.
In case you really need to read the values differently, you can implement a function which takes a map/dictionary/object/POJO of {fieldtype: readerFunction} pairs:
/* Library code */
// getTextInputValue :: DOMNode -> String
const getTextInputValue = field => field.value;
// getDropdownValue :: DOMNode -> String
const getDropdownValue = field => field.options[field.selectedIndex].value;
// getTextareaValue :: DOMNode -> String
const getTextareaValue = field => field.textContent;
// readFieldsBy :: {String :: (a -> String)} -> DOMNode -> Array String
const readFieldsBy = kv => form => Object.keys(kv).reduce((acc, k) => {
return acc.concat(Array.from(form.querySelectorAll(k)).map(kv[k]));
}, []);
/* Code the library consumer writes */
const readMyForm = readFieldsBy({
'input[type="text"]': getTextInputValue,
'select': getDropdownValue,
'textarea': getTextareaValue
});
readMyForm(document.querySelector('#myform'));
// -> ['Value1', 'Value2', ... 'ValueN']
Note: I intentionally didn't mention things like the IO monad here, because it would make stuff more complicated, but you might want to look it up.
In JS, or any OOP language, polymorphism is achieved by creating different types.
Yes. Or rather, by implementing the same type interface in different objects.
How can I use Javascript polymorphism without OOP classes
You seem to confuse classes with types here. You don't need JS class syntax to create objects at all.
You can just have
const autocompleteField = {
getValue() {
…
}
};
const dropdownField = {
getValue() {
…
}
};
and use the two in your Forms instance.
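A Forms-style consumer then only needs the getValue contract; a minimal sketch (getFieldsValues here is a free function standing in for the library method):
// Works with any object that has a getValue() method, class-based or not.
const getFieldsValues = fields => fields.map(field => field.getValue());

// Using the two objects defined above:
getFieldsValues([autocompleteField, dropdownField]);
// -> [whatever each getValue() returns]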
It depends on what you mean by "polymorphism". There's the so-called ad-hoc polymorphism that type classes in Haskell, Scala, or PureScript provide; this kind of dispatch is usually implemented by passing witness objects along as additional function arguments, and those witnesses know how to perform the polymorphic functionality.
For example, the following PureScript code (from the docs), which provides a show function for some types:
class Show a where
show :: a -> String
instance showString :: Show String where
show s = s
instance showBoolean :: Show Boolean where
show true = "true"
show false = "false"
instance showArray :: (Show a) => Show (Array a) where
show xs = "[" <> joinWith ", " (map show xs) <> "]"
example = show [true, false]
It gets compiled to the following JS (which I shortened):
var Show = function (show) {
this.show = show;
};
var show = function (dict) {
return dict.show;
};
var showString = new Show(function (s) {
return s;
});
var showBoolean = new Show(function (v) {
if (v) {
return "true";
};
if (!v) {
return "false";
};
throw new Error("Failed pattern match at Main line 12, column 1 - line 12, column 37: " + [ v.constructor.name ]);
});
var showArray = function (dictShow) {
return new Show(function (xs) {
return "[" + (Data_String.joinWith(", ")(Data_Functor.map(Data_Functor.functorArray)(show(dictShow))(xs)) + "]");
});
};
var example = show(showArray(showBoolean))([ true, false ]);
There's absolutely no magic here, just some additional arguments. And at the "top", where you actually know concrete types, you have to pass in the matching concrete witness objects.
In your case, you would pass around something like a HasValue witness for different forms.
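Translated into plain JavaScript, such a witness is just an extra object argument that knows how to read a value. A sketch (the hasValue* names and the field shapes are assumptions for illustration):
// Witness objects: each one knows how to extract a value from "its" kind of field.
const hasValueDropdown     = { getValue: field => field.options[field.selectedIndex] };
const hasValueAutocomplete = { getValue: field => field.text };

// Generic code only ever talks to the witness, never to a concrete field type.
const readField = (witness, field) => witness.getValue(field);

readField(hasValueDropdown,     { options: ['a', 'b'], selectedIndex: 1 }); // 'b'
readField(hasValueAutocomplete, { text: 'hello' });                         // 'hello'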
You could use the factory pattern to ensure you follow the open/closed principle.
This principle says "Software entities (classes, modules, functions, etc.) should be open for extension, but closed for modification".
class FieldValueProviderFactory {
getFieldValue(field) {
return this.providers.find(p => p.type === field.type).provider(field);
}
registerProvider(type, provider) {
if(!this.providers) {
this.providers = [];
}
this.providers.push({type:type, provider:provider});
}
}
var provider = new FieldValueProviderFactory();
provider.registerProvider('DropdownField', (field) => [ 1, 2, 3 ]);
provider.registerProvider('Autocompleter', (field) => [ 3, 2, 1 ]);
class FieldCollection {
getFieldsValues() {
this.fields = [ { type:'DropdownField',value:'1' }, { type:'Autocompleter',value:'2' } ];
let values = [];
for (let field of this.fields) {
values.push(provider.getFieldValue(field));
}
return values;
}
}
Now, when you want to support new field types, you can register a provider for them in the factory and don't have to modify the library code.
new FieldCollection().getFieldsValues();
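For example, developer B could later support a hypothetical ColorPickerField entirely outside the library:
// Registered by developer B; neither FieldValueProviderFactory nor FieldCollection changes.
provider.registerProvider('ColorPickerField', (field) => [ field.value ]);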
I want to refactor Snippet 1 into Snippet 2. I don't think performance is much of an issue here considering the size, but I want to understand what is going on as far as memory use goes with this refactor to the module pattern.
The module pattern ensures that I only pull this data from the DOM once, which is what I want, and it also forms a mini-registry pattern in that the data is private.
Both snippets have been tested and basically work.
Snippet 1 // Replace SUniversals w/ SU
var SUniversals = function () {
// Pull from Server
this.universals.path = document.getElementById('universals').getAttribute('data-path');
this.universals.load = document.getElementById('universals').getAttribute('data-load');
// Set Manually
this.universals.debug = false;
};
SUniversals.prototype.universals = {};
SUniversals.prototype.get = function( key ) {
return this.universals[ key ];
};
SUniversals.prototype.set = function( key, value ) {
this.universals[ key ] = value;
};
Snippet 2
var SU = ( function ()
{
// private SU.get('load');
var universals = {};
universals.path = document.getElementById('universals').getAttribute('data-path');
universals.load = document.getElementById('universals').getAttribute('data-load');
universals.debug = false;
// public
var publik = {};
publik.get = function( key )
{
return universals[ key ];
};
publik.set = function( key, value )
{
universals[ key ] = value;
};
return publik;
}());
There are a few things that are different. Snippet 2 essentially creates a singleton, while Snippet 1 can be looked at as a 'class': you can create multiple instances/objects of 'SUniversals' and do different things with each of them.
Actually, Snippet 1 is more efficient in terms of memory. By adding to the object's prototype, you will have only one copy of each function, irrespective of the number of objects you create. The module pattern creates separate function objects for each module it builds.
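A quick way to see the difference (a sketch; it assumes the #universals element from the snippets exists, and uses a hypothetical makeSU factory to stand in for building the module more than once):
// Prototype-based (Snippet 1): every instance shares the same function objects.
var a = new SUniversals();
var b = new SUniversals();
console.log(a.get === b.get); // true: one copy, living on the prototype

// Module/factory style: each call creates fresh closures.
function makeSU() {
  var universals = {};
  return {
    get: function (key) { return universals[key]; },
    set: function (key, value) { universals[key] = value; }
  };
}
console.log(makeSU().get === makeSU().get); // false: separate function objects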
Not enough to worry about ;-)
Seriously, the only thing you need to worry about with the module pattern is creating memory leaks; by itself the pattern uses basically nothing.
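For example, anything a module closes over stays alive for as long as the module itself is reachable; a contrived sketch (Leaky, bigCache, and node are made-up names):
var Leaky = (function () {
  // Both of these stay referenced by the closure for the module's lifetime,
  // even if nothing else on the page needs them any more.
  var bigCache = new Array(1000000).join('x');
  var node = document.getElementById('universals');
  return {
    peek: function () { return bigCache.length + (node ? 1 : 0); }
  };
}());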
I'm using Javascript with jQuery. I'd like to implement out params. In C#, it would look something like this:
/*
* odp the object to test
* error a string that will be filled with the error message if odp is illegal. Undefined otherwise.
*
* Returns true if odp is legal.
*/
bool isLegal(odp, out error);
What is the best way to do something like this in JS? Objects?
function isLegal(odp, errorObj)
{
// ...
errorObj.val = "ODP failed test foo";
return false;
}
Firebug tells me that the above approach would work, but is there a better way?
The callback approach mentioned by @Felix Kling is probably the best idea, but I've also found that sometimes it's easy to leverage JavaScript object literal syntax and just have your function return an object on error:
function mightFail(param) {
// ...
return didThisFail ? { error: true, msg: "Did not work" } : realResult;
}
then when you call the function:
var result = mightFail("something");
if (result.error) alert("It failed: " + result.msg);
Not fancy and hardly bulletproof, but certainly it's OK for some simple situations.
I think this is pretty much the only way (but I am not a hardcore JavaScript programmer ;)).
What you could also consider is to use a callback function:
function onError(data) {
// do stuff
}
function isLegal(odp, cb) {
//...
if(error) cb(error);
return false;
}
isLegal(value, onError);
Yes, as you yourself mentioned, objects are the best and only way to pass data by reference in JavaScript. I would keep your isLegal function as such and simply call it like this:
var error = {};
isLegal("something", error);
alert(error.val);
The answers I have seen so far aren't implementing out parameters in JavaScript as they are used in C# (the out keyword); they are merely workarounds that return an object in case of an error.
But what do you do if you really need out parameters?
Because JavaScript doesn't directly support it, you need to build something that is close to C#'s out parameters. Take a look at this approach: I am emulating C#'s DateTime.TryParse function in JavaScript. The out parameter is result, and because JavaScript doesn't provide an out keyword, I am using .value inside the function to pass the value outside the function (as inspired by the MDN suggestion):
// create a function similar to C#'s DateTime.TryParse
var DateTime = [];
DateTime.TryParse = function(str, result) {
result.value = new Date(str); // out value
return (result.value != "Invalid Date");
};
// now invoke it
var result = [];
if (DateTime.TryParse("05.01.2018", result)) {
alert(result.value);
} else {
alert("no date");
};
Run the snippet and you'll see it works: it parses the str parameter into a Date and returns it in the result parameter. Note that result needs to be initialized as an empty array [] before you call the function (it can also be an object {} depending on your needs). This is required because inside the function you "inject" the .value property.
Now you can use the pattern above to write a function like the one in your question (this also shows how to emulate the discard parameter known as out _ in C#: in JavaScript we pass [] as shown below):
// create a function similar to C#'s DateTime.TryParse
var DateTime = [];
DateTime.TryParse = function(str, result) {
result.value = new Date(str); // out value
return (result.value != "Invalid Date");
};
// returns false if odp is not a date, otherwise true
function isLegal(odp, errorObj) {
if (DateTime.TryParse(odp, [])) { // discard result here by passing []
// all OK: leave errorObj.value undefined and return true
return true;
} else {
errorObj.value = "ODP failed test foo"; // return error
return false;
}
}
// now test the function
var odp = "xxx01.12.2018xx"; // invalid date
var errorObj = [];
if (!isLegal(odp, errorObj)) alert(errorObj.value); else alert("OK!");
What this example does is use the errorObj parameter to pass an error message back, as follows:
errorObj.value = "ODP failed test foo"; // return error
If you run the example it will display this message in a popup dialog.
Note: Instead of using a discard parameter as shown above, in JavaScript you could also use a check for undefined, i.e. inside the function check for
if (result === undefined) {
// do the check without passing back a value, i.e. just return true or false
};
Then it is possible to omit result as a parameter completely if not needed, so you could invoke it like
if (DateTime.TryParse(odp)) {
// ... same code as in the snippet above ...
};
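Putting that variant together, a sketch based on the TryParse code above, with result made optional:
DateTime.TryParse = function (str, result) {
  var date = new Date(str);
  if (result !== undefined) {
    result.value = date; // only write the out value if the caller asked for it
  }
  return date.toString() !== "Invalid Date";
};

DateTime.TryParse("05.01.2018");        // true, no out parameter needed
var out = {};
DateTime.TryParse("05.01.2018", out);   // true, and out.value holds the Date
DateTime.TryParse("no date at all");    // false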
I am using a callback method (similar to Felix Kling's approach) to simulate the behavior of out parameters. My answer differs from Kling's in that the callback function acts as a reference-capturing closure rather than a handler.
This approach suffers from JavaScript's verbose anonymous function syntax, but closely reproduces out parameter semantics from other languages.
function isLegal(odp, out_error) {
//...
out_error("ODP failed test foo"); // Assign to out parameter.
return false;
}
var error;
var success = isLegal(null, function (e) { error = e; });
// Invariant: error === "ODP failed test foo".
There is another way JS can pass 'out' parameters, but I believe the best ones for your situation were already mentioned.
Arrays are also passed by reference, not by value. Thus, just as you can pass an object to a function, set a property of the object inside the function, return, and then access that object's property, you can similarly pass an array to a function, set some values of the array inside the function, and access those values outside the function after it returns.
So in each situation you can ask yourself, "is an array or an object better?"
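A minimal sketch of the array variant:
function tryParseInt(str, out) {
  var n = parseInt(str, 10);
  out[0] = n;            // write the "out" value into the caller's array
  return !isNaN(n);
}

var out = [];
if (tryParseInt("42", out)) {
  console.log(out[0]);   // 42: the caller sees the value written inside the function
}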
I'm not going to post any code but what fails to be done here in these answers is to put rhyme to reason. I'm working in the native JS arena and the problem arose that some native API calls need to be transformed because we can't write to the parameters without ugly shameful hacks.
This is my solution:
// Functions that return parameter data should be modified to return
// an array whose zeroth member is the return value and whose other
// members sit at their respective 1-based parameter indices.
That doesn't mean you define and return every parameter, only the
parameters that receive output.
The reason for this approach is this: multiple return values may be needed for any number of procedures. This creates a situation where objects with named values (which ultimately will not be in sync with the lexical context of all operations) constantly need to be memorized in order to work appropriately with the procedure(s).
Using the prescribed method, you only have to know what you called and where you should be looking, rather than having to know what you are looking for.
There is also the advantage that "robust and stupid" algorithms can be written to wrap around the desired procedure calls to make this operation "more transparent".
It would be wise to use an object, function, or array (all of which are objects) as a "write-back-output" parameter, but I believe that if any extraneous work must be done, it should be done by the one writing the toolkit, to make things easier or to broaden functionality.
This is a one-for-all answer for every occasion, one that keeps APIs looking the way they should at first look, rather than resembling a hobble-cobbled weave of spaghetti-code tapestry that cannot figure out whether it is a definition or data.
Congratulations, and good luck.
I'm using webkitgtk3 and interfacing some native C library procs, so this proven code sample might at least serve the purpose of illustration.
// ssize_t read(int filedes, void *buf, size_t nbyte)
SeedValue libc_native_io_read (SeedContext ctx, SeedObject function, SeedObject this_object, gsize argument_count, const SeedValue arguments[], SeedException *exception) {
// NOTE: caller is completely responsible for buffering!
/* C CODING LOOK AND FEEL */
if (argument_count != 3) {
seed_make_exception (ctx, exception, xXx_native_params_invalid,
"read expects 3 arguments: filedes, buffer, nbyte: see `man 3 read' for details",
argument_count
); return seed_make_undefined (ctx);
}
gint filedes = seed_value_to_int(ctx, arguments[0], exception);
void *buf = seed_value_to_string(ctx, arguments[1], exception);
size_t nbyte = seed_value_to_ulong(ctx, arguments[2], exception);
SeedValue result[3];
result[0] = seed_value_from_long(ctx, read(filedes, buf, nbyte), exception);
result[2] = seed_value_from_binary_string(ctx, buf, nbyte, exception);
g_free(buf);
return seed_make_array(ctx, result, 3, exception);
}
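On the JavaScript side the caller would then unpack the returned array; a rough sketch (the libc.read binding name and fd are assumptions about how the native proc above is exposed):
var result = libc.read(fd, "", 4096); // hypothetical JS binding of the native read() above
var bytesRead = result[0];            // element 0: the C function's return value
var data      = result[2];            // element 2: the out value of parameter 2 (buf)
// bytesRead < 0 signals an error, just like the underlying C call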
The following is the approach I am using, and this is my answer to this question. However, the code has not been tested.
function mineCoords( an_x1, an_y1 ) {
this.x1 = an_x1;
this.y1 = an_y1;
}
function mineTest( an_in_param1, an_in_param2 ) {
// local variables
var lo1 = an_in_param1;
var lo2 = an_in_param2;
// process here lo1 and lo2 and
// store result in lo1, lo2
// set result object
var lo_result = new mineCoords( lo1, lo2 );
return lo_result;
}
var lo_test = mineTest( 16.7, 22.4 );
alert( 'x1 = ' + lo_test.x1.toString() + ', y1 = ' + lo_test.y1.toString() );
The usual approach to the specific use case you outlined in Javascript, and in fact most high level languages, is to rely on Errors (aka exceptions) to let you know when something out of the ordinary has occurred. There's no way to pass a value type (strings, numbers etc) by reference in Javascript.
I would just do that. If you really need to feed custom data back to the calling function you can subclass Error.
var MyError = function (message, some_other_param)
{
this.message = message;
this.some_other_param = some_other_param;
}
//I don't think you even need to do this, but it makes it nice and official
MyError.prototype = Object.create(Error.prototype);
...
if (something_is_wrong)
throw new MyError('It failed', /* here's a number I made up */ 150);
Catching exceptions is a pain, I know, but then again so is keeping track of references.
If you really really need something that approaches the behavior of out variables, objects are passed by reference by default, and can handily capture data from other scopes--
function use_out (outvar)
{
outvar.message = 'This is my failure';
return false;
}
var container = { message : '' };
var result = use_out(container );
console.log(container.message); ///gives the string above
console.log(result); //false
I think this goes some way towards answering your question, but I think your entire approach is broken from the start. JavaScript supports so many more elegant and powerful ways to get multiple values out of a function. Do some reading about generators and closures; hell, even callbacks can be nice in certain situations. Look up continuation-passing style.
My point with this whole rant is to encourage anyone reading this to adapt their programming style to the limitations and capabilities of the language they're using, rather than trying to force what they learned from other languages into it.
(BTW some people strongly recommend against closures because they cause evil side-effects, but I wouldn't listen to them. They're purists. Side effects are almost unavoidable in a lot of applications without a lot of tedious backtracking and stepping around cant-get-there-from-here obstacles. If you need them, keeping them all together in a neat lexical scope rather than scattered across a hellscape of obscure pointers and references sounds a lot better to me)
The main advantage of real output parameters is direct modification of one or more scalar variables in the scope of the caller. Among the approaches proposed in other answers, only callbacks satisfy this requirement:
function tryparse_int_1(s, cb)
{ var res = parseInt(s);
cb(res);
return !isNaN( res );
}
function test_1(s)
{ var /* integer */ i;
if( tryparse_int_1( s, x=>i=x ) )
console.log(`String "${s}" is parsed as integer ${i}.`); else
console.log(`String "${s}" does not start with an integer.`);
}
test_1("47");
test_1("forty-seven");
In this case, passing each output parameter requires five extra characters to wrap its identifier into an anonymous setter function. It is neither very readable nor easy to type frequently, so one can resort to the single most interesting property of scripting languages—their ability to do magick, such as executing strings as code.
The following example implements an extended version of the integer-parsing function above, which now has two output parameters: the resulting integer and a flag indicating whether it is positive:
/* ------------ General emulator of output parameters ------------ */
function out_lit(v)
{ var res;
if( typeof(v) === "string" )
res = '"' + v.split('\"').join('\\\"') + '"'; else
res = `${v}`;
return res;
}
function out_setpar(col, name, value)
{ if( col.outs == undefined ) col.outs = [];
col.outs[name] = value;
}
function out_setret(col, value)
{ col.ret = value; }
function out_ret( col )
{ var s;
for(e in col.outs)
{ s = s + "," + e + "=" + out_lit( col.outs[e] ); }
if( col.ret != undefined )
{ s = s + "," + out_lit( col.ret ); }
return s;
}
/* -------- An intger-parsing function using the emulator -------- */
function tryparse_int_2 // parse the prefix of a string as an integer
( /* string */ s, // in: input string
/* integer */ int, // out: parsed integer value
/* boolean */ pos // out: whether the result is positive
)
{ var /* integer */ res; // function result
var /* array */ col; // collection of out parameters
res = parseInt(s);
col = [];
out_setpar( col, int, res );
out_setpar( col, pos, res > 0 );
out_setret( col, !isNaN( res ) );
return out_ret( col );
}
In this version, passing each output parameter requires two extra characters around its identifier to embed it into a string literal, plus six characters per invocation to evaluate the result:
function test_2(s)
{ var /* integer */ int;
var /* boolean */ pos;
if( !eval( tryparse_int_2( s, "int", "pos" ) ) )
{ console.log(`String "${s}" does not start with an integer.`); }
else
{ if( pos ) adj = "positive";
else adj = "non-positive";
console.log(`String "${s}" is parsed as a ${adj} integer ${int}.`);
}
}
test_2( "55 parrots" );
test_2( "-7 thoughts" );
test_2( "several balls" );
The output of the test code above is:
String "55 parrots" is parsed as a positive integer 55.
String "-7 thoughts" is parsed as a non-positive integer -7.
String "several balls" does not start with an integer.
This solution, however, has a deficiency: it cannot handle returns of non-basic types.
Perhaps a cleaner approach is the emulation of pointers:
// Returns JavaScript for the definition of a "pointer" to a variable named `v`:
// The identifier of the pointer is that of the variable prepended by a $.
function makeref(v)
{ return `var $${v} = {set _(val){${v}=val;},get _() {return ${v};}}`; }
// Calculates the square root of `value` and puts it into `$root`.
// Returns whether the operation has succeeded.
// In case of an error, stores error message in `$errmsg`.
function sqrt2
( /* in number */ value, /* value to take the root of */
/* out number */ $root , /* "pointer" to result */
/* out string */ $errmsg /* "pointer" to error message */
)
{ if( typeof( value ) !== "number" )
{ $errmsg._ = "value is not a number.";
return false;
}
if( value < 0 )
{ $errmsg._ = "value is negative.";
return false;
}
$root._ = Math.sqrt(value);
return true;
}
The following test code:
function test(v)
{ var /* string */ resmsg;
var /* number */ root ; eval( makeref( "root" ) );
var /* string */ errmsg; eval( makeref( "errmsg" ) );
if( sqrt2(v, $root, $errmsg) ) resmsg = `Success: ${root}`;
else resmsg = `Error: ${errmsg}`;
console.log(`Square root of ${v}: ` + resmsg );
}
test("s" );
test(-5 );
test( 1.44);
prints:
Square root of s: Error: value is not a number.
Square root of -5: Error: value is negative.
Square root of 1.44: Success: 1.2
"Pointers" created by this method are reusable in other functions and subsequent invocations of the same function. For example, you could define a function that appends strings:
// Append string `val` to the string pointed to by $s, using `sep` as separator:
// $s shall not point to an undefined value.
function append($s, sep, val)
{ if( $s._ != '' ) $s._ += sep;
$s._ += val;
}
and use it thus:
const sep = ", "
var s; eval( makeref("s") );
s = '';
append( $s, sep, "one" );
append( $s, sep, "two" );
append( $s, sep, "three" );
console.log( s );
It will print:
one, two, three