Deserialize DeepObject Querystring Keys in Azure Functions / JavaScript

We are constructing an API with Azure Functions, and the spec calls for DeepObject references in the GET request's querystring. So, the structure looks like https://example.com/api/persons?name[first]=Todd. The expectation is that some of the query keys may be DeepObject references, others will be flat.
This is a pattern that apparently Express can handle, but Azure Functions uses an ASP.NET router. The expectation is that the reference above should deserialize into req.params: { name: { first: "Todd" } }. However, instead the output looks like req.params: { "name[first]": "Todd" }.
I would really love to not have to regex/search each key, so does anyone know if:
There's a config/flag in ASP.NET to support this structure?
There's a good pattern in Javascript to deserialize this in some functional -- or at least non-idiosyncratic -- way?
Note: Anyone who suggests some use of the eval() method will not be selected. But for playing you will take home a comment with a Nineties reference, because that was the last decade in which use of that method was considered acceptable. 😜

For this problem, I don't think there is a configuration we can change to support this structure. What we can do is implement it in code ourselves.
Here is my function code:
module.exports = async function (context, req) {
    console.log("======query url is:" + req.url);
    const result = queryStringToJSON(req.url);
    console.log(result);
    context.res = {
        body: "success"
    };
}
function queryStringToJSON(queryUrl) {
    var queryString = '';
    if (queryUrl.indexOf('?') > -1) {
        queryString = queryUrl.split('?')[1];
    }
    var pairs = queryString.split('&');
    var result = {};
    pairs.forEach(function (pair) {
        if (pair.indexOf('[') > -1) {
            // DeepObject key such as name[first]=Todd
            var nameObj = {};
            var firstObj = {};
            var nameStr = pair.substring(0, pair.indexOf('['));
            var firstStr = pair.substring(pair.indexOf('[') + 1, pair.indexOf(']'));
            firstObj[firstStr] = decodeURIComponent(pair.split('=')[1] || '');
            nameObj[nameStr] = firstObj;
            Object.assign(result, nameObj);
        } else {
            // flat key such as email=test@mail.com
            pair = pair.split('=');
            result[pair[0]] = decodeURIComponent(pair[1] || '');
        }
    });
    return result;
}
After starting the function project, I request it with http://localhost:7071/api/HttpTrigger1?name[first]=Todd&email=test@mail.com, and the result shows the query parsed as expected.
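Roughly, based on the parsing function above, the logged result should be:
{ name: { first: 'Todd' }, email: 'test@mail.com' }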

After much searching, I wasn't able to find any way to natively implement this in the ASP.NET router. Though there are plenty of suggestions on how to deserialize this structure directly in ASP.NET controller functions, I am working in JavaScript.
What was helpful was the qs library, available on npm, which supports a number of nuances related to this query string structure.
const qs = require("qs");
const { query } = req;
// => { "name[first]": "Todd" }
const deserializedQuery = qs.parse(query);
// => { name: { first: "Todd" } }
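For what it's worth, qs also handles deeper nesting and array brackets, which is part of what I meant by nuances (the values below are just illustrative):
qs.parse("name[first]=Todd&name[last]=Smith&tags[]=a&tags[]=b");
// => { name: { first: "Todd", last: "Smith" }, tags: ["a", "b"] }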
Equally helpful: I also need a way to serialize my outbound query strings in this same format, and qs works with the paramsSerializer option in Axios.
const params = { name: { first: "Todd" } };
const paramsSerializer = (params) => { return qs.stringify(params); };
const reqOptions = { params, paramsSerializer };
axios.get("https://example.com/api/persons", reqOptions);
// => GET https://example.com/api/persons?name[first]=Todd
Thanks to @hury-shen for a completely workable solution. It just wasn't the turnkey solution I was looking for.

Related

Execute SPARQL Query with vue.js

I want to make a website that uses SPARQL in Vue.js. The scenario I want is to create a dedicated place to write a SPARQL query and then execute it with Vue.js. Is all that possible? If possible, how should I start? If not, is there any alternative other than Vue.js? Please help.
I am not a JS pro by any means, but for a similar problem I used axios for the HTTP request.
This worked fine for my use case. You will find a precise description of the JSON result format in the W3C spec linked below.
var params = new URLSearchParams();
// queryArtistsByNameTpl contains the actual SPARQL query,
// with $artistName as a placeholder
let similarGroupsQuery = queryArtistsByNameTpl.replace("$artistName", this.artistName);
params.append('query', similarGroupsQuery);
let getArtistsHandler = sparqlResponseHandler(this, 'artistList');
axios.post(sparqlEndPoint, params).then(getArtistsHandler);
function sparqlResponseHandler(currentObj, currList) {
    return function (response) {
        const rows = response.data.results.bindings;
        currentObj[currList] = [];
        if (rows.length > 0) {
            rows.forEach(function (item, index) {
                var record = {};
                for (var prop in item) {
                    if (item.hasOwnProperty(prop)) {
                        record[prop] = item[prop].value;
                    }
                }
                currentObj[currList].push(record);
            })
        } else {
            console.log("no data from SPARQL end point");
        }
    }
}
The JSON returned by SPARQL endpoints is specified in https://www.w3.org/TR/sparql11-results-json/ , which is rather short and very understandable.
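For reference, the response body follows this shape, which is why the handler above reads response.data.results.bindings (the variable names and values here are just illustrative):
{
    "head": { "vars": ["artist", "name"] },
    "results": {
        "bindings": [
            {
                "artist": { "type": "uri", "value": "http://example.org/artist/1" },
                "name": { "type": "literal", "value": "Some Artist" }
            }
        ]
    }
}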
The code can certainly be improved by someone more knowledgeable than me, but it was fine for the tech demo we used it for.

How to crawl a site using Node.js

I can't believe that I'm asking such an obvious question, but I still get the wrong result in the console log.
The console shows the crawled result as "[]", and I've checked at least 10 times for typos. Anyway, here's the JavaScript code.
I want to crawl the site.
This is the kangnam.js file:
const axios = require('axios');
const cheerio = require('cheerio');
const log = console.log;

const getHTML = async () => {
    try {
        return await axios.get('https://web.kangnam.ac.kr', {
            headers: {
                Accept: 'text/html'
            }
        });
    } catch (error) {
        console.log(error);
    }
};

getHTML()
    .then(html => {
        let ulList = [];
        const $ = cheerio.load(html.data);
        const $allNotices = $("ul.tab_listl div.list_txt");
        $allNotices.each(function(idx, element) {
            ulList[idx] = {
                title : $(this).find("list_txt title").text(),
                url : $(this).find("list_txt a").attr('href')
            };
        });
        const data = ulList.filter(n => n.title);
        return data;
    }).then(res => log(res));
I've checked and revised it at least 10 times, yet JS still gives this result:
root#goorm:/workspace/web_platform_test/myapp/kangnamCrawling(master)# node kangnam.js
[]
Mate, I think the issue is you're parsing it incorrectly.
$allNotices.each(function(idx, element) {
    ulList[idx] = {
        title : $(this).find("list_txt title").text(),
        url : $(this).find("list_txt a").attr('href')
    };
});
The data that you're trying to parse for is located within the first index of the $(this) array, which is really just storing a DOM Node. As to why the DOM stores Nodes this way, it's most likely for efficiency and effectiveness. All the data that you're looking for is contained within this Node object. However, find() is superficial and only checks the indexes of an array for the conditions you supplied, which is a string search. The $(this) array only contains a Node, not a string, so when you call .find() for a string, it will always return undefined.
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/find
You need to first access the initial index and use property accessors on the Node. You also don't need to use $(this), since you're already given the same data in the element parameter; it's more efficient to just use element, since you've already been handed the data you need to work with.
$allNotices.each(function(idx, element) {
    ulList[idx] = {
        title : element.children[0].attribs.title,
        url : element.children[0].attribs.href
    };
});
This should now populate your data array correctly. You should always analyze the data structures you're parsing for since that's the only way you can correctly parse them.
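For illustration, the populated array would then contain entries shaped roughly like this (the values are hypothetical and depend on the site's markup):
[
    { title: 'Some notice title', url: '/board/notice/123' },
    { title: 'Another notice title', url: '/board/notice/124' }
]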
Anyways, I hope I solved your problem!

How to set up App Insights with the Azure Search JavaScript SDK

From the Azure Search documentation, I know that we have to get some search information to set up App Insights telemetry.
The problem is: how do I get the SearchID information from the @azure/search-documents SearchDocumentResult?
Using the @azure/search-documents module, you can set up your client and add custom headers to operations like so:
const { SearchClient, AzureKeyCredential } = require("@azure/search-documents");

const indexName = "nycjobs";
const apiKey = "252044BE3886FE4A8E3BAA4F595114BB";
const client = new SearchClient(
    `https://azs-playground.search.windows.net/`,
    indexName,
    new AzureKeyCredential(apiKey)
);

async function main() {
    var searchId = '';
    const searchResults = await client.search('Microsoft', {
        top: 3,
        requestOptions: {
            customHeaders: {
                'Access-Control-Expose-Headers': 'x-ms-azs-searchid',
                'x-ms-azs-return-searchid': 'true'
            },
            shouldDeserialize: (response) => {
                searchId = response.headers.get('x-ms-azs-searchid');
                return true;
            }
        }
    });
    console.log(`Search ID: ${searchId}\n`);
    for await (const result of searchResults.results) {
        console.log(`${result.document.business_title}\n${result.document.job_description}\n`);
    }
}
It seems that currently the only way to get the ID out is the shouldDeserialize callback shown in the example, since it gives you the raw response, including the headers, before deserialization; the headers are stripped from some objects, such as the paged response objects returned by search.
I'm assuming that you care more about search query telemetry and not indexer telemetry, but please correct me if I'm wrong. Is this documentation page helpful? https://learn.microsoft.com/azure/search/search-traffic-analytics
From that page, here is how you set the searchId:
request.setRequestHeader("x-ms-azs-return-searchid", "true");
request.setRequestHeader("Access-Control-Expose-Headers", "x-ms-azs-searchid");
var searchId = request.getResponseHeader('x-ms-azs-searchid');
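For context, that snippet comes from the browser-side example on that page, where request is presumably an XMLHttpRequest. A rough sketch of how those calls fit together (searchServiceUrl and the request body here are my assumptions, not from the docs):
var request = new XMLHttpRequest();
request.open("POST", searchServiceUrl, true); // searchServiceUrl: your query endpoint (assumption)
request.setRequestHeader("Content-Type", "application/json");
request.setRequestHeader("x-ms-azs-return-searchid", "true");
request.setRequestHeader("Access-Control-Expose-Headers", "x-ms-azs-searchid");
request.onload = function () {
    // the service echoes the search id back in this response header
    var searchId = request.getResponseHeader("x-ms-azs-searchid");
    console.log("Search ID: " + searchId);
};
request.send(JSON.stringify({ search: "Microsoft" }));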
Please let me know if I'm misunderstanding the question.

How do you prevent a NodeJS server from potentially exposing function code?

Let's imagine you're building a banking app backend. You want to respond to a user with a string that reports the balance, but you forgot to add ().
class User {
    constructor() { console.log("ctor") }
    balance() { console.log("secret balance code") }
}
Then when referencing the user, instead of writing this:
const userA = new User();
return `Your balance is $${userA.balance()}`;
I accidentally write this:
const userA = new User();
return `Your balance is $${userA.balance}`;
Which sadly outputs:
'Your balance is balance() { console.log("secret balance code")}'
Which leaks the source code.
You do not need to worry about it: if you forget something, testing will help you find it. Nobody deploys a serious project to production without testing. It is better to write tests than to try to correct language behavior.
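For example, a small unit test along these lines (using Node's built-in assert, with a hypothetical buildBalanceMessage helper and the User class from the question) would catch the missing parentheses right away:
const assert = require('assert');

// hypothetical helper that builds the response string for a user
function buildBalanceMessage(user) {
    return `Your balance is $${user.balance}`; // bug: missing ()
}

// fails loudly because the message contains function source instead of a number
const msg = buildBalanceMessage(new User());
assert(!msg.includes('console.log'), 'Response leaks function source code');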
One workaround is to override all functions' toString like so:
> Function.prototype.toString = () => {return "bla"}
[Function]
> '' + new User().balance
'bla'
When responding to a request, you're undoubtedly going to be running the response through some sort of serializer. JSON, CBOR, etc. Handle it on that layer.
Fortunately for you, if you're returning JSON data, it's already handled:
JSON.stringify(someFunction);
// undefined
If you really are returning plain text strings, you can still have such a layer that ensures you're not putting out functions.
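As a sketch of such a layer for plain text, written as a tagged template that refuses to interpolate anything but primitives (safeText is a made-up name, not from any framework):
function safeText(strings, ...values) {
    return strings.reduce((out, chunk, i) => {
        const value = values[i - 1];
        // reject functions and objects before they end up in the response body
        if (typeof value === 'function' || (typeof value === 'object' && value !== null)) {
            throw new TypeError('Refusing to serialize a non-primitive into a text response');
        }
        return out + value + chunk;
    });
}

// safeText`Your balance is $${userA.balance}`; // throws instead of leaking source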
I have a solution which is definitely slower than raw templates, but here it goes.
Basically, I just send a context object which has all the strings I want to resolve, and before the actual string replacement I check the types of the arguments.
function resolveTemplates(str, args) {
    if (args && typeof args === 'object') {
        Object.values(args).forEach((argument) => {
            // check here for any unwanted types
            if (typeof argument === 'function') {
                throw new Error('Cannot send function to create raw Strings');
            }
        });
    }
    const rx = /\{([^{}]*)\}/g;
    let match = {};
    let matches = [];
    while (match = rx.exec(str)) {
        matches.push(match);
    }
    // replace from the end so earlier match indexes stay valid
    matches.reverse();
    matches.forEach(function (match) {
        const key = match[1];
        const index = match.index;
        str = str.slice(0, index) + args[key] + str.slice(index + 2 + key.length);
    });
    return str;
}
resolveTemplates('Hello! My name is {firstName} {lastName}', {firstName: 'Shobhit', lastName: 'Chittora'})
PS: Instead of throwing errors for functions as arguments, you could call the functions. But binding the functions to the correct context can be an overhead to think about and is generally not recommended.

How to expose an object in a Node module

I have a module which does parsing (a parsing functionality); other modules should query this parser's values.
My questions are:
how should I build it (design aspects)?
which method should init the parser (the first method that calls it to get a specific value)?
This is sample code which returns two objects from the parser, but I don't think this is the right way to do it, since I may need to provide additional properties.
This is the parse module:
parse = function (data) {
    var ymlObj = ymlParser.parse(data);
    return {
        web: ymlObj.process_types.web,
        con: ymlObj.con
    };
};
If I understood you right, you can just make a simple module with getters and a setter.
(parse.js)
var ymlObj = {};

function Parse() {}

Parse.prototype.setData = function (data) {
    ymlObj = data;
};

Parse.prototype.getWeb = function () {
    return ymlObj.process_types.web;
};

Parse.prototype.getCon = function () {
    return ymlObj.con;
};

module.exports = new Parse();
(parseUser.js)
var parse = require('./parse.js');

function ParseUser() { }

ParseUser.prototype.useParse = function () {
    console.log(parse.getCon());
};

module.exports = new ParseUser();
(app.js)
var parse = require('./parse.js');
var parseUser = require('./parseUser.js');

parse.setData({ ... });
parseUser.useParse();
You still have to do the basics like handling exceptions, but I hope this helps you understand the basic structure.
As for init, it really depends on when you want to initialize (fetch?) your data and where that data comes from. You can set a timestamp to indicate how old your data is and decide whether you still rely on it or fetch newer data. Or you can register callbacks from your user modules to deal with new data every time it's fetched.
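A minimal sketch of those two ideas, extending parse.js above (isStale, onData and listeners are hypothetical names, not part of any library):
var listeners = [];

// remember when the data was last set, and notify registered callbacks
Parse.prototype.setData = function (data) {
    ymlObj = data;
    this.updatedAt = Date.now();
    listeners.forEach(function (cb) { cb(data); });
};

// true if the data is older than maxAgeMs
Parse.prototype.isStale = function (maxAgeMs) {
    return !this.updatedAt || (Date.now() - this.updatedAt) > maxAgeMs;
};

// user modules register a callback that runs whenever new data arrives
Parse.prototype.onData = function (cb) {
    listeners.push(cb);
};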
So it's up to you how you design your module. ;)
