how to test a json file with json schema file - javascript

I'm a newbie to JSON. I've been learning JSON Schema, but I'm stuck on testing my user.json file against my json-schema.json file. Please note that I need a JavaScript check that returns either true or false so I can process further. I've pasted my files below.
json-schema.json
{
  "description": "Any validation failures are shown in the right-hand Messages pane.",
  "type": "object",
  "properties": {
    "foo": {
      "type": "number"
    },
    "bar": {
      "type": "string",
      "enum": [
        "a",
        "b",
        "c"
      ]
    }
  }
}
user.json
{
  "foo": 12345,
  "bar": "a"
}
When I tested the above files at http://jsonschemalint.com/#/version/draft-05/markup/json, it says user.json is valid against the schema. But I need to test this locally.
Thanks in advance.

You can use one of the JSON Schema validators.
Here is an example using one of these libraries, ajv:
import Ajv from 'ajv';
import schema from './schema.json';
import data from './data.json';

function isValid(schema, data) {
  const ajv = new Ajv();
  const valid = ajv.validate(schema, data);
  if (!valid) {
    console.log(ajv.errors);
    return false;
  }
  return true;
}

console.log(isValid(schema, data)); // true or false
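If you are running plain Node.js without a bundler that supports JSON imports, a minimal sketch of the same check (assuming ajv is installed and the files sit next to the script) would be:
const Ajv = require('ajv');
const fs = require('fs');

// load the schema and the document from disk
const schema = JSON.parse(fs.readFileSync('./json-schema.json', 'utf-8'));
const data = JSON.parse(fs.readFileSync('./user.json', 'utf-8'));

const ajv = new Ajv();
const valid = ajv.validate(schema, data); // true or false
if (!valid) console.log(ajv.errors);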


Parse non JSON to JSON

I have a file with data in it that I need to parse and store in a DB. Below is an example of 2 entries in the file. I'm not quite sure what the structure is (although it looks to be ndJSON). I am trying to parse the data into a JSON object in order to store it in a DB, but cannot seem to figure it out. Here is what I have so far:
var ndjson = {
  "sequence-num": "0123456789",
  "version": "N1.4",
  "record-type": "R",
  "session-id": "197-30760303",
  "date": "2021-07-23 15:00:53",
  "passport-header": { "alg": "ES256", "ppt": "test", "typ": "passport", "x5u": "https://cr.com" },
  "passport-payload": { "attest": "A", "dest": { "tn": ["0123456789"] }, "iat": 0123456789, "orig": { "tn": "0123456789" }, "origid": "c699f78a-ebc6-11eb-bfd8-bec0bbc98888" },
  "identity-header": "eyJhbGciOiJFUzI1NiIsInBwdCI6InNoYWtlbiIsInR5cCI6InBhc3Nwb3J0IiwieDV1IjoiaHR0cHM6Ly9jci5zYW5zYXkuY29tL1RvdWNodG9uZV82ODNBIn0.eyJhdHRlc3QiOiJCIiwiZGVzdCI6eyJ0biI6WyIxMjUeyJhdHRlc3QiOiJCIiwiZGVzdCI6eyJ0biI6WyIxMj;info=<https://google.com/>;alg=ES256;ppt=\"test\""
}
{
  "sequence-num": "0123456788",
  "version": "N1.4",
  "record-type": "R",
  "session-id": "214-30760304",
  "date": "2021-07-23 15:00:53",
  "passport-header": { "alg": "ES256", "ppt": "test", "typ": "passport", "x5u": "https://cr.com" },
  "passport-payload": { "attest": "B", "dest": { "tn": ["0123456788"] }, "iat": 0123456788, "orig": { "tn": "0123456788" }, "origid": "c69d0588-ebc6-11eb-bfd8-bec0bbc98888" },
  "identity-header": "eyJhbGciOiJFUzI1NiIsInBwdCI6InNoYWtlbiIsInR5cCI6InBhc3Nwb3J0IiwieDV1IjoiaHR0cHM6Ly9jci5zYW5zYXkuY29tL1RvdWNodG9uZV82ODNBIn0.eyJhdHRlc3QiOiJCIiwiZGVzdCI6eyJ0biI6WyIxMjUeyJhdHRlc3QiOiJCIiwiZGVzdCI6eyJ0biI6WyIxMj;info=<https://google.com/>;alg=ES256;ppt=\"test\""
};

let result = ndjson.split(',').map(s => JSON.parse(s));

console.log('The resulting array of items:');
console.log(result);

console.log('Each item at a time:');
for (o of result) {
  console.log("item:", o);
}
When I run this, I get an Uncaught SyntaxError: Unexpected token ':' error on line 12, at the second "sequence-num": "0123456788" entry.
Any help is appreciated, thank you!
If you actually have ndJSON(newline-delimited JSON) then each line in the file is valid JSON, delimited by newlines. A simple file would look like this:
{"key1": "Value 1","key2": "Value 2","key3": "Value 3","key4": "Value 4"}
{"key1": "Value 5","key2": "Value 6","key3": "Value 7","key4": "Value 8"}
This differs from the formatted data you've posted here, and the difference is important since once you've formatted it, the valid JSON objects cannot simply be distinguished by the presence of newlines.
So, on the assumption that you do have valid ndJSON in its original form, you can extract it by using split() on newlines and then JSON.parse() on each element of the resulting array.
This snippet adds a little file handling to allow a file to be uploaded, but thereafter it uses split() and JSON.parse() to extract the data:
"use strict";
document.getElementsByTagName('form')[0].addEventListener('submit',function(e){
e.preventDefault();
const selectedFile = document.getElementById('inputFile').files[0];
let fr = new FileReader();
fr.onload = function(e){
let ndJSON = e.target.result; // ndJSON extracted here
let ndJSONLines = ndJSON.split('\n');
// Process JSON objects here
ndJSONLines.forEach(function(el){
let obj = JSON.parse(el);
Object.keys(obj).forEach(key=>{
console.log(`Key: ${key}, Value: ${obj[key]}`);
});
});
}
fr.readAsText(selectedFile)
});
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <title>Parsing ndJSON</title>
</head>
<body>
  <form method="post" enctype="multipart/form-data">
    <input type="file" name="inputFile" id="inputFile">
    <input type="submit">
  </form>
</body>
</html>
Output, based on the sample file above:
Key: key1, Value: Value 1
Key: key2, Value: Value 2
Key: key3, Value: Value 3
Key: key4, Value: Value 4
Key: key1, Value: Value 5
Key: key2, Value: Value 6
Key: key3, Value: Value 7
Key: key4, Value: Value 8
Here is what I do:
const end_point_url = 'https://ipfs.io/ipfs/bafkqap33ejwgs3tfgerduitomrvhg33oebtg64tnmf2c4lroej6qu6zcnruw4zjsei5ce5dinfzsa2ltebqsa43fmnxw4zbanruw4zjcpufa';

let json = await fetch(end_point_url)
  .then(resp => resp.text())
  .then(buf => {
    // NDJSON format: drop the trailing newline, split on newlines, parse each line
    return buf.slice(0, -1).split('\n').map(JSON.parse);
  })
  .catch(console.error);
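If the ndJSON sits in a local file instead, the same split-and-parse approach works in Node.js (a sketch, with a hypothetical file path):
const fs = require('fs');

// read the raw text, drop empty lines (e.g. a trailing newline), parse each line
const records = fs.readFileSync('./records.ndjson', 'utf-8')
  .split('\n')
  .filter(line => line.trim() !== '')
  .map(line => JSON.parse(line));

console.log(records.length, 'records parsed');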

Editing JSON files in NodeJS and DiscordJS

Right, so I am trying to wrap my head around editing (appending data to) a JSON file.
The file (users.json) looks like this:
{
  "users": {
    "id": "0123456789",
    "name": "GeirAndersen"
  }
}
Now I want to add users to this file and retain the formatting, which is where I'm stuck. I have spent numerous hours now trying, reading, trying again... but no matter what, I can't get the result I want.
In my .js file, I get the data from the json file like this:
const fs = require('fs').promises;
let data = await fs.readFile('./test.json', 'utf-8');
let users = JSON.parse(data);
console.log(JSON.stringify(users.users, null, 2));
This console log shows the contents like it should:
{
  "id": "0123456789",
  "name": "GeirAndersen"
}
Just to test, I have defined a new user directly in the code, like this:
let newUser = {
  "id": '852852852',
  "name": 'GeirTrippleAlt'
};
console.log(JSON.stringify(newUser, null, 2));
This console log also shows the data like this:
{
  "id": "852852852",
  "name": "GeirTrippleAlt"
}
All nice and good this far, BUT now I want to join this last one to users.users, and I just can't figure out how to do it correctly. I have tried so many versions and iterations, I can't remember them all.
Last tried:
users.users += newUser;
users.users = JSON.parse(JSON.stringify(users.users, null, 2));
console.log(JSON.parse(JSON.stringify(users.users, null, 2)));
console.log(users.users);
Both of those console.logs show the same thing:
[object Object][object Object]
This is what I want to end up with:
{
  "users": {
    "id": "0123456789",
    "name": "GeirAndersen"
  },
  {
    "id": "852852852",
    "name": "GeirTrippleAlt"
  }
}
When I get this far, I am going to write back to the .json file, but that part isn't an issue.
That's not really a valid data structure, as you're trying to add another object to an object without giving that value a key.
I think what you're really looking for is for 'users' to be an array of users.
{
  "users": [
    {
      "id": "0123456789",
      "name": "GeirAndersen"
    },
    {
      "id": "852852852",
      "name": "GeirTrippleAlt"
    }
  ]
}
You can easily create an array in JS and then push() new items into it. You can JSON.stringify() that with no issue.
const myValue = {
  users: []
};

const newUser = {
  id: '0123456789',
  name: 'GeirAndersen'
};

myValue.users.push(newUser);
const stringified = JSON.stringify(myValue);
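To also write the result back while retaining the formatting, a minimal sketch of the full read, push, and write cycle (assuming the array layout shown above and the fs.promises API from the question) could be:
const fs = require('fs').promises;

async function addUser(newUser) {
  // read and parse the current file
  const data = JSON.parse(await fs.readFile('./users.json', 'utf-8'));
  data.users.push(newUser);
  // the third argument of JSON.stringify keeps the two-space formatting
  await fs.writeFile('./users.json', JSON.stringify(data, null, 2));
}

addUser({ id: '852852852', name: 'GeirTrippleAlt' });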

Graphql - creating a schema from a generated JSON file

I'm trying to create a custom graphql schema to use on my graphql yoga server. The graphql yoga server is just a proxy to another GraphQL API, from which I have managed to retrieve a schema in JSON format. Here is a preview of what that schema looks like:
{
  "data": {
    "__schema": {
      "queryType": {
        "name": "Query"
      },
      "mutationType": null,
      "subscriptionType": null,
      "types": [
        {
          "kind": "OBJECT",
          "name": "Core",
          "description": null,
          "fields": [
            {
              "name": "_meta",
              "description": null,
              "args": [],
              "type": {
                "kind": "NON_NULL",
                "name": null,
                "ofType": {
                  "kind": "OBJECT",
                  "name": "Meta",
                  "ofType": null
                }
              },
              "isDeprecated": false,
              "deprecationReason": null
            },
            {
              "name": "_linkType",
              "description": null,
              "args": [],
              "type": {
                "kind": "SCALAR",
                "name": "String",
                "ofType": null
              },
              "isDeprecated": false,
              "deprecationReason": null
            }
          ],
          "inputFields": null,
          "interfaces": [
            {
I now want to take this generated JSON schema and use it to create a GraphQL schema to use in my graphql yoga server. I believe the correct way to do this is by using the GraphQLSchema constructor from graphql along with a root query. Here is my code attempting this:
schema = new GraphQLSchema({
  query: new GraphQLObjectType({
    name: 'Query',
    fields: schema.data.__schema
  })
});
The above code gives me the following error:
Error: Query.mutationType field config must be an object
I'm not entirely sure where it's going wrong, or whether this is the proper approach to creating a GraphQL schema from generated JSON.
The JSON you have is the results of an introspection query. Unfortunately, introspection will not allow you to copy a remote schema. That's because while it does identify what fields exist in a schema, it does not tell you anything about how they should be executed. For example, based on the snippet you posted, we know the remote server exposes a _meta query that returns a Meta type -- but we don't know what code to run to resolve the value returned by the query.
Technically, it's possible to pass the results of an introspection query to buildClientSchema from the graphql/utilities module. However, the schema will not be executable, as the docs point out:
Given the result of a client running the introspection query, creates and returns a GraphQLSchema instance which can be then used with all GraphQL.js tools, but cannot be used to execute a query, as introspection does not represent the "resolver", "parse" or "serialize" functions or any other server-internal mechanisms.
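For reference, a minimal sketch of that non-executable route (assuming the introspection result shown above is saved as schema.json) might look like:
import { buildClientSchema, printSchema } from 'graphql';
import introspectionResult from './schema.json';

// build a GraphQLSchema instance from the "data" portion of the introspection result;
// it has no resolvers, so it cannot execute queries
const schema = buildClientSchema(introspectionResult.data);
console.log(printSchema(schema)); // prints the schema in SDL form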
If you want to create a proxy to another GraphQL endpoint, the easiest way is to use makeRemoteExecutableSchema from graphql-tools.
Here's the example based on the docs:
import { HttpLink } from 'apollo-link-http';
import { introspectSchema, makeRemoteExecutableSchema } from 'graphql-tools';
import fetch from 'node-fetch';

const link = new HttpLink({ uri: 'http://your-endpoint-url/graphql', fetch });

async function getRemoteSchema () {
  const schema = await introspectSchema(link);
  return makeRemoteExecutableSchema({
    schema,
    link,
  });
}
The resulting schema is a GraphQLSchema object that can be used like normal:
import { GraphQLServer } from 'graphql-yoga'

async function startServer () {
  const schema = await introspectSchema(link);
  const executableSchema = makeRemoteExecutableSchema({
    schema,
    link,
  });
  const server = new GraphQLServer({ schema: executableSchema })
  server.start()
}

startServer()
graphql-tools also allows you to stitch schemas together if you not only wanted to proxy the existing endpoint, but wanted to add on to it as well.

pass json in process variables in camunda process

I am trying to pass a JSON payload as a variable value to start a process definition using the engine-rest API, as below:
API:
http://localhost:8080/engine-rest/process-definition/processService:1:9459dbe9-6b2c-11e8-b9e8-28d2447c697a/start
Body:
{
  "variables": {
    "payload": {
      "value": {
        "mode": "email",
        "meta": [{
          "key": "topic",
          "value": "weather"
        }, {
          "key": "qos",
          "value": "2"
        }]
      },
      "type": "Json"
    }
  }
}
but it gives a 400 BAD REQUEST with the error below:
Must provide 'null' or String value for value of SerializableValue type 'Json'.
I have also used an expression in my BPMN process to fetch a key-value pair, like below, and it is also throwing an error:
${S(payload).prop("mode").stringValue() == 'email'}
What does work: when I send the JSON payload in the body in string format, it works fine.
API:
http://localhost:8080/engine-rest/process-definition/processService:1:9459dbe9-6b2c-11e8-b9e8-28d2447c697a/start
Body:
{
  "variables": {
    "payload": {
      "value": "{\"mode\": \"email\",\"meta\": [{\"key\": \"topic\",\"value\": \"weather\"},{\"key\": \"qos\",\"value\": \"2\"}]}",
      "type": "String"
    }
  }
}
This is the Java code I am using to fetch the JSON payload:
public void notify(DelegateExecution delegateProcessExecution) throws Exception {
  Object notificationPayload =
      delegateProcessExecution.getVariable("payload");
  if (null != notificationPayload) {
    String notifyPayload = notificationPayload.toString();
    JSONObject inputJson = new JSONObject(notifyPayload);
  }
  // ...
}
I want this payload to stay JSON for the whole process, so that I don't need to convert it to a string as in the working example above.
You only need to change the type to "json". Example:
{
  "variables": {
    "broker": {
      "value": "{\"name\":\"Broker Name\"}",
      "type": "json"
    }
  }
}
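Note that the value is still an escaped JSON string. If you build the request body in JavaScript, JSON.stringify produces that escaped string for you; a minimal sketch against the start endpoint from the question:
const payload = {
  mode: 'email',
  meta: [{ key: 'topic', value: 'weather' }, { key: 'qos', value: '2' }]
};

const body = {
  variables: {
    payload: {
      value: JSON.stringify(payload), // the escaped JSON string the engine expects
      type: 'json'
    }
  }
};

fetch('http://localhost:8080/engine-rest/process-definition/processService:1:9459dbe9-6b2c-11e8-b9e8-28d2447c697a/start', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(body)
});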
This is by design in the REST engine API; it supports other data formats too, so the value has to be an escaped JSON string. See https://app.camunda.com/jira/browse/CAM-9617.
The solution is to pass an escaped JSON String as value, as you have pointed out above. One can also use "type": "Object" if the engine has a Jackson Java Bean on the classpath that matches the given value. You supply the bean type name in a valueInfo object:
https://docs.camunda.org/manual/7.10/reference/rest/process-definition/post-start-process-instance/#request-body
For example:
{
  "variables": {
    "payload": {
      "value": "{\"mode\": \"email\",\"meta\": [{\"key\": \"topic\",\"value\": \"weather\"},{\"key\": \"qos\",\"value\": \"2\"}]}",
      "type": "Object",
      "valueInfo": {
        "objectTypeName": "my.own.BeanWithModeAndMetaProps",
        "serializationDataFormat": "application/json"
      }
    }
  }
}

AWS - import JSON file to load Dynamo table

I have a json file that I want to use to load my Dynamo table in AWS.
In the AWS console, there is only an option to create one record at a time. Not good :)
Essentially, my .json file is an array of objects which hold the data for each column in the table, i.e.:
{
  "Column1": "Column1 Value",
  "Column2": "Column2 Value",
  "Column3": "Column3 Value",
  "Column4": "Column4 Value"
},
Is there any way to do this via the AWS console by importing my JSON file, or do I have to do it programmatically with the AWS JS SDK?
The answer from E.J. Brennan looks correct, for a single record, but it doesn't answer the original question (which needs to add an array of records).
For this, the command is
aws dynamodb batch-write-item --request-items file://aws-requests.json
But, you'll need to make a modified JSON file, like so (note the DynamoDB JSON that specifies data types):
{
  "YourTableName": [
    {
      "PutRequest": {
        "Item": {
          "Column1": { "S": "Column1 Value" },
          "Column2": { "S": "Column2 Value" },
          "Column3": { "S": "Column3 Value" },
          "Column4": { "S": "Column4 Value" }
        }
      }
    },
    {
      "PutRequest": {
        "Item": {
          "Column1": { "S": "Column1 Value" },
          "Column2": { "S": "Column2 Value" },
          "Column3": { "S": "Column3 Value" },
          "Column4": { "S": "Column4 Value" }
        }
      }
    }
  ]
}
You don't need to use the API. You could use the AWS CLI instead, e.g.:
aws dynamodb put-item --table-name MusicCollection --item file://item.json --return-consumed-capacity TOTAL
but you may need to tweak your JSON format a bit.
More examples and documentation here:
https://docs.aws.amazon.com/cli/latest/reference/dynamodb/put-item.html
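The tweak is the same DynamoDB-typed JSON: an item.json for the put-item call above might look like this (a sketch reusing the assumed column names from the question):
{
  "Column1": { "S": "Column1 Value" },
  "Column2": { "S": "Column2 Value" },
  "Column3": { "S": "Column3 Value" },
  "Column4": { "S": "Column4 Value" }
}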
I used boto3 in Python to load the data:
import boto3
import json

dynamodbclient = boto3.resource('dynamodb')
sample_table = dynamodbclient.Table('ec2metadata')

with open('/samplepath/spotec2interruptionevent.json', 'r') as myfile:
    data = myfile.read()

# parse file
obj = json.loads(data)

# instance_id and cluster_id (the table key) are assumed to be fields in the event JSON
response = sample_table.put_item(
    Item={
        'instance_id': obj['instance_id'],
        'cluster_id': obj['cluster_id'],
        'event': obj
    }
)
Here is a sample for JavaScript:
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Js.02.html#GettingStarted.Js.02.02
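In the same spirit as that sample, here is a minimal sketch with the AWS SDK for JavaScript v2 DocumentClient (the table name, region, and file path are assumptions):
const AWS = require('aws-sdk');
const fs = require('fs');

AWS.config.update({ region: 'us-east-1' });
const docClient = new AWS.DynamoDB.DocumentClient();

// items.json is assumed to hold an array of plain objects, one per row
const items = JSON.parse(fs.readFileSync('./items.json', 'utf-8'));

items.forEach(item => {
  docClient.put({ TableName: 'YourTableName', Item: item }, err => {
    if (err) console.error('Unable to add item:', err);
  });
});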
The boto3 code above doesn't read individual JSON objects. If you want to do that from a JSON file containing an array of objects:
import boto3
import json

dynamodbclient = boto3.resource('dynamodb')
sample_table = dynamodbclient.Table('ec2metadata')

with open('/samplepath/spotec2interruptionevent.json', 'r') as myfile:
    data = myfile.read()

# parse file into a list of objects
objects = json.loads(data)

# instance_id and cluster_id (the table key) must be present in each object
for obj in objects:
    sample_table.put_item(Item=obj)
