I have the following config set in tracing.js (1:1 from the documentation) and I can't figure out how to set the service name. Right now in Datadog all I see is "unknown_service".
// tracing.js — OpenTelemetry Node SDK bootstrap. No service name is set here,
// which is why the backend shows "unknown_service".
// FIX: npm scopes use "@"; "#opentelemetry/..." is a copy/paste artifact and
// will not resolve.
const opentelemetry = require("@opentelemetry/sdk-node");
const { getNodeAutoInstrumentations } = require("@opentelemetry/auto-instrumentations-node");
const { OTLPTraceExporter } = require("@opentelemetry/exporter-trace-otlp-http");
const { diag, DiagConsoleLogger, DiagLogLevel } = require('@opentelemetry/api');

// How can I set a custom service name?
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);

const sdk = new opentelemetry.NodeSDK({
  // Export spans over OTLP/HTTP to the endpoint named by OPENTELEMETRY_URL.
  traceExporter: new OTLPTraceExporter({
    url: process.env.OPENTELEMETRY_URL,
    headers: {},
  }),
  // Enable the bundled auto-instrumentations.
  instrumentations: [getNodeAutoInstrumentations()],
});

sdk.start();
There are a couple of ways to do that:
First adding it to your code:
// Same bootstrap, but with an explicit Resource so exported spans carry
// service.name (this is what fixes the "unknown_service" display).
// FIX: npm scopes use "@"; "#opentelemetry/..." is a copy/paste artifact.
const opentelemetry = require("@opentelemetry/sdk-node");
const { getNodeAutoInstrumentations } = require("@opentelemetry/auto-instrumentations-node");
const { OTLPTraceExporter } = require("@opentelemetry/exporter-trace-otlp-http");
const { Resource } = require('@opentelemetry/resources');
const { SemanticResourceAttributes } = require('@opentelemetry/semantic-conventions');
const { diag, DiagConsoleLogger, DiagLogLevel } = require('@opentelemetry/api');

diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);

const sdk = new opentelemetry.NodeSDK({
  traceExporter: new OTLPTraceExporter({
    url: process.env.OPENTELEMETRY_URL,
    headers: {},
  }),
  instrumentations: [getNodeAutoInstrumentations()],
  // SERVICE_NAME is the semantic-conventions key "service.name"; replace the
  // placeholder with your real service name.
  resource: new Resource({
    [SemanticResourceAttributes.SERVICE_NAME]: '<service-name>',
  }),
});

sdk.start();
Another way is using the environment variable OTEL_SERVICE_NAME, as recommended by @Jan Garaj:
OTEL_SERVICE_NAME=<service-name>
At last, you can also use the environment variable OTEL_RESOURCE_ATTRIBUTES:
OTEL_RESOURCE_ATTRIBUTES=service.name=<service-name>
Those are the official OpenTelemetry ways of naming a service, IDK if they work with the backend you are using.
That works with OSS tools like Jaeger and Zipkin.
Try to set general-purpose environment variable OTEL_SERVICE_NAME.
Doc: https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/
Related
I am adding query pagination to my GCF query function and am writing unit tests for the pagination query, I am writing a mock function using Node, and the firestore-mock npm library. Note, I can't use Sinon and no third party modules or libraries are allowed aside from firestore-mock.
I am extending the firestore-mock library to add query pagination to our firestore query. Right now, I have 3 methods that extend firestore-mock that I need to write, two of which are completed.
orderBy
limit
startAfter
So far I have successfully written mock methods for limit and orderBy, but am stuck on how to implement startAfter.
Ideally this startAfter function would have the same functionality as the firebase built in startAfter function.
The request I am using for the query pagination has the following structure:
// Example request shape for the paginated query.
// `next` is the pagination cursor — "seconds.nanoseconds" of the last
// document's timestamp (see the Timestamp example below) — TODO confirm.
const paginatedQuery = await f.getQueryPagination({
customerId: ['62005'],
startDate: '2021-11-04T00:00:00.000Z',
endDate: '2021-11-05T00:00:00.000Z',
next: '1657667147.865000000',
},
'wal-com', ['62005'],
logger
);
I have looked at the docs for the startAfter cursor query, and in my request I want the startAfter method to set the cursor to the document that starts with the timestamp that next was converted from.
So for the above example: next: '1657667147.865000000' would be equivalent to the next document having an eventDateTime value of
TimeStamp: {
_seconds: 1657667147
_nanoSeconds: 865000000
}
EDIT: Here is the code I have right now for startAfter but it doesn't work as intended
const FirebaseMock = require('firebase-mock');
const QueryMock = require('firestore-mock/mock_constructors/QueryMock');
// Build (a thunk returning) a predicate that identifies the startAfter()
// cursor document, matching either by document id or by document data.
QueryMock.prototype._startAfter = function(doc) {
  const startAfterId = doc.id;
  const startAfterData = doc.data();

  // FIX: the original compared the two data() objects with `===`, which is
  // reference equality — two distinct snapshots never compare equal, so the
  // data branch could never match. Compare serialized values instead
  // (adequate for plain, JSON-like mock documents).
  const startFinder = function(candidate) {
    if (candidate.id === startAfterId) {
      return true;
    }
    return JSON.stringify(candidate.data()) === JSON.stringify(startAfterData);
  };

  // Shape preserved from the original: callers get a builder that yields the
  // finder, not the finder itself.
  const buildStartFinder = function() {
    return startFinder;
  };
  return buildStartFinder;
};
// Mock of Query.startAfter(): repositions this._docs after the given cursor,
// which may be a DocumentSnapshot or a Timestamp. Silently a no-op when no
// value is passed.
QueryMock.prototype.startAfter = function(value) {
// if value is passed as a parameter
// set the cursor for the query to the document with the timestamp equal to value
if (value) {
// NOTE(review): constructor-name checks break if the snapshots come from a
// different mock class name — confirm against firestore-mock's classes.
if (value.constructor.name === 'DocumentSnapshot') {
// NOTE(review): _startAfter is defined on QueryMock.prototype above, but is
// called here via this.firestore — verify the firestore mock actually
// exposes it, otherwise this is a TypeError at runtime.
this._docs = this.firestore._startAfter(value, this._docs, this.id);
} else if (value.constructor.name === 'Timestamp') {
// NOTE(review): TimestampMock is never defined or required in this file, so
// this branch throws a ReferenceError as written.
this._docs = this.firestore._startAfter(
new TimestampMock(value.seconds, value.nanoseconds),
this._docs,
this.id
);
} else {
throw new Error(
'startAfter() only accepts a DocumentSnapshot or a Timestamp'
);
}
}
};
module.exports = {
FirestoreMock
};
I'm trying to create a listener for incoming transactions with ethers.js (v5.6). According to the docs, to listen to incoming transactions you need to create this filter:
// List all token transfers *to* myAddress:
// Docs example: subscribe to ERC-20 Transfer logs where the second indexed
// topic (the `to` address) equals myAddress. Topic 0 is the event signature
// hash; `null` leaves the `from` topic unconstrained.
filter = {
address: tokenAddress,
topics: [
utils.id("Transfer(address,address,uint256)"),
null,
hexZeroPad(myAddress, 32)
]
};
Having this in my script gives me an error saying utils.id("Transfer(address,address,uint256)"), ReferenceError: utils is not defined. I can't find anything in the docs about importing a utils package. Can anyone sort me out?
My full code:
// Build (and eventually query) a log filter for incoming ERC-20 transfers to
// the loaded user's address.
async function incomingTransactions() {
  // FIX: the original logged "No user loaded" unconditionally — it ran even
  // when a user WAS loaded. Use a guard clause instead.
  if (!loadedUser) {
    console.log("No user loaded");
    return;
  }
  console.log("User loaded", loadedUser);

  // FIX for the ReferenceError: in ethers v5, `id` and `hexZeroPad` live on
  // the `ethers.utils` namespace — there is no bare `utils` global.
  const { ethers } = require("ethers"); // assuming commonjs
  const myAddress = loadedUser.publicKey;
  const filter = {
    address: myAddress,
    topics: [
      ethers.utils.id("Transfer(address,address,uint256)"),
      null,
      ethers.utils.hexZeroPad(myAddress, 32)
    ]
  };
  // let foo = await provider.getLogs(filter)
  // console.log(foo)
}
// Poll every 5 seconds.
// NOTE(review): incomingTransactions() is async and its promise is ignored,
// so any rejection becomes an unhandled rejection; `interval` is kept but
// never cleared.
const interval = setInterval(function() {
incomingTransactions();
}, 5000);
Looks like utils is part of the ethers object, and hexZeroPad and id are part of utils so you can use them like so:
const { ethers } = require("ethers"); // assuming commonjs
// In ethers v5, `id` and `hexZeroPad` are exposed on the `ethers.utils`
// namespace rather than as standalone globals:
ethers.utils.id("Transfer(address,address,uint256)");
ethers.utils.hexZeroPad(myAddress, 32);
I have a container of cost guides in my Azure Cosmos DB (using the core SQL API). Each cost guide has an array of materials. I need to add a material to this array in every document in the container. Is this possible with javascript in a single transaction? I am familiar with partially updating documents individually using the patch operation but I would prefer to do it all at once if possible. I'm using the #azure/cosmos version 3.15 package
This is how I update individual documents in my function app:
// FIX: the npm scope is "@azure/cosmos"; "#azure/cosmos" is a copy/paste
// artifact and will not resolve.
const CosmosClient = require('@azure/cosmos').CosmosClient;
const config = require('../config/config');
const { endpoint, key, databaseId } = config;

// One client/database handle, created at module load and shared across
// function invocations.
const client = new CosmosClient({ endpoint, key });
const database = client.database(databaseId);
module.exports = async function (context, req) {
const containerId = req.query.containerId;
const container = database.container(containerId);
const id = req.query.id;
const updates = req.body;
const querySpec = {
query: `SELECT * from c where c.id = "${id}"`
}
const { resources: items } = await container.items
.query(querySpec)
.fetchAll()
const patchOp = [];
// loop through updates object
Object.keys(updates).map(key => {
patchOp.push({
op: 'replace',
path: `/${key}`,
value: updates[key]
})
})
const { resource: patchSource } = await container.item(items[0].id, items[0].id).patch(patchOp);
}
Technically, no such single-transaction update is currently available via JavaScript. There are other possibilities, such as using .NET, which is used for similar requirements.
Other languages like Java and Python have partial implementations. Terraform can also help a bit with a partial implementation.
https://learn.microsoft.com/en-us/azure/cosmos-db/sql/sql-api-sdk-bulk-executor-dot-net
The closest I have seen is using the bulk or batch operation on items within a container. For example:
// Example: apply multiple item operations as one transactional batch.
// All operations in a batch must target the same logical partition.
const operations = [
  {
    operationType: "Create",
    resourceBody: { id: "doc1", name: "sample", key: "A" }
  },
  {
    operationType: "Upsert",
    partitionKey: 'A',
    resourceBody: { id: "doc2", name: "other", key: "A" }
  }
];
// FIX: `container` is a method on Database, so it must be called with a
// container id — `database.container.items` is undefined. batch() also takes
// the shared partition key as its second argument.
await database.container("<containerId>").items.batch(operations, "A");
Link to azure documentation: https://learn.microsoft.com/en-us/javascript/api/#azure/cosmos/items?view=azure-node-latest##azure-cosmos-items-batch
I am using nodejs for the server.
Currently I have a Json in my project folder.
name.json
{
"name_English": "Apple",
"name_German": "Apfel",
"name_French": "Pomme"
}
When I send request to server, it returns:
GET http://localhost:3030/name
{
"name_English": "Apple",
"name_German": "Apfel",
"name_French": "Pomme"
}
But I found it is not convenient for frontend development.
Is there any way to do something like below?
GET http://localhost:3030/name?lang=en
{
"name": "Apple"
}
GET http://localhost:3030/name?lang=fr
{
"name": "Apfel"
}
Edit 1
The code of getting the Json in Feathers.js
name.class.js
// Translation table loaded once at module load; served verbatim by find().
const nameLists = require('../name.json')

// Feathers service class. As written it returns the entire JSON document and
// ignores the query string (e.g. ?lang=en), which is the behavior the asker
// wants to change.
exports.Test = class Test {
  constructor (options) {
    // Keep whatever service options Feathers passes in.
    this.options = options || {};
  }

  // Feathers `find`: `params.query` carries the parsed query string, but it
  // is unused here.
  async find (params) {
    return nameLists
  }
};
Edit 2
Is it possible to make name.json like this?
{
"name": ${name}
}
Edit 3
The reason I want to achieve above because I have to return whole Json file.
For the internationalization library, it seems this needs to be handled outside the JSON, and I don't know what the best practice is for doing so.
Here's a full demonstration with just express. (Hope that's ok.)
const express = require('express');
const app = express();
const port = 3030;
// Translation table loaded once at startup from the project folder.
const nameLists = require('./name.json');
// Maps a query-string language code to the suffix used in the JSON keys
// (e.g. "en" -> "English", matching keys like "name_English").
const unabbreviateLanguage = {
  en: 'English',
  de: 'German',
  fr: 'French'
}

/**
 * Return a copy of `obj` restricted to the keys whose suffix matches the
 * language `abbr`, with that suffix stripped from the key names.
 * e.g. filterByLanguage({ name_English: 'Apple' }, 'en') -> { name: 'Apple' }
 * An unknown `abbr` matches nothing and yields an empty object.
 */
function filterByLanguage(obj, abbr) {
  const suffix = `_${unabbreviateLanguage[abbr]}`;
  return Object.fromEntries(
    Object.entries(obj)
      .filter(([key]) => key.endsWith(suffix))
      .map(([key, value]) => [key.slice(0, -suffix.length), value])
  );
}
// GET /name?lang=xx -> the language-filtered view of the JSON document.
app.get('/name', (req, res) => {
res.json(filterByLanguage(nameLists, req.query.lang));
});
// Start the HTTP server on port 3030.
app.listen(port);
e.g.:
curl http://localhost:3030/name?lang=de
output:
{"name":"Apfel"}
The idea is to iterate over the keys of the input object and prepare an output object that only has keys that match the language suffix (then strip off that suffix). You'll likely want to either have a mapping of en -> English, or just use the key names that match the parameter e.g., rename name_English to name_en.
In FeathersJS, the params object of find will store the query string of the passed in URL. So if you call http://localhost:3030/name?lang=en, the params object will be :-
{
query: {
lang: 'en'
}
}
You can then use this information to determine which result from the JSON to return.
https://docs.feathersjs.com/guides/basics/services.html#service-methods
Your question appears to be two parts: handling queries, and handling the internationalization.
Handling the query:
Feathers presents queries through the context object at the following location:
context.params.query
Handling Internationalization:
There are many solid packages available for handling internationalization:
https://openbase.com/categories/js/best-nodejs-internationalization-libraries?orderBy=RECOMMENDED&
There is a new version out, but the documentation is somewhat lacking a working example.
Github Ticket: https://github.com/jbmusso/gremlin-javascript/issues/109
I've been trying to get an example to work. Any help appreciated:
gremlin-server: 3.3.2 with config gremlin-server-modern.yaml
npm gremlin lib: 3.3.2
import gremlin from 'gremlin';
import DriverRemoteConnection from 'gremlin/lib/driver/driver-remote-connection';
import { Graph } from 'gremlin/lib/structure/graph';
// Build a traversal source `g` backed by the remote Gremlin Server; the
// mimeType explicitly requests the GraphSON v3 wire format.
const graph = new Graph()
const g = graph.traversal().withRemote(new DriverRemoteConnection('ws://localhost:8182/gremlin', { mimeType: 'application/vnd.gremlin-v3.0+json' }));
// Fetch the vertex (or vertices) with the given id and log the result list.
const fetchById = async (id) => {
  const vertices = await g.V(id).toList();
  console.log(vertices);
}
// Adds a vertex, writing the 'name' property twice with hard-coded values.
// NOTE(review): the `name` parameter is never used — presumably one of the
// property() calls was meant to take it; confirm the intended behavior.
const addUser = async (name) => {
const newVertex = await g.addV().property('name','marko').property('name','marko a. rodriguez').next()
console.log(newVertex)
}
// NOTE(review): neither async call is awaited, so their completion order is
// not guaranteed — fetchById(0) may run before addUser() has finished, which
// would explain the empty [] output above.
addUser()
fetchById(0)
Current Output:
[]
{ value: null, done: true }
UPDATE
Gremlin JavaScript now supports GraphSON3 and the latest Gremlin Server.
Working example:
const gremlin = require('gremlin');
const Graph = gremlin.structure.Graph;
const DriverRemoteConnection = gremlin.driver.DriverRemoteConnection;
Obtain a traversal source (g):
// Create the graph handle and bind a remote traversal source to the server.
const graph = new Graph();
const connection = new DriverRemoteConnection('ws://localhost:8182/gremlin');
const g = graph.traversal().withRemote(connection);
Once you have a traversal source (g), reuse it across your application to create traversals, for example:
// Get the friends' names
// NOTE: `await` here assumes an enclosing async function (or ESM top-level
// await).
const friends = await g.V().has("name","Matt")
.out("knows").values("name").toList();
See more information on the documentation: https://tinkerpop.apache.org/docs/current/reference/#gremlin-javascript
ORIGINAL ANSWER
Gremlin JavaScript doesn't support GraphSON3 serialization, which is the default in TinkerPop 3.3+. This causes your response to not be properly parsed.
I've filed a ticket to support GraphSON3 in the JavaScript GLV: https://issues.apache.org/jira/browse/TINKERPOP-1943
In the meantime, as a workaround, you can add GraphSON2 serializers to the server by adding the following line to your yaml, below serializers:
- { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV2d0, config: { ioRegistries: [org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerIoRegistryV2d0] }}
Regarding the read property 'reader' of undefined issue: I fell back to version gremlin@3.3.4 and it works fine.