REST API azure speech to text (RECOGNIZED: Text=undefined) - javascript

I am trying to use the Azure Speech to Text API, but when I execute the code it does not give me the recognition result for the audio. The audio is in the requested format (.WAV).
Code example from the documentation:
const fs = require('fs');
const sdk = require("microsoft-cognitiveservices-speech-sdk");

const speechConfig = sdk.SpeechConfig.fromSubscription("---", "eastus2");

function fromFile() {
    let pushStream = sdk.AudioInputStream.createPushStream();

    fs.createReadStream("audio/aboutSpeechSdk.wav").on('data', function (arrayBuffer) {
        pushStream.write(arrayBuffer.slice());
    }).on('end', function () {
        pushStream.close();
    });

    let audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
    let recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
    recognizer.recognizeOnceAsync(result => {
        console.log(`RECOGNIZED: Text=${result.text}`);
        recognizer.close();
    });
}

fromFile();

According to the code you provided, it seems that you do not configure the speech recognition language. Please add the line speechConfig.speechRecognitionLanguage = "" to your sample. For more details about the supported languages, please refer to here.
For example (you can download the video to do a test):
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");

var subscriptionKey = "";
var serviceRegion = "";
var language = "en-US";

function openPushStream(filename) {
    // Create the push stream we need for the Speech SDK.
    var pushStream = sdk.AudioInputStream.createPushStream();

    // Open the file and push it to the push stream.
    fs.createReadStream(filename)
        .on("data", function (arrayBuffer) {
            pushStream.write(arrayBuffer.slice());
        })
        .on("end", function () {
            pushStream.close();
        });

    return pushStream;
}

var audioConfig = sdk.AudioConfig.fromStreamInput(
    openPushStream("aboutSpeechSdk.wav")
);
var speechConfig = sdk.SpeechConfig.fromSubscription(
    subscriptionKey,
    serviceRegion
);
speechConfig.speechRecognitionLanguage = language;

var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
recognizer.recognizeOnceAsync(
    function (result) {
        console.log(result.text);
        recognizer.close();
        recognizer = undefined;
    },
    function (err) {
        console.log(err);
        recognizer.close();
        recognizer = undefined;
    }
);
For more details, please refer to the blog
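If the text still comes back as undefined after setting the language, it can also help to look at result.reason instead of assuming the audio was processed. A minimal sketch of that check, reusing the recognizer from the sample above (the switch cases follow the SDK's ResultReason enum):

recognizer.recognizeOnceAsync(function (result) {
    switch (result.reason) {
        case sdk.ResultReason.RecognizedSpeech:
            console.log(`RECOGNIZED: Text=${result.text}`);
            break;
        case sdk.ResultReason.NoMatch:
            // Audio was received but no speech could be matched
            // (often silence or an unexpected audio format).
            console.log("NOMATCH: Speech could not be recognized.");
            break;
        case sdk.ResultReason.Canceled:
            // Cancellation details usually carry the underlying error,
            // e.g. an invalid key, wrong region, or unsupported format.
            var cancellation = sdk.CancellationDetails.fromResult(result);
            console.log(`CANCELED: Reason=${cancellation.reason}`);
            console.log(`CANCELED: ErrorDetails=${cancellation.errorDetails}`);
            break;
    }
    recognizer.close();
});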

Related

Can I convert a buffer or arrayBuffer of voice to text in nodejs?

I have a buffer that contains the voice of a person from a media stream, and I send it from JavaScript to NodeJS using socket.io.
I need to convert that buffer to text (like speech to text, but the voice is stored as a buffer coming from the media stream).
There are helper functions I use (in NodeJS, see below) that convert between Buffer and ArrayBuffer, and there is a package called node-blob that converts a Buffer to an audio blob.
But I have searched a lot for how to convert audio, or even a buffer, to text and failed.
Is there any help, code, or package that could convert it to text?
JavaScript
navigator.mediaDevices
    .getUserMedia({
        video: true,
        audio: true,
    })
    .then((stream) => {
        setSrcVideo(stream);
        const mediasStream = new MediaStream();
        mediasStream.addTrack(stream.getVideoTracks()[0]);
        mediasStream.addTrack(stream.getAudioTracks()[0]);
        const mediaRecorder = new MediaRecorder(mediasStream);
        socket.emit('ready');
        mediaRecorder.addEventListener('dataavailable', (event) => {
            if (event.data && event.data.size > 0) {
                socket.emit('send-chunks', event.data);
            }
        });
        socket.on('start-recording', () => {
            mediaRecorder.start(1000);
        });
    });
and I receive that buffer by socket.on('send-chunks') in NodeJS like this:
NodeJS
// connection to socket.io
io.on('connection', (socket) => {
    socket.on('ready', () => {
        socket.emit('start-recording');
    });
    socket.on('send-chunks', (chunks) => {
        // convert to text
    });
});

// helper functions
const toArrayBuffer = (buffer) => {
    const arrayBuffer = new ArrayBuffer(buffer.length);
    const view = new Uint8Array(arrayBuffer);
    for (let i = 0; i < buffer.length; ++i) {
        view[i] = buffer[i];
    }
    return arrayBuffer;
};

const toBuffer = (arrayBuffer) => {
    const buffer = Buffer.alloc(arrayBuffer.byteLength);
    const view = new Uint8Array(arrayBuffer);
    for (let i = 0; i < buffer.length; ++i) {
        buffer[i] = view[i];
    }
    return buffer;
};
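In line with the Azure Speech SDK answer above, one way to turn those chunks into text is to write them into a push stream and run a recognizer over it. The sketch below assumes the bytes arriving over socket.io are raw 16 kHz, 16-bit, mono PCM (the SDK push stream's default format); MediaRecorder's default webm/opus output would first need to be transcoded (e.g. with ffmpeg) before this works, and the key/region placeholders are not real values.

const sdk = require("microsoft-cognitiveservices-speech-sdk");

const speechConfig = sdk.SpeechConfig.fromSubscription("<key>", "<region>");
speechConfig.speechRecognitionLanguage = "en-US";

io.on('connection', (socket) => {
    // One push stream + recognizer per connected client.
    const pushStream = sdk.AudioInputStream.createPushStream();
    const audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
    const recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

    // Log final transcriptions as they arrive.
    recognizer.recognized = (s, e) => console.log('TEXT:', e.result.text);
    recognizer.startContinuousRecognitionAsync();

    socket.on('send-chunks', (chunks) => {
        // socket.io delivers a Buffer; the push stream expects an ArrayBuffer,
        // so reuse the toArrayBuffer helper from above.
        pushStream.write(toArrayBuffer(chunks));
    });

    socket.on('disconnect', () => {
        pushStream.close();
        recognizer.stopContinuousRecognitionAsync(() => recognizer.close());
    });
});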

AMQJS0011E Invalid state not connected. AWS IoT core

I'm trying to have some sort of real-time dashboard that can subscribe to AWS IoT core topics and maybe publish too.
I have found a couple of items online, but I can't figure it out.
This is what I currently have:
function p4() {}

p4.sign = function (key, msg) {
    const hash = CryptoJS.HmacSHA256(msg, key);
    return hash.toString(CryptoJS.enc.Hex);
};

p4.sha256 = function (msg) {
    const hash = CryptoJS.SHA256(msg);
    return hash.toString(CryptoJS.enc.Hex);
};

p4.getSignatureKey = function (key, dateStamp, regionName, serviceName) {
    const kDate = CryptoJS.HmacSHA256(dateStamp, 'AWS4' + key);
    const kRegion = CryptoJS.HmacSHA256(regionName, kDate);
    const kService = CryptoJS.HmacSHA256(serviceName, kRegion);
    const kSigning = CryptoJS.HmacSHA256('aws4_request', kService);
    return kSigning;
};

function getEndpoint() {
    const REGION = "eu-west-1";
    const IOT_ENDPOINT = "blablablabla-ats.iot.eu-west-1.amazonaws.com";

    // your AWS access key ID
    const KEY_ID = "My-key";

    // your AWS secret access key
    const SECRET_KEY = "my-access-token";

    // date & time
    const dt = (new Date()).toISOString().replace(/[^0-9]/g, "");
    const ymd = dt.slice(0, 8);
    const fdt = `${ymd}T${dt.slice(8, 14)}Z`;

    const scope = `${ymd}/${REGION}/iotdevicegateway/aws4_request`;
    const ks = encodeURIComponent(`${KEY_ID}/${scope}`);
    let qs = `X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=${ks}&X-Amz-Date=${fdt}&X-Amz-SignedHeaders=host`;
    const req = `GET\n/mqtt\n${qs}\nhost:${IOT_ENDPOINT}\n\nhost\n${p4.sha256('')}`;

    qs += '&X-Amz-Signature=' + p4.sign(
        p4.getSignatureKey(SECRET_KEY, ymd, REGION, 'iotdevicegateway'),
        `AWS4-HMAC-SHA256\n${fdt}\n${scope}\n${p4.sha256(req)}`
    );

    return `wss://${IOT_ENDPOINT}/mqtt?${qs}`;
}

// gets MQTT client
function initClient() {
    const clientId = Math.random().toString(36).substring(7);
    const _client = new Paho.MQTT.Client(getEndpoint(), clientId);

    // publish method added to simplify messaging
    _client.publish = function (topic, payload) {
        let payloadText = JSON.stringify(payload);
        let message = new Paho.MQTT.Message(payloadText);
        message.destinationName = topic;
        message.qos = 0;
        _client.send(message);
    };

    return _client;
}

function getClient(success) {
    if (!success) success = () => console.log("connected");
    const _client = initClient();
    const connectOptions = {
        useSSL: true,
        timeout: 3,
        mqttVersion: 4,
        onSuccess: success
    };
    _client.connect(connectOptions);
    return _client;
}

let client = {};

function init() {
    client = getClient();
    client.onMessageArrived = processMessage;
    client.onConnectionLost = function (e) {
        console.log(e);
    };
}

function processMessage(message) {
    let info = JSON.parse(message.payloadString);
    const publishData = {
        retailer: retailData,
        order: info.order
    };
    client.publish("sc/delivery", publishData);
}

$(document).ready(() => {
    init();
    client.subscribe("sc/orders/");
});
I keep getting the same error: AMQJS0011E Invalid state not connected. But I do see in the requests pane of Chrome that there is a connection or so... What am I doing wrong here?
I don't see any of the logs either...
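One thing that stands out in the code above (a guess, not a confirmed fix): client.subscribe("sc/orders/") runs right after init(), but connect() is asynchronous, so the subscribe fires before onSuccess and Paho reports AMQJS0011E Invalid state not connected. A sketch of moving the subscribe into the connect success callback, using the success parameter that getClient() already accepts:

function init() {
    // Subscribe only once the connection has actually been established.
    client = getClient(function () {
        console.log("connected");
        client.subscribe("sc/orders/");
    });
    client.onMessageArrived = processMessage;
    client.onConnectionLost = function (e) {
        console.log(e);
    };
}

$(document).ready(() => {
    init();
});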

How to read first line of a file in JavaScript

In JavaScript the FileReader object doesn't seem to have support for just reading the first line of a file (up to the newline '\n'). I don't want to read in the whole file, to save memory.
Is there a way to do it?
My code (note that a readLine() function does not exist):
self.loadFirstLineFromFile = function (options, callback) {
    var hiddenElement = document.createElement('input');
    hiddenElement.id = 'hidden-tsv-file-loader';
    hiddenElement.type = 'file';
    hiddenElement.accept = options.extension;
    hiddenElement.style.display = 'none';
    hiddenElement.addEventListener('change', function (event) {
        var file = event.target.files[0];
        var reader = new FileReader(file);
        var firstLine;
        firstLine = reader.readLine();
        callback(firstLine);
    });
    document.body.appendChild(hiddenElement);
    hiddenElement.click();
};
There's nothing builtin for that, but it's simple to implement:
var file = event.target.files[0];
var sliced = file.slice(0, 2048); // Pick a size that you're ok with

// NOTE: `await` keyword requires transpiling (Babel) for IE11,
// and to be inside an async function. An alternative is:
// sliced.text().then(function(text) { console.log(text); });
var text = await sliced.text();
console.log(text);
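Since only the first line is wanted, one more step extracts it from the sliced text (assuming the first line fits within the 2048-byte slice):

// Everything before the first newline; if the first line is longer than
// the slice, the whole slice is returned, so pick a generous slice size.
var firstLine = text.split('\n')[0];
callback(firstLine);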
Here's an interface that reads the data from the Blob decoded as text and chunked by a delimiter:
async function* readLines(blob, encoding = 'utf-8', delimiter = /\r?\n/g) {
    const reader = blob.stream().getReader();
    const decoder = new TextDecoder(encoding);
    try {
        let text = '';
        while (true) {
            const { value, done } = await reader.read();
            if (done) break;
            text += decoder.decode(value, { stream: true });
            const lines = text.split(delimiter);
            text = lines.pop();
            yield* lines;
        }
        yield text;
    } finally {
        reader.cancel();
    }
}
We can use this to read a single line and discard the rest without reading the entire file:
hiddenElement.addEventListener('change', async function (event) {
    const file = event.target.files[0];
    for await (const line of readLines(file, 'utf-8', '\n')) {
        callback(line);
        return; // signals reader.cancel() to the async iterator
    }
});
Since I use JavaScript with Knockout, I refactored Patrick's solution into this:
self.loadStream = function (options, callback) {
    var hiddenElement = document.createElement('input');
    hiddenElement.id = 'hidden-tsv-file-loader';
    hiddenElement.type = 'file';
    hiddenElement.accept = options.extension;
    hiddenElement.style.display = 'none';
    hiddenElement.addEventListener('change', function (event) {
        var file = event.target.files[0];
        var reader = file.stream().getReader();
        var decoder = new TextDecoder('utf-8');
        var data;
        var readNextChunk = function () {
            data = reader.read();
            data.then(function (result) {
                if (!result.value) {
                    callback({ chunk: '', done: true, shouldStop: true }, file);
                } else {
                    var chunk = decoder.decode(result.value, { stream: true });
                    var args = {
                        chunk: chunk,
                        done: result.done,
                        shouldStop: true
                    };
                    callback(args, file);
                    if (!result.done && !args.shouldStop) {
                        readNextChunk();
                    }
                }
            });
        };
        readNextChunk();
        hiddenElement.remove();
    });
    document.body.appendChild(hiddenElement);
    hiddenElement.click();
};
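For completeness, a possible way to call this (a sketch: the callback reads only the first line of the first chunk and leaves shouldStop at true, so no further chunks are requested; the options object only needs the extension field used above):

self.loadStream({ extension: '.tsv' }, function (args, file) {
    // First chunk only; args.shouldStop defaults to true, so reading stops here.
    var firstLine = args.chunk.split('\n')[0];
    console.log('First line of ' + file.name + ':', firstLine);
});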

node.js webtorrent collect all files by magnet link

I have not used node.js before.
I have a .txt file with a list of magnet links, and I want to write a JSON file with a list of all the files contained in those links.
var WebTorrent = require('webtorrent');
var fs = require('fs');

var client = new WebTorrent();
var array = fs.readFileSync('yop.txt').toString().split("\n");

i = 0;
while (i < array.length) {
    //console.log(array[i]);
    var magnetURI = array[i];
    n = 0;
    client.add(magnetURI, function (torrent) {
        torrent.files.forEach(function (file) {
            //console.log(file.name)
            jsonString = JSON.stringify({'book': file.name});
            fs.appendFile("data.json", jsonString, function (err) {
                if (err) { console.log(err); } else { n++ }
            });
            if (n == torrent.files.length) { i++ }
        })
    })
}
When run, it gives the following error:
Sorry for such terrible code.
var WebTorrent = require('webtorrent');
var fs = require('fs');

var stream = fs.createWriteStream("2.txt");
var client = new WebTorrent();
var array = fs.readFileSync('yop.txt').toString().split("\n");

i = 0;

function parseMagnet(uri) {
    // Guard: stop recursing once every magnet link has been processed,
    // otherwise client.add() would be called with undefined.
    if (i >= uri.length) {
        stream.end();
        client.destroy();
        return;
    }
    var magnetURI = uri[i];
    console.log(magnetURI);
    client.add(magnetURI, function (torrent) {
        torrent.files.forEach(function (file) {
            writeStr = (uri[i] + '\n' + file.name + '\n');
            stream.write(writeStr);
            console.log(file.name);
        });
        console.log('Done !');
        console.log(i);
        i += 1;
        parseMagnet(array);
        client.remove(magnetURI);
    });
}

parseMagnet(array);
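Since the original goal was a JSON file rather than a plain text file, a variation of the same recursive approach could collect the names and write them once at the end (a sketch; yop.txt and data.json are the file names from the question):

var WebTorrent = require('webtorrent');
var fs = require('fs');

var client = new WebTorrent();
var links = fs.readFileSync('yop.txt').toString().split('\n').filter(Boolean);
var books = [];

function next(index) {
    if (index >= links.length) {
        // All links processed: write a single JSON array and shut down.
        fs.writeFileSync('data.json', JSON.stringify(books, null, 2));
        client.destroy();
        return;
    }
    client.add(links[index], function (torrent) {
        torrent.files.forEach(function (file) {
            books.push({ magnet: links[index], book: file.name });
        });
        client.remove(links[index]);
        next(index + 1);
    });
}

next(0);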

Overlapping parts while buffering mp3

I am trying to stream an MP3 file from a nodeJS server using BinaryJS - http://binaryjs.com/
But when I decode the buffers on the client side, they seem to be overlapping, meaning that each new chunk of data starts playing a few milliseconds before the previous one has ended, causing the audio to lag.
Is there any way to make the client wait until the current buffer has finished before starting the new one?
Server:
var BinaryServer = require('binaryjs').BinaryServer;
var fs = require('fs');

var server = BinaryServer({ port: 9000 });

server.on('connection', function (client) {
    var file = fs.createReadStream(__dirname + '/Song.mp3', {
        'flags': 'r',
        'bufferSize': 4 * 1024
    });
    client.send(file);
});
Client:
var client = new BinaryClient('ws://localhost:9000');

window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();

client.on('stream', function (stream, meta) {
    var parts = [];
    var last = 0;
    stream.on('data', function (data) {
        var source = context.createBufferSource();
        context.decodeAudioData(data, function (buf) {
            source.buffer = buf;
            source.connect(context.destination);
            source.loop = false;
            source.start(last);
            last += buf.duration;
            source.onended = function () {
                console.log('Your audio has finished playing');
            };
        },
        function (e) {
            console.log("Error with decoding audio data" + e.err);
        });
        parts.push(data);
    });
    stream.on('end', function () {
        console.log(parts);
    });
});
Not sure about this, but instead of initializing last to 0, you might want to initialize it to context.currentTime.
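A sketch of what that suggestion would look like in the client code above (the Math.max clamp is an extra assumption, to avoid scheduling in the past when a chunk arrives after the previous one has already finished):

var last = context.currentTime; // instead of 0

context.decodeAudioData(data, function (buf) {
    source.buffer = buf;
    source.connect(context.destination);

    // Queue each chunk to start exactly when the previous one ends.
    var startAt = Math.max(last, context.currentTime);
    source.start(startAt);
    last = startAt + buf.duration;
});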
