Unable to read the full content of a file in JavaScript

I have this piece of javascript code
var file = Components.classes["@mozilla.org/file/local;1"]
.createInstance(Components.interfaces.nsILocalFile);
file.initWithPath( this.savefile );
if ( file.exists() == false ) {
return null;
}
var is = Components.classes["@mozilla.org/network/file-input-stream;1"]
.createInstance( Components.interfaces.nsIFileInputStream );
is.init( file,0x01, 00004, null);
var sis = Components.classes["@mozilla.org/scriptableinputstream;1"]
.createInstance( Components.interfaces.nsIScriptableInputStream );
sis.init( is );
var output = sis.read( sis.available() );
sis.close();
is.close();
this.filterData = output;
return output;
Actually, the file I am reading is binary and is, let's say, 350 bytes long.
The 19th byte is zero, so with the code above I get only 18 bytes in output.
When I tried debugging, sis.available() does return 350, but sis.read() only reads up to the zero byte.
I want a way to read all 350 bytes into output.

EDIT
See https://developer.mozilla.org/en-US/docs/Reading_textual_data
Quote:
var charset = /* Need to find out what the character encoding is. Using UTF-8 for this example: */ "UTF-8";
var is = Components.classes["@mozilla.org/intl/converter-input-stream;1"]
.createInstance(Components.interfaces.nsIConverterInputStream);
// This assumes that fis is the nsIInputStream you want to read from
is.init(fis, charset, 1024, 0xFFFD);
is.QueryInterface(Components.interfaces.nsIUnicharLineInputStream);
if (is instanceof Components.interfaces.nsIUnicharLineInputStream) {
var line = {};
var cont;
do {
cont = is.readLine(line);
// Now you can do something with line.value
} while (cont);
}
This avoids the null byte problems, is unicode safe, and works with less esoteric object types.
Original:
As per my comment above, and in light of your edit, see https://developer.mozilla.org/en-US/docs/XPCOM_Interface_Reference/nsIScriptableInputStream, where read() comes with the warning: "If the data contains a null byte, then this method will return a truncated string. You may want to use readBytes() instead."
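For reference, a minimal sketch of the readBytes() variant, reusing the stream setup from the question (the variable names are the question's own):
var sis = Components.classes["@mozilla.org/scriptableinputstream;1"]
.createInstance(Components.interfaces.nsIScriptableInputStream);
sis.init(is);
// Unlike read(), readBytes() returns the raw bytes as a string, null bytes included
var output = sis.readBytes(sis.available());
sis.close();
is.close();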
Alternatively, here's another way to do it:
var ph = Components.classes["@mozilla.org/network/protocol;1?name=file"]
.createInstance(Components.interfaces.nsIFileProtocolHandler);
var file_to_read = ph.getURLSpecFromFile(file);
var req = new XMLHttpRequest();
req.onerror = function(e) {
onError(e);
}
req.onreadystatechange = function() {
if (req.readyState == 4) {
//...
}
}
req.open("GET", file_to_read, true);
req.send(null);

I may be wrong, but have you tried sending a simple GET request via AJAX? Or do you strictly want to use the XPCOM approach?
EDIT:
Refer to this - How do I load the contents of a text file into a javascript variable?

Related

javascript, how could we read a local text file with accent letters into it?

I have a doubt: I need to read a local file, and I have been studying some threads about it. I have seen various ways to handle it, but in most cases there is an input file element.
I would need to load the file directly through code.
I have studied this thread:
How to read a local text file?
And I could read it.
The surprising part was that when I tried to split the lines and words, it showed � replacing accented letters.
The code I have right now is:
myFileReader.js
function readTextFile(file) {
var rawFile = new XMLHttpRequest();
rawFile.open("GET", file, false);
rawFile.onreadystatechange = function () {
if (rawFile.readyState === 4) {
if (rawFile.status === 200 || rawFile.status == 0) {
allText = rawFile.responseText;
console.log('The complete text is', allText);
let lineArr = intoLines(allText);
let firstLineWords = intoWords(lineArr[0]);
let secondLineWords = intoWords(lineArr[1]);
console.log('Our first line is: ', lineArr[0]);
let atlas = {};
for (let i = 0; i < firstLineWords.length; i++) {
console.log(`Our ${i} word in the first line is : ${firstLineWords[i]}`);
console.log(`Our ${i} word in the SECOND line is : ${secondLineWords[i]}`);
atlas[firstLineWords[i]] = secondLineWords[i];
}
console.log('The atlas is: ', atlas);
let atlasJson = JSON.stringify(atlas);
console.log('Atlas as json is: ', atlasJson);
download(atlasJson, 'atlasJson.txt', 'text/plain');
}
}
};
rawFile.send(null);
}
function download(text, name, type) {
var a = document.getElementById("a");
var file = new Blob([text], {type: type});
a.href = URL.createObjectURL(file);
a.download = name;
}
function intoLines(text) {
// split all the text data into an array: "\n" splits the data at each
// new line and saves each line as an element
var lineArr = text.split('\n');
// just to check that it works, output lineArr[index]
return lineArr;
}
function intoWords(lines) {
var wordsArr = lines.split('" "');
return wordsArr;
}
The doubt is: how could we handle those special characters, i.e. the vowels with accents?
I ask this because even in the IDE the question marks appeared when we loaded the txt as UTF-8, so I changed to ISO-8859-1 and it loaded well.
Also I have studied:
Read UTF-8 special chars from external file using Javascript
Convert special characters to HTML in Javascript
Reading a local text file from a local javascript file?
In addition, could you explain if there is a shorter way to load files in client-side JavaScript? For example, in Java there are FileReader / FileWriter / BufferedWriter. Is there something similar in JavaScript?
Thank you for your help!
It sounds like the file is encoded with ISO-8859-1 (or possibly the very-similar Windows-1252).
There's no BOM or equivalent for those encodings.
The only solutions I can see are:
Use a (local) server and have it return the HTTP Content-Type header with the encoding identified in the charset parameter, e.g. Content-Type: text/plain; charset=ISO-8859-1
Use UTF-8 instead (e.g., open the file in an editor as ISO-8859-1, then save it as UTF-8 instead), as that's the default encoding for XHR response bodies.
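A client-side variant of option 1 is to tell XMLHttpRequest which charset to decode the response with via overrideMimeType(). A minimal sketch, assuming the same synchronous readTextFile() setup as above:
var rawFile = new XMLHttpRequest();
// Decode the response body as ISO-8859-1 instead of the UTF-8 default
rawFile.overrideMimeType("text/plain; charset=ISO-8859-1");
rawFile.open("GET", file, false);
rawFile.send(null);
// rawFile.responseText now has the accented letters intact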
Put your text in an .html file with the corresponding content type,
for example:
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
enclose the text between two tags ("####" in my example), or put it in a div
Read the html page, extract the content, and select the text:
var newWindow = window.open(url); //..
var content = newWindow.document.body.innerHTML;
var strSep="####";
var x = content.indexOf(strSep);
x=x+strSep.length;
var y = content.lastIndexOf(strSep);
var points=content.slice(x, y);

Recover ArrayBuffer from xhr.responseText

I need to get an array buffer from an http request that sends me a base64 answer.
For this request, I can't use XMLHttpRequest.responseType="arraybuffer".
The response I get from this request is read through xhr.responseText. Hence it's encoded as a DOMString. I'm trying to get it back as an array buffer.
I've tried to go back to the base64 from the DOMString using btoa(myString) or window.btoa(unescape(encodeURIComponent(str))), but the first option just fails, whereas the second option doesn't give the same base64. Example of the first few characters from each base64:
Incoming : UEsDBBQACAgIACp750oAAAAAAAAAAAAAAAALAAAAX3JlbHMvLnJlbH
After the second processing: UEsDBBQACAgIAO+/ve+/ve+/vUoAAAAAAAAAAAAAAAALAAAAX3JlbHMvLnJlbH
As you can see a part of it is similar, but some parts are way off.
What am I missing to get it right?
I ran into the same issue.
The solution (tested on Chrome 68.0.3440.84):
let url = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='
let iso_8859_15_table = { 338: 188, 339: 189, 352: 166, 353: 168, 376: 190, 381: 180, 382: 184, 8364: 164 }
function iso_8859_15_to_uint8array(iso_8859_15_str) {
let buf = new ArrayBuffer(iso_8859_15_str.length);
let bufView = new Uint8Array(buf);
for (let i = 0, strLen = iso_8859_15_str.length; i < strLen; i++) {
let octet = iso_8859_15_str.charCodeAt(i);
if (iso_8859_15_table.hasOwnProperty(octet))
octet = iso_8859_15_table[octet]
bufView[i] = octet;
if(octet < 0 || 255 < octet)
console.error(`invalid data error`)
}
return bufView
}
req = new XMLHttpRequest();
req.overrideMimeType('text/plain; charset=ISO-8859-15');
req.onload = () => {
console.log(`Uint8Array : `)
var uint8array = iso_8859_15_to_uint8array(req.responseText)
console.log(uint8array)
}
req.open("get", url);
req.send();
Below is an explanation of what I learned while solving it.
Explanation
Why are some parts way off?
Because the text decoder causes data loss (in your case, UTF-8).
For example, let's talk about UTF-8, a variable-width character encoding for Unicode.
It has rules (and this is what becomes the problem) for reasons such as its variable-length design, ASCII compatibility, etc.
So a decoder may replace non-conforming characters with a replacement character such as U+003F (?, question mark) or U+FFFD (�, the Unicode replacement character).
In the UTF-8 case, byte values 0-127 are stable, while values 128-255 are unstable: bytes in that range that do not form a valid sequence are converted to U+FFFD.
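For instance, a lone byte above 127 is not a valid UTF-8 sequence, so the decoder replaces it:
// 0x48 is 'H'; a bare 0xC8 lead byte with no continuation byte is invalid UTF-8
new TextDecoder('utf-8').decode(new Uint8Array([0x48, 0xC8])); // "H�" (H + U+FFFD)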
Are other text decoders safe, except UTF-8?
No. In most cases they have lossy rules of their own.
UTF-8 is also unrecoverable (invalid 128-255 bytes are set to U+FFFD).
If the binary data and the decoded result correspond one-to-one, the data can be recovered.
How to solve it?
Find recoverable text decoders.
Force the MIME type of the incoming data to a recoverable charset:
xhr_object.overrideMimeType('text/plain; charset=ISO-8859-15')
Recover the binary data from the string with a recovery table when it is received.
Find recoverable text decoders.
To recover, avoid situations where decoded results are duplicated.
The following code is a simple example, so some recoverable text decoders may be missing, because it only considers Uint8Array.
let bufferView = new Uint8Array(256);
for (let i = 0; i < 256; i++)
bufferView[i] = i;
let recoverable = []
let decoding = ['utf-8', 'ibm866', 'iso-8859-2', 'iso-8859-3', 'iso-8859-4', 'iso-8859-5', 'iso-8859-6', 'iso-8859-7', 'iso-8859-8', 'iso-8859-8i', 'iso-8859-10', 'iso-8859-13', 'iso-8859-14', 'iso-8859-15', 'iso-8859-16', 'koi8-r', 'koi8-u', 'macintosh', 'windows-874', 'windows-1250', 'windows-1251', 'windows-1252', 'windows-1253', 'windows-1254', 'windows-1255', 'windows-1256', 'windows-1257', 'windows-1258', 'x-mac-cyrillic', 'gbk', 'gb18030', 'hz-gb-2312', 'big5', 'euc-jp', 'iso-2022-jp', 'shift-jis', 'euc-kr', 'iso-2022-kr', 'utf-16be', 'utf-16le', 'x-user-defined', 'ISO-2022-CN', 'ISO-2022-CN-ext']
for (let dec of decoding) {
try {
let decodedText = new TextDecoder(dec).decode(bufferView);
let loss = 0
let recoverTable = {}
let unrecoverable = 0
for (let i = 0; i < decodedText.length; i++) {
let charCode = decodedText.charCodeAt(i)
if (charCode != i)
loss++
if (!recoverTable[charCode])
recoverTable[charCode] = i
else
unrecoverable++
}
let tableCnt = 0
for (let props in recoverTable) {
tableCnt++
}
if (tableCnt == 256 && unrecoverable == 0){
recoverable.push(dec)
setTimeout(()=>{
console.log(`[${dec}] : err(${loss}/${decodedText.length}, ${Math.round(loss / decodedText.length * 100)}%) alive(${tableCnt}) unrecoverable(${unrecoverable})`)
},10)
}
else {
console.log(`!! [${dec}] : err(${loss}/${decodedText.length}, ${Math.round(loss / decodedText.length * 100)}%) alive(${tableCnt}) unrecoverable(${unrecoverable})`)
}
} catch (e) {
console.log(`!! [${dec}] : not supported.`)
}
}
setTimeout(()=>{
console.log(`recoverable Charset : ${recoverable}`)
}, 10)
In my console, this returns:
recoverable Charset : ibm866,iso-8859-2,iso-8859-4,iso-8859-5,iso-8859-10,iso-8859-13,iso-8859-14,iso-8859-15,iso-8859-16,koi8-r,koi8-u,macintosh,windows-1250,windows-1251,windows-1252,windows-1254,windows-1256,windows-1258,x-mac-cyrillic,x-user-defined
And I used iso-8859-15 at the beginning of this answer (it has the smallest table size).
Additional test: comparison between UTF-8's and ISO-8859-15's results.
Check that U+FFFD really disappears when using ISO-8859-15.
function requestAjax(url, charset) {
let req = new XMLHttpRequest();
if (charset)
req.overrideMimeType(`text/plain; charset=${charset}`);
else
charset = 'utf-8';
req.open('get', url);
req.onload = () => {
console.log(`==========\n${charset}`)
console.log(`${req.responseText.split('', 50)}\n==========`);
console.log('\n')
}
req.send();
}
var url = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==';
requestAjax(url, 'ISO-8859-15');
requestAjax(url);
Bottom line
Recovering binary data to and from a string needs some additional work:
Find a recoverable text encoder/decoder.
Make a recovery table.
Recover with the table.
(You can refer to the code at the very top.)
To use this trick, force the MIME type of the incoming data to the desired charset.

Change JavaScript string encoding

At the moment I have a large JavaScript string I'm attempting to write to a file, but in a different encoding (ISO-8859-1). I was hoping to use something like downloadify. Downloadify only accepts normal JavaScript strings or base64 encoded strings.
Because of this, I've decided to compress my string using JSZip which generates a nicely base64 encoded string that can be passed to downloadify, and downloaded to my desktop. Huzzah! The issue is that the string I compressed, of course, is still the wrong encoding.
Luckily JSZip can take a Uint8Array as data, instead of a string. So is there any way to convert a JavaScript string into a ISO-8859-1 encoded string and store it in a Uint8Array?
Alternatively, if I'm approaching this all wrong, is there a better solution all together? Is there a fancy JavaScript string class that can use different internal encodings?
Edit: To clarify, I'm not pushing this string to a webpage so it won't automatically convert it for me. I'm doing something like this:
var zip = new JSZip();
zip.file("genSave.txt", result);
return zip.generate({compression:"DEFLATE"});
And for this to make sense, I would need result to be in the proper encoding (and JSZip only takes strings, arraybuffers, or uint8arrays).
Final Edit (This was -not- a duplicate question because the result wasn't being displayed in the browser or transmitted to a server where the encoding could be changed):
This turned out to be a little more obscure than I had thought, so I ended up rolling my own solution. It's not nearly as robust as a proper solution would be, but it'll convert a JavaScript string into windows-1252 encoding, and stick it in a Uint8Array:
var enc = new string_transcoder("windows-1252");
var tenc = enc.transcode(result); //This is now a Uint8Array
You can then either use it in the array like I did:
//Make this into a zip
var zip = new JSZip();
zip.file("genSave.txt", tenc);
return zip.generate({compression:"DEFLATE"});
Or convert it into a windows-1252 encoded string using this string encoding library:
var string = new TextDecoder("windows-1252").decode(tenc);
To use this function, either use:
<script src="//www.eu4editor.com/string_transcoder.js"></script>
Or include this:
function string_transcoder (target) {
this.encodeList = encodings[target];
if (this.encodeList === undefined) {
return undefined;
}
//Initialize the easy encodings
if (target === "windows-1252") {
var i;
for (i = 0x0; i <= 0x7F; i++) {
this.encodeList[i] = i;
}
for (i = 0xA0; i <= 0xFF; i++) {
this.encodeList[i] = i;
}
}
}
string_transcoder.prototype.transcode = function (inString) {
var res = new Uint8Array(inString.length), i;
for (i = 0; i < inString.length; i++) {
var temp = inString.charCodeAt(i);
var tempEncode = (this.encodeList)[temp];
if (tempEncode === undefined) {
return undefined; //This encoding is messed up
} else {
res[i] = tempEncode;
}
}
return res;
};
var encodings = {
"windows-1252": {0x20AC:0x80, 0x201A:0x82, 0x0192:0x83, 0x201E:0x84, 0x2026:0x85, 0x2020:0x86, 0x2021:0x87, 0x02C6:0x88, 0x2030:0x89, 0x0160:0x8A, 0x2039:0x8B, 0x0152:0x8C, 0x017D:0x8E, 0x2018:0x91, 0x2019:0x92, 0x201C:0x93, 0x201D:0x94, 0x2022:0x95, 0x2013:0x96, 0x2014:0x97, 0x02DC:0x98, 0x2122:0x99, 0x0161:0x9A, 0x203A:0x9B, 0x0153:0x9C, 0x017E:0x9E, 0x0178:0x9F}
};
Test the following script:
<script type="text/javascript" charset="utf-8">
The best solution for me was posted here and this is my one-liner:
<!-- Required for non-UTF encodings (quite big) -->
<script src="encoding-indexes.js"></script>
<script src="encoding.js"></script>
...
// windows-1252 is just one typical example encoding/transcoding
let transcodedString = new TextDecoder( 'windows-1252' ).decode(
new TextEncoder().encode( someUtf8String ))
or this if the transcoding has to be applied on multiple inputs reusing the encoder and decoder:
let srcArr = [ ... ] // some UTF-8 string array
let encoder = new TextEncoder()
let decoder = new TextDecoder( 'windows-1252' )
let transcodedArr = srcArr.map( s => decoder.decode( encoder.encode( s )))
(A slightly modified answer from a related question:)
This is what I found after a more specific Google search than just UTF-8 encode/decode. So for those who are looking for a library to convert between encodings, here you go.
github.com/inexorabletash/text-encoding
var uint8array = new TextEncoder().encode(str);
var str = new TextDecoder(encoding).decode(uint8array);
Pasted from the repo readme:
All encodings from the Encoding specification are supported:
utf-8 ibm866 iso-8859-2 iso-8859-3 iso-8859-4 iso-8859-5 iso-8859-6
iso-8859-7 iso-8859-8 iso-8859-8-i iso-8859-10 iso-8859-13 iso-8859-14
iso-8859-15 iso-8859-16 koi8-r koi8-u macintosh windows-874 windows-1250
windows-1251 windows-1252 windows-1253 windows-1254 windows-1255
windows-1256 windows-1257 windows-1258 x-mac-cyrillic gb18030 hz-gb-2312
big5 euc-jp iso-2022-jp shift_jis euc-kr replacement utf-16be utf-16le
x-user-defined
(Some encodings may be supported under other names, e.g. ascii,
iso-8859-1, etc. See Encoding for additional labels for each
encoding.)

Converting between strings and ArrayBuffers

Is there a commonly accepted technique for efficiently converting JavaScript strings to ArrayBuffers and vice-versa? Specifically, I'd like to be able to write the contents of an ArrayBuffer to localStorage and then read it back.
Update 2016 - five years on there are now new methods in the specs (see support below) to convert between strings and typed arrays using proper encoding.
TextEncoder
MDN describes TextEncoder as follows:
The TextEncoder interface represents an encoder for a specific method,
that is a specific character encoding, like utf-8, iso-8859-2, koi8,
cp1261, gbk, ... An encoder takes a stream of code points as input and
emits a stream of bytes.
Change note since the above was written: (ibid.)
Note: Firefox, Chrome and Opera used to have support for encoding
types other than utf-8 (such as utf-16, iso-8859-2, koi8, cp1261, and
gbk). As of Firefox 48 [...], Chrome 54 [...] and Opera 41, no
other encoding types are available other than utf-8, in order to match
the spec.*
*) Updated specs (W3) and here (whatwg).
After creating an instance of the TextEncoder it will take a string and encode it using a given encoding parameter:
if (!("TextEncoder" in window))
alert("Sorry, this browser does not support TextEncoder...");
var enc = new TextEncoder(); // always utf-8
console.log(enc.encode("This is a string converted to a Uint8Array"));
You then of course use the .buffer parameter on the resulting Uint8Array to convert the underlaying ArrayBuffer to a different view if needed.
Just make sure that the characters in the string adhere to the encoding schema; for example, characters outside the ASCII range will be encoded to two or more bytes instead of one.
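For example:
var enc = new TextEncoder();
enc.encode("A"); // Uint8Array [65] - ASCII, one byte
enc.encode("é"); // Uint8Array [195, 169] - one character, two UTF-8 bytes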
For general use you would use UTF-16 encoding for things like localStorage.
TextDecoder
Likewise, the opposite process uses the TextDecoder:
The TextDecoder interface represents a decoder for a specific method,
that is a specific character encoding, like utf-8, iso-8859-2, koi8,
cp1261, gbk, ... A decoder takes a stream of bytes as input and emits
a stream of code points.
All available decoding types can be found here.
if (!("TextDecoder" in window))
alert("Sorry, this browser does not support TextDecoder...");
var enc = new TextDecoder("utf-8");
var arr = new Uint8Array([84,104,105,115,32,105,115,32,97,32,85,105,110,116,
56,65,114,114,97,121,32,99,111,110,118,101,114,116,
101,100,32,116,111,32,97,32,115,116,114,105,110,103]);
console.log(enc.decode(arr));
The MDN StringView library
An alternative to these is to use the StringView library (licensed as lgpl-3.0), whose goals are:
to create a C-like interface for strings (i.e., an array of character codes — an ArrayBufferView in JavaScript) based upon the
JavaScript ArrayBuffer interface
to create a highly extensible library that anyone can extend by adding methods to the object StringView.prototype
to create a collection of methods for such string-like objects (since now: stringViews) which work strictly on arrays of numbers
rather than on creating new immutable JavaScript strings
to work with Unicode encodings other than JavaScript's default UTF-16 DOMStrings
giving much more flexibility. However, it would require us to link to or embed this library while TextEncoder/TextDecoder is being built-in in modern browsers.
Support
As of July/2018:
TextEncoder (Experimental, On Standard Track)
Chrome | Edge | Firefox | IE | Opera | Safari
----------|-----------|-----------|-----------|-----------|-----------
38 | ? | 19° | - | 25 | -
Chrome/A | Edge/mob | Firefox/A | Opera/A |Safari/iOS | Webview/A
----------|-----------|-----------|-----------|-----------|-----------
38 | ? | 19° | ? | - | 38
°) Firefox 18 implemented an earlier and slightly different version
of the specification.
WEB WORKER SUPPORT:
Experimental, On Standard Track
Chrome | Edge | Firefox | IE | Opera | Safari
----------|-----------|-----------|-----------|-----------|-----------
38 | ? | 20 | - | 25 | -
Chrome/A | Edge/mob | Firefox/A | Opera/A |Safari/iOS | Webview/A
----------|-----------|-----------|-----------|-----------|-----------
38 | ? | 20 | ? | - | 38
Data from MDN - `npm i -g mdncomp` by epistemex
Although Dennis's and gengkev's solutions using Blob/FileReader work, I wouldn't suggest taking that approach. It is an async approach to a simple problem, and it is much slower than a direct solution. I've made a post on html5rocks with a simpler and much faster solution:
http://updates.html5rocks.com/2012/06/How-to-convert-ArrayBuffer-to-and-from-String
And the solution is:
function ab2str(buf) {
return String.fromCharCode.apply(null, new Uint16Array(buf));
}
function str2ab(str) {
var buf = new ArrayBuffer(str.length*2); // 2 bytes for each char
var bufView = new Uint16Array(buf);
for (var i=0, strLen=str.length; i<strLen; i++) {
bufView[i] = str.charCodeAt(i);
}
return buf;
}
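Tying this back to the original question, a quick round trip through localStorage using the two functions above could look like this (a sketch; the key name is arbitrary):
var buf = str2ab("hello"); // ArrayBuffer holding UTF-16 code units
localStorage.setItem("payload", ab2str(buf)); // store the buffer as a plain string
var restored = str2ab(localStorage.getItem("payload")); // an equivalent ArrayBuffer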
EDIT:
The Encoding API helps solve the string conversion problem. Check out the response from Jeff Posnik on Html5Rocks.com to the above original article.
Excerpt:
The Encoding API makes it simple to translate between raw bytes and native JavaScript strings, regardless of which of the many standard encodings you need to work with.
<pre id="results"></pre>
<script>
if ('TextDecoder' in window) {
// The local files to be fetched, mapped to the encoding that they're using.
var filesToEncoding = {
'utf8.bin': 'utf-8',
'utf16le.bin': 'utf-16le',
'macintosh.bin': 'macintosh'
};
Object.keys(filesToEncoding).forEach(function(file) {
fetchAndDecode(file, filesToEncoding[file]);
});
} else {
document.querySelector('#results').textContent = 'Your browser does not support the Encoding API.'
}
// Use XHR to fetch `file` and interpret its contents as being encoded with `encoding`.
function fetchAndDecode(file, encoding) {
var xhr = new XMLHttpRequest();
xhr.open('GET', file);
// Using 'arraybuffer' as the responseType ensures that the raw data is returned,
// rather than letting XMLHttpRequest decode the data first.
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
if (this.status == 200) {
// The decode() method takes a DataView as a parameter, which is a wrapper on top of the ArrayBuffer.
var dataView = new DataView(this.response);
// The TextDecoder interface is documented at http://encoding.spec.whatwg.org/#interface-textdecoder
var decoder = new TextDecoder(encoding);
var decodedString = decoder.decode(dataView);
// Add the decoded file's text to the <pre> element on the page.
document.querySelector('#results').textContent += decodedString + '\n';
} else {
console.error('Error while requesting', file, this);
}
};
xhr.send();
}
</script>
You can use TextEncoder and TextDecoder from the Encoding standard, which is polyfilled by the stringencoding library, to convert string to and from ArrayBuffers:
var uint8array = new TextEncoder().encode(string);
var string = new TextDecoder(encoding).decode(uint8array);
Blob is much slower than String.fromCharCode.apply(null, array);
but that fails if the array buffer gets too big. The best solution I have found is to use String.fromCharCode.apply(null, array) and split the work into operations that won't blow the stack, but are faster than a single char at a time.
The best solution for large array buffers is:
function arrayBufferToString(buffer){
var bufView = new Uint16Array(buffer);
var length = bufView.length;
var result = '';
var addition = Math.pow(2,16)-1;
for(var i = 0;i<length;i+=addition){
if(i + addition > length){
addition = length - i;
}
result += String.fromCharCode.apply(null, bufView.subarray(i,i+addition));
}
return result;
}
I found this to be about 20 times faster than using blob. It also works for large strings of over 100 MB.
In case you have binary data in a string (obtained from nodejs + readFile(..., 'binary'), or cypress + cy.fixture(..., 'binary'), etc.), you can't use TextEncoder: it supports only UTF-8, so bytes with values >= 128 would each be turned into 2 bytes.
ES2015:
a = Uint8Array.from(s, x => x.charCodeAt(0))
Uint8Array(33) [2, 134, 140, 186, 82, 70, 108, 182, 233, 40, 143, 247, 29, 76, 245, 206, 29, 87, 48, 160, 78, 225, 242, 56, 236, 201, 80, 80, 152, 118, 92, 144, 48
s = String.fromCharCode.apply(null, a)
"ºRFl¶é(÷LõÎW0 Náò8ìÉPPv\0"
Based on the answer of gengkev, I created functions for both ways, because BlobBuilder can handle String and ArrayBuffer:
function string2ArrayBuffer(string, callback) {
var bb = new BlobBuilder();
bb.append(string);
var f = new FileReader();
f.onload = function(e) {
callback(e.target.result);
}
f.readAsArrayBuffer(bb.getBlob());
}
and
function arrayBuffer2String(buf, callback) {
var bb = new BlobBuilder();
bb.append(buf);
var f = new FileReader();
f.onload = function(e) {
callback(e.target.result)
}
f.readAsText(bb.getBlob());
}
A simple test:
string2ArrayBuffer("abc",
function (buf) {
var uInt8 = new Uint8Array(buf);
console.log(uInt8); // Returns `Uint8Array { 0=97, 1=98, 2=99}`
arrayBuffer2String(buf,
function (string) {
console.log(string); // returns "abc"
}
)
}
)
All the following is about getting binary strings from array buffers
I'd recommend not to use
var binaryString = String.fromCharCode.apply(null, new Uint8Array(arrayBuffer));
because it
crashes on big buffers (somebody wrote about a "magic" size of 246300, but I got a Maximum call stack size exceeded error on a 120000-byte buffer in Chrome 29)
has really poor performance (see below)
If you really need a synchronous solution, use something like:
var
binaryString = '',
bytes = new Uint8Array(arrayBuffer),
length = bytes.length;
for (var i = 0; i < length; i++) {
binaryString += String.fromCharCode(bytes[i]);
}
It is as slow as the previous one but works correctly. It seems that at the moment of writing there is no reasonably fast synchronous solution to this problem (all the libraries mentioned in this topic use the same approach for their synchronous features).
But what I really recommend is the Blob + FileReader approach:
function readBinaryStringFromArrayBuffer (arrayBuffer, onSuccess, onFail) {
var reader = new FileReader();
reader.onload = function (event) {
onSuccess(event.target.result);
};
reader.onerror = function (event) {
onFail(event.target.error);
};
reader.readAsBinaryString(new Blob([ arrayBuffer ],
{ type: 'application/octet-stream' }));
}
The only disadvantage (not for everyone) is that it is asynchronous. And it is about 8-10 times faster than the previous solutions! (Some details: the synchronous solution on my environment took 950-1050 ms for a 2.4 MB buffer, but the FileReader solution took about 100-120 ms for the same amount of data. I tested both synchronous solutions on a 100 KB buffer and they took almost the same time, so the loop is not much slower than using 'apply'.)
BTW, in How to convert ArrayBuffer to and from String, the author compares the two approaches like I did and gets completely opposite results (his test code is here). Why such different results? Probably because his test string is 1 KB long (he called it "veryLongStr"). My buffer was a really big JPEG image, 2.4 MB in size.
Unlike the solutions here, I needed to convert to/from UTF-8 data. For this purpose I coded the following two functions, using the unescape/escape and encodeURIComponent/decodeURIComponent trick. They're pretty wasteful of memory, allocating 9 times the length of the encoded UTF-8 string, though that should be reclaimed by GC. Just don't use them for 100 MB of text.
function utf8AbFromStr(str) {
var strUtf8 = unescape(encodeURIComponent(str));
var ab = new Uint8Array(strUtf8.length);
for (var i = 0; i < strUtf8.length; i++) {
ab[i] = strUtf8.charCodeAt(i);
}
return ab;
}
function strFromUtf8Ab(ab) {
return decodeURIComponent(escape(String.fromCharCode.apply(null, ab)));
}
Checking that it works:
strFromUtf8Ab(utf8AbFromStr('latinкирилицаαβγδεζηあいうえお'))
-> "latinкирилицаαβγδεζηあいうえお"
(Update Please see the 2nd half of this answer, where I have (hopefully) provided a more complete solution.)
I also ran into this issue, the following works for me in FF 6 (for one direction):
var buf = new ArrayBuffer( 10 );
var view = new Uint8Array( buf );
view[ 3 ] = 4;
alert(Array.prototype.slice.call(view).join(""));
Unfortunately, of course, you end up with ASCII text representations of the values in the array rather than characters. It still should be much more efficient than a loop, though.
e.g. For the example above, the result is 0004000000, rather than several null chars & a chr(4).
Edit:
After looking on MDC here, you may create an ArrayBuffer from an Array as follows:
var arr = new Array(23);
// New Uint8Array() converts the Array elements
// to Uint8s & creates a new ArrayBuffer
// to store them in & a corresponding view.
// To get at the generated ArrayBuffer,
// you can then access it as below, with the .buffer property
var buf = new Uint8Array( arr ).buffer;
To answer your original question, this allows you to convert ArrayBuffer <-> String as follows:
var buf, view, str;
buf = new ArrayBuffer( 256 );
view = new Uint8Array( buf );
view[ 0 ] = 7; // Some dummy values
view[ 2 ] = 4;
// ...
// 1. Buffer -> String (as byte array "list")
str = bufferToString(buf);
alert(str); // Alerts "7,0,4,..."
// 1. String (as byte array) -> Buffer
buf = stringToBuffer(str);
alert(new Uint8Array( buf )[ 2 ]); // Alerts "4"
// Converts any ArrayBuffer to a string
// (a comma-separated list of ASCII ordinals,
// NOT a string of characters from the ordinals
// in the buffer elements)
function bufferToString( buf ) {
var view = new Uint8Array( buf );
return Array.prototype.join.call(view, ",");
}
// Converts a comma-separated ASCII ordinal string list
// back to an ArrayBuffer (see note for bufferToString())
function stringToBuffer( str ) {
var arr = str.split(",")
, view = new Uint8Array( arr );
return view.buffer;
}
For convenience, here is a function for converting a raw Unicode String to an ArrayBuffer (will only work with ASCII/one-byte characters)
function rawStringToBuffer( str ) {
var idx, len = str.length, arr = new Array( len );
for ( idx = 0 ; idx < len ; ++idx ) {
arr[ idx ] = str.charCodeAt(idx) & 0xFF;
}
// You may create an ArrayBuffer from a standard array (of values) as follows:
return new Uint8Array( arr ).buffer;
}
// Alerts "97"
alert(new Uint8Array( rawStringToBuffer("abc") )[ 0 ]);
The above allow you to go from ArrayBuffer -> String & back to ArrayBuffer again, where the string may be stored in eg. .localStorage :)
Hope this helps,
Dan
Just
const buffer = thisReturnsBuffers();
const blob = new Blob([buffer], {type: 'text/plain; charset=utf-8'});
blob.text().then(text => console.log(text));
Or
const stringVal = "string here";
const blob = new Blob([stringVal], {type: 'text/plain; charset=utf-8'});
blob.arrayBuffer().then(buffer => console.log(buffer));
Why are you all making this so complicated?
For node.js and also for browsers using https://github.com/feross/buffer
function ab2str(buf: Uint8Array) {
return Buffer.from(buf).toString('base64');
}
function str2ab(str: string) {
return new Uint8Array(Buffer.from(str, 'base64'))
}
Note: The solutions here didn't work for me. I need to support node.js and browsers and just serialize a Uint8Array to a string. I could serialize it as a number[], but that occupies unnecessary space. With this solution I don't need to worry about encodings, since it's base64. Just in case other people struggle with the same problem... My two cents.
I found I had problems with this approach, basically because I was trying to write the output to a file and it was not encoded properly. Since JS seems to use UCS-2 encoding (source, source), we need to stretch this solution a step further. Here's my enhanced solution, which works for me.
I had no difficulties with generic text, but when it came to Arabic or Korean, the output file didn't have all the chars and instead showed error characters:
File output:
","10k unit":"",Follow:"Õ©íüY‹","Follow %{screen_name}":"%{screen_name}U“’Õ©íü",Tweet:"ĤüÈ","Tweet %{hashtag}":"%{hashtag} ’ĤüÈY‹","Tweet to %{name}":"%{name}U“xĤüÈY‹"},ko:{"%{followers_count} followers":"%{followers_count}…X \Ì","100K+":"100Ì tÁ","10k unit":"Ì è",Follow:"\°","Follow %{screen_name}":"%{screen_name} Ø \°X0",K:"œ",M:"1Ì",Tweet:"¸","Tweet %{hashtag}":"%{hashtag}
Original:
","10k unit":"万",Follow:"フォローする","Follow %{screen_name}":"%{screen_name}さんをフォロー",Tweet:"ツイート","Tweet %{hashtag}":"%{hashtag} をツイートする","Tweet to %{name}":"%{name}さんへツイートする"},ko:{"%{followers_count} followers":"%{followers_count}명의 팔로워","100K+":"100만 이상","10k unit":"만 단위",Follow:"팔로우","Follow %{screen_name}":"%{screen_name} 님 팔로우하기",K:"천",M:"백만",Tweet:"트윗","Tweet %{hashtag}":"%{hashtag}
I took the information from dennis' solution and this post I found.
Here's my code:
function encode_utf8(s) {
return unescape(encodeURIComponent(s));
}
function decode_utf8(s) {
return decodeURIComponent(escape(s));
}
function ab2str(buf) {
var s = String.fromCharCode.apply(null, new Uint8Array(buf));
return decode_utf8(decode_utf8(s))
}
function str2ab(str) {
var s = encode_utf8(str)
var buf = new ArrayBuffer(s.length);
var bufView = new Uint8Array(buf);
for (var i=0, strLen=s.length; i<strLen; i++) {
bufView[i] = s.charCodeAt(i);
}
return bufView;
}
This allows me to save the content to a file without encoding problems.
How it works:
It basically takes the single one-byte chunks composing a UTF-8 character and saves them as single characters (a UTF-8 character built this way can be composed of 1-4 of these characters).
UTF-8 encodes characters in a format that varies from 1 to 4 bytes in length. What we do here is encode the string as a URI component and then translate that component into the corresponding one-byte characters. In this way we don't lose the information carried by UTF-8 characters that are more than 1 byte long.
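A quick worked example of that intermediate step:
// 'é' (U+00E9) is two bytes in UTF-8: 0xC3 0xA9
encodeURIComponent('é'); // "%C3%A9" - the UTF-8 bytes as percent escapes
unescape('%C3%A9'); // "Ã©" - two one-byte characters, codes 0xC3 and 0xA9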
If you use a huge array, for example arr.length = 1000000,
you can use this code to avoid call stack problems:
function ab2str(buf) {
var bufView = new Uint16Array(buf);
var unis =""
for (var i = 0; i < bufView.length; i++) {
unis=unis+String.fromCharCode(bufView[i]);
}
return unis
}
The reverse function (mangini's answer from the top):
function str2ab(str) {
var buf = new ArrayBuffer(str.length*2); // 2 bytes for each char
var bufView = new Uint16Array(buf);
for (var i=0, strLen=str.length; i<strLen; i++) {
bufView[i] = str.charCodeAt(i);
}
return buf;
}
Well, here's a somewhat convoluted way of doing the same thing:
var string = "Blah blah blah", output;
var bb = new (window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder)();
bb.append(string);
var f = new FileReader();
f.onload = function(e) {
// do whatever
output = e.target.result;
}
f.readAsArrayBuffer(bb.getBlob());
Edit: BlobBuilder has long been deprecated in favor of the Blob constructor, which did not exist when I first wrote this post. Here's an updated version. (And yes, this has always been a very silly way to do the conversion, but it was just for fun!)
var string = "Blah blah blah", output;
var f = new FileReader();
f.onload = function(e) {
// do whatever
output = e.target.result;
};
f.readAsArrayBuffer(new Blob([string]));
The following is a working TypeScript implementation:
bufferToString(buffer: ArrayBuffer): string {
return String.fromCharCode.apply(null, Array.from(new Uint16Array(buffer)));
}
stringToBuffer(value: string): ArrayBuffer {
let buffer = new ArrayBuffer(value.length * 2); // 2 bytes per char
let view = new Uint16Array(buffer);
for (let i = 0, length = value.length; i < length; i++) {
view[i] = value.charCodeAt(i);
}
return buffer;
}
I've used this for numerous operations while working with crypto.subtle.
Recently I also needed to do this for one of my projects, so I did some research and found a result from Google's developer community that states it in a simple manner:
For ArrayBuffer to String
function ab2str(buf) {
return String.fromCharCode.apply(null, new Uint16Array(buf));
}
// Here Uint16 can be a different type, like Uint8/Uint32, depending on your buffer value type.
For String to ArrayBuffer
function str2ab(str) {
var buf = new ArrayBuffer(str.length*2); // 2 bytes for each char
var bufView = new Uint16Array(buf);
for (var i=0, strLen=str.length; i < strLen; i++) {
bufView[i] = str.charCodeAt(i);
}
return buf;
}
// The same applies here for the Uint16Array.
For a more detailed reference, see this blog by Google.
Let's say you have an arrayBuffer binaryStr:
let text = String.fromCharCode.apply(null, new Uint8Array(binaryStr));
and then you assign the text to the state.
I used this and it works for me.
function arrayBufferToBase64( buffer ) {
var binary = '';
var bytes = new Uint8Array( buffer );
var len = bytes.byteLength;
for (var i = 0; i < len; i++) {
binary += String.fromCharCode( bytes[ i ] );
}
return window.btoa( binary );
}
function base64ToArrayBuffer(base64) {
var binary_string = window.atob(base64);
var len = binary_string.length;
var bytes = new Uint8Array( len );
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
return bytes.buffer;
}
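A usage sketch, round-tripping a small buffer through base64 with the two functions above:
var buf = new Uint8Array([72, 105]).buffer; // the bytes of "Hi"
var b64 = arrayBufferToBase64(buf); // "SGk="
var back = new Uint8Array(base64ToArrayBuffer(b64)); // Uint8Array [72, 105]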
stringToArrayBuffer(byteString) {
var byteArray = new Uint8Array(byteString.length);
for (var i = 0; i < byteString.length; i++) {
byteArray[i] = byteString.codePointAt(i);
}
return byteArray;
}
arrayBufferToString(buffer) {
var byteArray = new Uint8Array(buffer);
var byteString = '';
for (var i = 0; i < byteArray.byteLength; i++) {
byteString += String.fromCodePoint(byteArray[i]);
}
return byteString;
}
After playing with mangini's solution for converting from ArrayBuffer to String - ab2str (which is the most elegant and useful one I have found - thanks!) - I had some issues when handling large arrays. More specifically, calling String.fromCharCode.apply(null, new Uint16Array(buf)); throws an error:
arguments array passed to Function.prototype.apply is too large.
In order to solve it (bypass) I have decided to handle the input ArrayBuffer in chunks. So the modified solution is:
function ab2str(buf) {
var str = "";
var ab = new Uint16Array(buf);
var abLen = ab.length;
var CHUNK_SIZE = Math.pow(2, 16);
var offset, len, subab;
for (offset = 0; offset < abLen; offset += CHUNK_SIZE) {
len = Math.min(CHUNK_SIZE, abLen-offset);
subab = ab.subarray(offset, offset+len);
str += String.fromCharCode.apply(null, subab);
}
return str;
}
The chunk size is set to 2^16 because this was the size I have found to work in my development landscape. Setting a higher value caused the same error to reoccur. It can be altered by setting the CHUNK_SIZE variable to a different value. It is important to have an even number.
Note on performance - I did not make any performance tests for this solution. However, since it is based on the previous solution, and can handle large arrays, I see no reason why not to use it.
ArrayBuffer -> Buffer -> String(Base64)
Change ArrayBuffer to Buffer and then to String.
Buffer.from(arrBuffer).toString("base64");
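A sketch of the reverse direction (base64Str stands for the string produced above); note that a Node.js Buffer may be a view into a larger shared ArrayBuffer, hence the explicit slice:
const buf = Buffer.from(base64Str, "base64");
const arrBuffer = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);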
See here: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Typed_arrays/StringView
(a C-like interface for strings based upon the JavaScript ArrayBuffer interface)
The "native" binary string that atob() returns is a 1-byte-per-character Array.
So we shouldn't store 2 byte into a character.
var arrayBufferToString = function(buffer) {
return String.fromCharCode.apply(null, new Uint8Array(buffer));
}
var stringToArrayBuffer = function(str) {
return (new Uint8Array([].map.call(str,function(x){return x.charCodeAt(0)}))).buffer;
}
Yes:
const encstr = (`TextEncoder` in window) ? new TextEncoder().encode(str) : Uint8Array.from(str, c => c.codePointAt(0));
I'd recommend NOT using deprecated APIs like BlobBuilder
BlobBuilder has long been deprecated by the Blob object. Compare the code in Dennis' answer — where BlobBuilder is used — with the code below:
function arrayBufferGen(str, cb) {
var b = new Blob([str]);
var f = new FileReader();
f.onload = function(e) {
cb(e.target.result);
}
f.readAsArrayBuffer(b);
}
Note how much cleaner and less bloated this is compared to the deprecated method.
Use spread syntax instead of loops:
arrbuf = new Uint8Array([104, 101, 108, 108, 111])
text = String.fromCharCode(...arrbuf)
console.log(text)
For substrings arrbuf.slice() can be employed.
For me this worked well.
static async hash(message) {
const data = new TextEncoder().encode(message);
const hashBuffer = await crypto.subtle.digest('SHA-256', data)
const hashArray = Array.from(new Uint8Array(hashBuffer))
const hashHex = hashArray.map((b) => b.toString(16).padStart(2, '0')).join('')
return hashHex
}
var decoder = new TextDecoder();
var string = decoder.decode(arrayBuffer);
See https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder/decode
From emscripten:
function stringToUTF8Array(str, outU8Array, outIdx, maxBytesToWrite) {
if (!(maxBytesToWrite > 0)) return 0;
var startIdx = outIdx;
var endIdx = outIdx + maxBytesToWrite - 1;
for (var i = 0; i < str.length; ++i) {
var u = str.charCodeAt(i);
if (u >= 55296 && u <= 57343) {
var u1 = str.charCodeAt(++i);
u = 65536 + ((u & 1023) << 10) | u1 & 1023
}
if (u <= 127) {
if (outIdx >= endIdx) break;
outU8Array[outIdx++] = u
} else if (u <= 2047) {
if (outIdx + 1 >= endIdx) break;
outU8Array[outIdx++] = 192 | u >> 6;
outU8Array[outIdx++] = 128 | u & 63
} else if (u <= 65535) {
if (outIdx + 2 >= endIdx) break;
outU8Array[outIdx++] = 224 | u >> 12;
outU8Array[outIdx++] = 128 | u >> 6 & 63;
outU8Array[outIdx++] = 128 | u & 63
} else {
if (outIdx + 3 >= endIdx) break;
outU8Array[outIdx++] = 240 | u >> 18;
outU8Array[outIdx++] = 128 | u >> 12 & 63;
outU8Array[outIdx++] = 128 | u >> 6 & 63;
outU8Array[outIdx++] = 128 | u & 63
}
}
outU8Array[outIdx] = 0;
return outIdx - startIdx
}
Use like:
stringToUTF8Array('abs', new Uint8Array(4), 0, 4); // the buffer needs room for the null terminator

Writing UTF8 text to file

I am using the following function to save text to a file (on IE-8 w/ActiveX).
function saveFile(strFullPath, strContent)
{
var fso = new ActiveXObject( "Scripting.FileSystemObject" );
var flOutput = fso.CreateTextFile( strFullPath, true ); //true for overwrite
flOutput.Write( strContent );
flOutput.Close();
}
The code works fine if the text is fully Latin-9, but when the text contains even a single UTF-8 encoded character, the write fails.
The ActiveX FileSystemObject does not seem to support UTF-8. I tried UTF-16 encoding the text first, but the result was garbled. What is a workaround?
Try this:
function saveFile(strFullPath, strContent) {
var fso = new ActiveXObject("Scripting.FileSystemObject");
var utf8Enc = new ActiveXObject("Utf8Lib.Utf8Enc");
var flOutput = fso.CreateTextFile(strFullPath, true); //true for overwrite
flOutput.BinaryWrite(utf8Enc.UnicodeToUtf8(strContent));
flOutput.Close();
}
The CreateTextFile method has a third parameter which decides whether the file is written as Unicode or not. You can do it like this:
var flOutput = fso.CreateTextFile(strFullPath,true, true);
Interestingly, way back I had created this little script to save files in unicode format:
Set FSO=CreateObject("Scripting.FileSystemObject")
Value = InputBox ("Enter the path of the file you want to save in Unicode format.")
If Len(Trim(Value)) > 0 Then
If FSO.FileExists(Value) Then
Set iFile = FSO.OpenTextFile (Value)
Data = iFile.ReadAll
iFile.Close
Set oFile = FSO.CreateTextFile (FSO.GetParentFolderName(Value) & "\Unicode" & GetExtention(Value),True,True)
oFile.Write Data
oFile.Close
If FSO.FileExists (FSO.GetParentFolderName(Value) & "\Unicode" & GetExtention(Value)) Then
MsgBox "File successfully saved to:" & vbCrLf & vbCrLf & FSO.GetParentFolderName(Value) & "\Unicode" & GetExtention(Value),vbInformation
Else
MsgBox "Unknown error was encountered!",vbCritical
End If
Else
MsgBox "Make sure that you have entered the correct file path.",vbExclamation
End If
End If
Set iFile = Nothing
Set oFile= Nothing
Set FSO= Nothing
Function GetExtention (Path)
GetExtention = Right(Path,4)
End Function
Note: This is VBScript code; save it in a file like unicode.vbs, and once you double-click that file, it will run.
Add a third parameter, true, in your call to the CreateTextFile method. See this page.
function saveFile(strFullPath, strContent) {
var fso = new ActiveXObject( "Scripting.FileSystemObject" );
var flOutput = fso.CreateTextFile( strFullPath, true, true ); //true for overwrite // true for unicode
flOutput.Write( strContent );
flOutput.Close();
}
object.CreateTextFile(filename[, overwrite[, unicode]])
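If the output really has to be UTF-8 rather than UTF-16, a commonly used alternative is the ADODB.Stream ActiveX object, which accepts an explicit charset. A minimal sketch, assuming ADODB is available in the same environment:
function saveFileUtf8(strFullPath, strContent)
{
var stream = new ActiveXObject("ADODB.Stream");
stream.Type = 2; // adTypeText
stream.Charset = "utf-8";
stream.Open();
stream.WriteText(strContent);
stream.SaveToFile(strFullPath, 2); // adSaveCreateOverWrite
stream.Close();
}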
