Create a Lua-like JavaScript i18n internationalization

In our Freifunk project gluon, we use GNU gettext i18n internationalisation in our Lua code (for example in the package gluon-config-mode-hostname); we create separate files in an i18n subfolder. I want to use these .po files in our status-page JavaScript code:
https://github.com/rubo77/gluon/tree/status-i18n/package/gluon-status-page/i18n
Those contain the translations created by the msginit program.
How can I use the same i18n files in the JavaScript-based status page (without jQuery) to translate those strings?

Here's a dirty but verbose way of accomplishing it. Is this what you're looking for?
let url = "https://raw.githubusercontent.com/rubo77/gluon/status-i18n/package/gluon-status-page/i18n/de.po"
fetch(url)
  .then((res) => {
    return res.body.getReader();
  })
  .then((reader) => {
    return reader.read();
  })
  .then((stream) => {
    let decoder = new TextDecoder();
    let body = decoder.decode(stream.value || new Uint8Array);
    return body
  })
  .then((body) => {
    let text = body.replace(/\\n/g, '');
    let lines = text.split('\n');
    console.log(text)
    let arr = []
    let obj = {}
    for (let i = 0; i < lines.length; i++) {
      // key:value pairs
      if (lines[i].indexOf(':') !== -1) {
        let line = lines[i].replace(/"/g, '');
        let pair = line.split(':');
        if (pair.length) {
          obj[pair[0]] = pair[1].trim();
        }
      }
      // msgid
      if (lines[i].indexOf('msgid') !== -1) {
        let msgobj = {};
        let msgid = lines[i].split(' "')[1].replace(/\"/g, '');
        msgobj.msgid = msgid;
        // msgstr
        if (lines[i+1].indexOf('msgstr') !== -1) {
          let msgstr = lines[i+1].split(' "')[1].replace(/\"/g, '');
          msgobj.msgstr = msgstr;
        }
        arr.push(msgobj);
      }
    }
    arr.push(obj)
    document.getElementById('output-source')
      .innerHTML = body
    document.getElementById('output-js')
      .innerHTML = JSON.stringify(arr, null, 2);
  });
.output {
  background-color: #fafafa;
  border: 1px solid #e1e1e1;
}
<pre id="output-source" class="output"></pre>
<pre id="output-js" class="output"></pre>
NB: The above example likely only works in Chrome. Here's a JSBin that should work in Firefox.
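To actually use the parsed entries for translation, a lookup helper along these lines could work. This is a sketch only; the helper name makeTranslator and the fallback-to-msgid behaviour are my own choices, not part of gluon:

// Build a msgid -> msgstr map from the `arr` produced above,
// then translate by looking strings up by their msgid.
function makeTranslator(entries) {
  let table = {};
  entries.forEach((entry) => {
    if (entry.msgid && entry.msgstr) {
      table[entry.msgid] = entry.msgstr;
    }
  });
  return function _(msgid) {
    return table[msgid] || msgid; // fall back to the untranslated string
  };
}

// e.g. inside the last .then() above:
// let _ = makeTranslator(arr);
// element.textContent = _('Some status-page string'); // placeholder msgid, not taken from the real .po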

Related

Issues with request and cheerio when web-scraping

I'm trying to write code that makes requests to a website for web scraping.
These are the steps:
First part of the code:
The program makes the request to the mainURL.
The program selects some objects from the HTML of the mainURL and stores them in an array of objects (advert). One of each object's properties is its link, which we'll call numberURL; the code selects it automatically using a CSS selector. There are roughly 80-90 such objects.
The program makes a request to every numberURL (80-90 requests), and for each of them it sets further properties on the same object and selects another link, which we'll call accountURL.
The program creates a CSV file and writes every object to its own row.
(End of the first part.)
The first part actually works well and has no issues, but the second part does.
Second part of the code:
The program makes a request to every accountURL collected in the first part.
The program selects some objects from the HTML of each accountURL and stores them in another array of other objects (account), also using CSS selectors.
The program should console.log() all the account objects.
(End of the second part.)
But the second part has a bug: when the objects are logged, their properties have not changed from their default values.
For debugging purposes I took one advert object and set its value manually in the code:
post[0].link = 'https://999.md/ru/profile/denisserj'
When running the code for this object it actually works correctly and shows the changed properties, but for the rest of them it doesn't.
I tried setting some timeouts, thinking that the code was reading the link before the second request had finished, but with no effect.
I also tried to console.log the link to see if it exists in the array; it does exist there, but that also had no effect.
Finally here is the code:
// CLASSES
class advert {
constructor() {
this.id = 0;
this.tile = new String();
this.link = new String();
this.phone = new String();
this.account = new String();
this.accountLink = new String();
this.text = new String();
this.operator = new String();
}
show() {
console.log(this.id, this.title, this.link, this.phone, this.account, this.accountLink, this.text, this.operator);
}
}
class account {
constructor() {
this.name = 0;
this.createdAt = 0;
this.phone = [];
this.ads = [];
this.adsNumber = 0;
}
show() {
console.log(this.name, this.createdAt, this.phone, this.ads, this.adsNumber);
}
}
// HEADERS
const mainRequest = require('request');
const auxRequest = require('request');
const cheerio1 = require('cheerio');
const cheerio2 = require('cheerio');
const fs = require('fs');
const fs2 = require('fs');
const adFile = fs.createWriteStream('anunturi.csv');
const accFile = fs2.createWriteStream('conturi.csv');
// SETTINGS
const host = 'https://999.md'
const category = 'https://999.md/ru/list/transport/cars'
const timeLimit = 60; //seconds
// VARIABLES
let post = [];
let postNumber = 0;
let acc = [];
// FUNCTIONS
function deleteFromArray(j) {
post.splice(j, 1);
}
function number(i) {
let category = post[i].link;
auxRequest(category, (error, response, html) => {
if (!error && response.statusCode == 200) {
const $ = cheerio1.load(html);
let phone;
const siteTitle = $('strong').each((id, el) => {
phone = $(el).text();
});
const txt = $('.adPage__content__description').html();
const person = $('.adPage__header__stats').find('.adPage__header__stats__owner').text();
const linkToPerson = host + $('.adPage__header__stats').find('.adPage__header__stats__owner').find('a').attr('href');
post[i].phone = phone;
post[i].account = person;
post[i].accountLink = linkToPerson;
post[i].text = txt;
if (i == postNumber) {
console.log('1. Number Putting done')
writeToFileAd(accountPutter, writeToFileAccount);
}
}
});
}
function writeToFileAd() {
adFile.write('ID, Titlu, Link, Text, Cont, LinkCont, Operator\n')
for (let i = 0; i <= postNumber; i++) {
adFile.write(`${post[i].id}, ${post[i].title}, ${post[i].link}, ${post[i].phone}, ${post[i].account}, ${post[i].accountLink}, ${post[i].operator}\n`);
}
console.log('2. Write To File Ad done')
accountPutter();
}
function accountAnalyzis(i) {
let category = post[i].link;
const mainRequest = require('request');
category = category.replace('/ru/', '/ro/');
mainRequest(category, (error, response, html) => {
if (!error && response.statusCode == 200) {
const $ = cheerio2.load(html);
const name = $('.user-profile__sidebar-info__main-wrapper').find('.login-wrapper').text();
let createdAt = $('.date-registration').text();
createdAt = createdAt.replace('Pe site din ', '');
const phones = $('.user-profile__info__data').find('dd').each((id, el) => {
let phone = $(el).text();
acc[i].phone.push(phone);
});
const ads = $('.profile-ads-list-photo-item-title').find('a').each((id, el) => {
let ad = host + $(el).attr('href');
acc[i].ads.push(ad);
acc[i].adsNumber++;
});
acc[i].name = name;
acc[i].createdAt = createdAt;
console.log(name)
if (i == postNumber) {
console.log('3. Account Putting done')
writeToFileAccount();
}
}
});
}
function writeToFileAccount() {
for (let i = 0; i <= postNumber; i++) {
accFile.write(`${acc[i].name}, ${acc[i].createdAt}, ${acc[i].phone}, ${acc[i].ads}, ${acc[i].adsNumber}\n`);
}
console.log('4. Write to file Account done');
}
function numberPutter() {
for (let i = 0; i <= postNumber; i++) {
number(i);
}
}
function accountPutter() {
for (let i = 0; i <= postNumber; i++) {
accountAnalyzis(i);
}
}
// MAIN
mainRequest(category, (error, response, html) => {
let links = [];
for (let i = 0; i < 1000; i++) {
post[i] = new advert();
}
for (let i = 0; i < 1000; i++) {
acc[i] = new account();
}
if (!error && response.statusCode == 200) {
const $ = cheerio2.load(html);
const siteTitle = $('.ads-list-photo-item-title').each((id, el) => {
const ref = host + $(el).children().attr('href');
const title = $(el).text();
post[id].id = id + 1;
post[id].title = title;
post[id].link = ref;
links[id] = ref;
postNumber = id;
});
post[0].link = 'https://999.md/ru/profile/denisserj'
numberPutter()
}
});
You have an error in the line
const siteTitle = $('.ads-list-photo-item-title').each((id, el) => {
What you actually want is .find('a').each...
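In other words, something along these lines. This is a sketch of just the corrected selector; the rest of the callback stays as in the question:

const siteTitle = $('.ads-list-photo-item-title').find('a').each((id, el) => {
  const ref = host + $(el).attr('href'); // el is now the <a> element itself
  const title = $(el).text();
  post[id].id = id + 1;
  post[id].title = title;
  post[id].link = ref;
  links[id] = ref;
  postNumber = id;
});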

Little Code Review | From Excel to JSON in Node.js

Intro -> I have to convert an Excel file, which usually arrives laid out with the language codes (DE, GR, EN, ...) in the first row and the labels (Title, Subtitle, ...) in column A, into a JSON file that must look like this:
"DE": {
"Title":"Title_DE",
"Subtitle":"Subtitle_DE",
"Title_2":"Title_2_DE",
"Subtitle_2":"Subtitle_2_DE"
}
"GR": {
"Title":"Title_GR",
"Subtitle":"Subtitle_GR",
"Title_2":"Title_2_GR",
"Subtitle_2":"Subtitle_2_GR"
}
"EN": {
"Title":"Title_EN",
"Subtitle":"Subtitle_EN",
"Title_2":"Title_2_EN",
"Subtitle_2":"Subtitle_2_EN"
}
What I've done -> I have used the node module xlsx to parse the Excel file. The code currently works, but something tells me this was not the best way to do it, or at least that there is a clearer or smarter way.
const XLSX = require('xlsx');
const fs = require('fs');

let workbook = XLSX.readFile('./src/xls/test.xlsx');
let sheet_name_list = workbook.SheetNames; //--> sheet names
let isocodes = [];
let labels = [];
let copy = [];
let obj = {};
let j = 0;

sheet_name_list.map(sheet => { //--> sheet names loop
  let worksheet = workbook.Sheets[sheet];
  for (index in worksheet) {
    let idCol = index.substring(1, 3);
    let idRow = index.substring(0, 1);
    if (idCol === "1") {
      isocodes.push((worksheet[index].v).trim()); //--> isocodes
    }
    if (idRow === "A") {
      labels.push((worksheet[index].v).trim()); //--> labels
    }
    if (idRow != "A" && idCol != "1") {
      copy.push(worksheet[index].v); //--> copy
    }
  }
});

isocodes.shift(); //--> delete undefined
labels.shift();
copy.shift();

isocodes.map(iso => { //--> create object to convert
  obj[iso] = {};
  for (let i = 0; i < labels.length; i++) {
    obj[iso][labels[i]] = copy[(i * isocodes.length) + j];
  }
  j++;
});

//--> write json files in their folder
fs.writeFile('src/lang/desktop.json', JSON.stringify(obj).replace(/\\n|\\r/g, ''), 'utf8', console.log("desktop json file created"));
fs.writeFile('src/lang/mobile.json', JSON.stringify(obj).replace(/\\n|\\r/g, ''), 'utf8', console.log("mobile json file created"));
Please share your thoughts!!!
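For comparison, here is a sketch of what the same transformation could look like using the library's sheet_to_json helper (assuming, as in the intro, that the language codes sit in the first row and the labels in column A; the file paths are the ones from the question):

const XLSX = require('xlsx');
const fs = require('fs');

const workbook = XLSX.readFile('./src/xls/test.xlsx');
const sheet = workbook.Sheets[workbook.SheetNames[0]];

// rows as arrays: first row = [<empty corner cell>, 'DE', 'GR', 'EN'],
// following rows = [label, value, value, ...]
const rows = XLSX.utils.sheet_to_json(sheet, { header: 1 });
const isocodes = rows[0].slice(1);

const result = {};
isocodes.forEach((iso, col) => {
  result[iso] = {};
  rows.slice(1).forEach(row => {
    result[iso][String(row[0]).trim()] = row[col + 1];
  });
});

fs.writeFileSync('src/lang/desktop.json', JSON.stringify(result, null, 2));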

How to convert an audio file into a byte array with JavaScript

I'm trying to convert a .wav file to a byte-array string. I need to do this on the back-end; targeting the file is becoming an issue.
files.forEach(file => {
  let index = files.indexOf(file)
  let reader = new FileReader();
  reader.readAsArrayBuffer(file);
  console.log(reader.result);
  reader.onload = function (event) {
    let byteArray = new Uint8Array(reader.result);
    let FileName = file.name;
    let dataAsByteArrayString = byteArray.toString();
    var listHtml = $list.html();
The above code uses npm's filereader package, which expects to be given a file. I'm having difficulty with this since there is no front-end drag-and-drop of the file.
The file that is generated is called "response.wav". How would I convert this file using JavaScript and Node extensions? Thanks!
I don't know if this will help, but in the last project I worked on we parsed a .wav file using the Node Buffer API and wrote it using the Node file API.
If you have more questions about the code I can direct you to the person who worked on this file the most. I hope it helps somewhat.
https://github.com/IntelliSound/intelliSound-Server/blob/development/lib/sound-data-parser.js
'use strict';
function ParsedWave(buffer) {
const RIFF_HEADER_OFFSET = 0;
const FILE_SIZE_OFFSET = 4;
const RIFF_FORMAT_OFFSET = 8;
const SUBCHUNK1_ID_OFFSET = 12;
const AUDIO_FORMAT_OFFSET = 20;
const NUMBER_OF_CHANNELS_OFFSET = 22;
const SAMPLE_RATE_OFFSET = 24;
const BITS_PER_SAMPLE_OFFSET = 34;
const SUBCHUNK2_ID_OFFSET = 36;
const SUBCHUNK2_SIZE_OFFSET = 40;
const DATA_OFFSET = 44;
this.buffer = buffer;
this.riff = buffer.slice(RIFF_HEADER_OFFSET, RIFF_HEADER_OFFSET + 4).toString('utf8');
this.fileSize = buffer.readUInt32LE(FILE_SIZE_OFFSET);
this.riffType = buffer.slice(RIFF_FORMAT_OFFSET, RIFF_FORMAT_OFFSET + 4).toString('utf8');
this.subChunk1Id = buffer.slice(SUBCHUNK1_ID_OFFSET, SUBCHUNK1_ID_OFFSET + 4).toString('utf8');
this.audioFormat = buffer.readUInt16LE(AUDIO_FORMAT_OFFSET);
this.numberOfChannels = buffer.readUInt16LE(NUMBER_OF_CHANNELS_OFFSET);
this.sampleRate = buffer.readUInt32LE(SAMPLE_RATE_OFFSET);
this.bitsPerSample = buffer.readUInt16LE(BITS_PER_SAMPLE_OFFSET);
this.subChunk2Id = buffer.slice(SUBCHUNK2_ID_OFFSET, SUBCHUNK2_ID_OFFSET + 4).toString('utf8');
this.subChunk2Size = buffer.readUInt32LE(SUBCHUNK2_SIZE_OFFSET);
this.data = buffer.slice(DATA_OFFSET, this.subChunk2Size + DATA_OFFSET);
}
// Andrew - The bufferMapper function is going to accept a parsed wave-file and output
// an array of values corresponding to the data subchunk in a format which can
// be accepted as input to the neural network.
const bufferMapper = parsedWave => {
const SIXTEEN_BIT_ZERO = 32768;
const SIXTEEN_BIT_MAX = 65535;
parsedWave.neuralArray = [];
for (let i = 0; i < parsedWave.data.length; i += 2) {
const sample = parsedWave.data.readInt16LE(i);
const unsignedSample = sample + SIXTEEN_BIT_ZERO;
const sigmoidSample = unsignedSample / SIXTEEN_BIT_MAX;
parsedWave.neuralArray.push(sigmoidSample);
}
return parsedWave;
};
module.exports = data => {
const parsedWaveFile = new ParsedWave(data);
if (parsedWaveFile.riff !== 'RIFF') {
throw new TypeError('incorrect file type, must be RIFF format');
}
if (parsedWaveFile.fileSize > 10000000) {
throw new TypeError('file too large, please limit file size to less than 10MB');
}
if (parsedWaveFile.riffType !== 'WAVE') {
throw new TypeError('file must be a WAVE');
}
if (parsedWaveFile.subChunk1Id !== 'fmt ') {
throw new TypeError('the first subchunk must be fmt');
}
if (parsedWaveFile.audioFormat !== 1) {
throw new TypeError('wave file must be uncompressed linear PCM');
}
if (parsedWaveFile.numberOfChannels > 2) {
throw new TypeError('wave file must have 2 or less channels');
}
if (parsedWaveFile.sampleRate > 48000) {
throw new TypeError('wave file must have sample rate of less than 48k');
}
if (parsedWaveFile.bitsPerSample !== 16) {
throw new TypeError(`file's bit depth must be 16`);
}
if (parsedWaveFile.subChunk2Id !== 'data') {
throw new TypeError('subchunk 2 must be data');
}
const neuralMappedWaveFile = bufferMapper(parsedWaveFile);
return neuralMappedWaveFile;
};
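For context, a minimal usage sketch of the module above, assuming it has been saved locally as sound-data-parser.js and that the response.wav from the question sits next to the script (reading the file with fs, as the next answer also shows):

const fs = require('fs');
const parseWave = require('./sound-data-parser'); // assumed local path for the file above

fs.readFile('./response.wav', (err, data) => {
  if (err) throw err;
  const parsed = parseWave(data);          // data is a Node Buffer
  console.log(parsed.sampleRate, parsed.numberOfChannels, parsed.bitsPerSample);
  console.log(parsed.neuralArray.length);  // one normalized value per 16-bit sample
});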
Using the included fs module, you can read in your wav file like so:
const fs = require('fs');
const path = './path/to/my.wav';

fs.readFile(path, (err, data) => {
  // data is a Buffer object
});
For documentation on working with a Node.js Buffer see here. Now, if you are more interested in the file-conversion portion, there are a couple of libraries out there. If you just need the conversion functionality and don't want to implement it yourself, node-fluent-ffmpeg may work for you. If you want to implement it yourself, this node-wav file may be a good reference (too much to paste here).
If you need to go from Buffer to ArrayBuffer, this SO answer shows some options.
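Putting those pieces together for the original question, here is a sketch of reading response.wav on the back end and producing a byte-array string without FileReader (the file name is taken from the question; no extra libraries are assumed):

const fs = require('fs');

fs.readFile('./response.wav', (err, data) => {
  if (err) throw err;
  // data is a Buffer (a Uint8Array subclass), so it already behaves like a byte array
  const byteArray = Uint8Array.from(data);
  const dataAsByteArrayString = byteArray.toString(); // e.g. "82,73,70,70,..."
  console.log(dataAsByteArrayString.slice(0, 40));
});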

How to correctly extract text from a pdf using pdf.js

I'm new to ES6 and Promises. I'm trying pdf.js to extract the text of all pages of a PDF file into a string array, and when the extraction is done I want to parse the array somehow. Say the PDF file (passed correctly via typedarray) has 4 pages and my code is:
let str = [];
PDFJS.getDocument(typedarray).then(function(pdf) {
  for(let i = 1; i <= pdf.numPages; i++) {
    pdf.getPage(i).then(function(page) {
      page.getTextContent().then(function(textContent) {
        for(let j = 0; j < textContent.items.length; j++) {
          str.push(textContent.items[j].str);
        }
        parse(str);
      });
    });
  }
});
It manages to work, but of course the problem is that my parse function is called 4 times. I want to call parse only once, after the extraction of all 4 pages is done.
Similar to https://stackoverflow.com/a/40494019/1765767 -- collect page promises using Promise.all and don't forget to chain then's:
function gettext(pdfUrl) {
  var pdf = pdfjsLib.getDocument(pdfUrl);
  return pdf.then(function(pdf) { // get all pages text
    var maxPages = pdf.pdfInfo.numPages;
    var countPromises = []; // collecting all page promises
    for (var j = 1; j <= maxPages; j++) {
      var page = pdf.getPage(j);
      var txt = "";
      countPromises.push(page.then(function(page) { // add page promise
        var textContent = page.getTextContent();
        return textContent.then(function(text){ // return content promise
          return text.items.map(function (s) { return s.str; }).join(''); // value page text
        });
      }));
    }
    // Wait for all pages and join text
    return Promise.all(countPromises).then(function (texts) {
      return texts.join('');
    });
  });
}

// waiting on gettext to finish completion, or error
gettext("https://cdn.mozilla.net/pdfjs/tracemonkey.pdf").then(function (text) {
  alert('parse ' + text);
},
function (reason) {
  console.error(reason);
});

<script src="https://npmcdn.com/pdfjs-dist/build/pdf.js"></script>
A slightly cleaner version of async5's answer, updated for the newer version of "pdfjs-dist": "^2.0.943".
import PDFJS from "pdfjs-dist";
import PDFJSWorker from "pdfjs-dist/build/pdf.worker.js"; // add this to fit 2.3.0

PDFJS.disableTextLayer = true;
PDFJS.disableWorker = true; // not available anymore since 2.3.0 (see imports)

const getPageText = async (pdf: Pdf, pageNo: number) => {
  const page = await pdf.getPage(pageNo);
  const tokenizedText = await page.getTextContent();
  const pageText = tokenizedText.items.map(token => token.str).join("");
  return pageText;
};

/* see example of a PDFSource below */
export const getPDFText = async (source: PDFSource): Promise<string> => {
  Object.assign(window, {pdfjsWorker: PDFJSWorker}); // added to fit 2.3.0
  const pdf: Pdf = await PDFJS.getDocument(source).promise;
  const maxPages = pdf.numPages;
  const pageTextPromises = [];
  for (let pageNo = 1; pageNo <= maxPages; pageNo += 1) {
    pageTextPromises.push(getPageText(pdf, pageNo));
  }
  const pageTexts = await Promise.all(pageTextPromises);
  return pageTexts.join(" ");
};
This is the corresponding typescript declaration file that I have used if anyone needs it.
declare module "pdfjs-dist";
type TokenText = {
str: string;
};
type PageText = {
items: TokenText[];
};
type PdfPage = {
getTextContent: () => Promise<PageText>;
};
type Pdf = {
numPages: number;
getPage: (pageNo: number) => Promise<PdfPage>;
};
type PDFSource = Buffer | string;
declare module 'pdfjs-dist/build/pdf.worker.js'; // needed in 2.3.0
Example of how to get a PDFSource from a File using Buffer (from Node types):
file.arrayBuffer().then((ab: ArrayBuffer) => {
  const pdfSource: PDFSource = Buffer.from(ab);
});
Here's a shorter (not necessarily better) version:
async function getPdfText(data) {
  let doc = await pdfjsLib.getDocument({data}).promise;
  let pageTexts = Array.from({length: doc.numPages}, async (v, i) => {
    return (await (await doc.getPage(i + 1)).getTextContent()).items.map(token => token.str).join('');
  });
  return (await Promise.all(pageTexts)).join('');
}
Here, data is a string or buffer (or you could change it to take the url, etc., instead).
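A usage sketch, assuming pdfjsLib is already loaded on the page and the PDF is fetched first (the URL here is made up):

fetch('/files/sample.pdf')
  .then((res) => res.arrayBuffer())
  .then((buf) => getPdfText(new Uint8Array(buf)))
  .then((text) => console.log(text.slice(0, 200)))
  .catch(console.error);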
Here's another TypeScript version with await and Promise.all, based on the other answers:
import { getDocument } from "pdfjs-dist";
import {
  DocumentInitParameters,
  PDFDataRangeTransport,
  TypedArray,
} from "pdfjs-dist/types/display/api";

export const getPdfText = async (
  src: string | TypedArray | DocumentInitParameters | PDFDataRangeTransport
): Promise<string> => {
  const pdf = await getDocument(src).promise;
  const pageList = await Promise.all(Array.from({ length: pdf.numPages }, (_, i) => pdf.getPage(i + 1)));
  const textList = await Promise.all(pageList.map((p) => p.getTextContent()));
  return textList
    .map(({ items }) => items.map(({ str }) => str).join(""))
    .join("");
};
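A usage sketch (the file URL is made up):

// src may also be a TypedArray or DocumentInitParameters, per the signature above
getPdfText("https://example.com/sample.pdf")
  .then((text) => console.log(text))
  .catch((err) => console.error(err));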
If you use the PDFViewer component, here is my solution, which doesn't involve any promises or asynchrony:
function getDocumentText(viewer) {
  let text = '';
  for (let i = 0; i < viewer.pagesCount; i++) {
    const { textContentItemsStr } = viewer.getPageView(i).textLayer;
    for (let item of textContentItemsStr)
      text += item;
  }
  return text;
}
I didn't know how to do it either, but thanks to async5 I got it working. I copied his code and updated it to the new version of pdf.js.
I made minimal corrections and also took the liberty of not joining all the pages into a single string. In addition, I used a regular expression that removes many of the empty spaces that PDF extraction unfortunately ends up creating (it doesn't handle every case, but the vast majority).
The result should be a form most people will feel comfortable working with; however, feel free to remove the regex or make any other changes.
// pdf-to-text.js v1, require pdf.js ( https://mozilla.github.io/pdf.js/getting_started/#download )
// load pdf.js and pdf.worker.js
function pdfToText(url, separator = ' ') {
  let pdf = pdfjsLib.getDocument(url);
  return pdf.promise.then(function(pdf) { // get all pages text
    let maxPages = pdf._pdfInfo.numPages;
    let countPromises = []; // collecting all page promises
    for (let i = 1; i <= maxPages; i++) {
      let page = pdf.getPage(i);
      countPromises.push(page.then(function(page) { // add page promise
        let textContent = page.getTextContent();
        return textContent.then(function(text) { // return content promise
          return text.items.map(function(obj) {
            return obj.str;
          }).join(separator); // value page text
        });
      }));
    };
    // wait for all pages and join text
    return Promise.all(countPromises).then(function(texts) {
      for(let i = 0; i < texts.length; i++){
        texts[i] = texts[i].replace(/\s+/g, ' ').trim();
      };
      return texts;
    });
  });
};

// example of use:
// waiting on pdfToText to finish completion, or error
pdfToText('files/pdf-name.pdf').then(function(pdfTexts) {
  console.log(pdfTexts);
  // RESULT: ['TEXT-OF-PAGE-1', 'TEXT-OF-PAGE-2', ...]
}, function(reason) {
  console.error(reason);
});

javascript parser for a string which contains .ini data

If a string contains .ini file data, how can I parse it in JavaScript?
Is there any JavaScript parser which will help in this regard?
Here, the string typically contains the content of a configuration file after it has been read. (The reading cannot be done through JavaScript, but somehow I gather the .ini data into a string.)
I wrote a JavaScript function inspired by node-iniparser.js:
function parseINIString(data){
  var regex = {
    section: /^\s*\[\s*([^\]]*)\s*\]\s*$/,
    param: /^\s*([^=]+?)\s*=\s*(.*?)\s*$/,
    comment: /^\s*;.*$/
  };
  var value = {};
  var lines = data.split(/[\r\n]+/);
  var section = null;
  lines.forEach(function(line){
    if(regex.comment.test(line)){
      return;
    }else if(regex.param.test(line)){
      var match = line.match(regex.param);
      if(section){
        value[section][match[1]] = match[2];
      }else{
        value[match[1]] = match[2];
      }
    }else if(regex.section.test(line)){
      var match = line.match(regex.section);
      value[match[1]] = {};
      section = match[1];
    }else if(line.length == 0 && section){
      section = null;
    };
  });
  return value;
}
2017-05-10 update: fixed a bug with keys containing spaces.
EDIT: a sample of reading and parsing an ini file is shown below.
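A minimal usage sketch of parseINIString with a made-up ini string (the section and key names are hypothetical):

var ini = [
  '; sample configuration',
  '[database]',
  'host = localhost',
  'port = 5432',
  '[user]',
  'name = admin'
].join('\n');

var config = parseINIString(ini);
console.log(config.database.host); // "localhost"
console.log(config.user.name);     // "admin"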
You could try config-ini-parser; it's similar to Python's ConfigParser, but without I/O operations.
It can be installed with npm or bower. Here is an example:
var ConfigIniParser = require("config-ini-parser").ConfigIniParser;
var delimiter = "\r\n"; // or "\n" for *nix
var parser = new ConfigIniParser(delimiter); // if the delimiter parameter is not given, the default value "\n" is used
parser.parse(iniContent);
var value = parser.get("section", "option");
parser.stringify('\n'); // get all the ini file content as a string
For more detail you can check the project's main page or the npm package page.
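For a self-contained run, here is a sketch with a made-up iniContent string (the section and option names are hypothetical):

var ConfigIniParser = require("config-ini-parser").ConfigIniParser;

var iniContent = [
  "[section]",
  "option = value"
].join("\r\n");

var parser = new ConfigIniParser("\r\n"); // the delimiter must match the content
parser.parse(iniContent);
console.log(parser.get("section", "option")); // "value"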
Here's a function that's able to parse ini data from a string into an object (on the client side):
function parseINIString(data){
  var regex = {
    section: /^\s*\[\s*([^\]]*)\s*\]\s*$/,
    param: /^\s*([\w\.\-\_]+)\s*=\s*(.*?)\s*$/,
    comment: /^\s*;.*$/
  };
  var value = {};
  var lines = data.split(/\r\n|\r|\n/);
  var section = null;
  for(var x = 0; x < lines.length; x++)
  {
    if(regex.comment.test(lines[x])){
      continue; // skip comment lines (a plain return here would abort the whole parse)
    }else if(regex.param.test(lines[x])){
      var match = lines[x].match(regex.param);
      if(section){
        value[section][match[1]] = match[2];
      }else{
        value[match[1]] = match[2];
      }
    }else if(regex.section.test(lines[x])){
      var match = lines[x].match(regex.section);
      value[match[1]] = {};
      section = match[1];
    }else if(lines[x].length == 0 && section){ // an empty line ends the current section
      section = null;
    };
  }
  return value;
}
Based on the other responses, I've modified it so you can have nested sections :)
function parseINI(data: string) {
  let rgx = {
    section: /^\s*\[\s*([^\]]*)\s*\]\s*$/,
    param: /^\s*([^=]+?)\s*=\s*(.*?)\s*$/,
    comment: /^\s*;.*$/
  };
  let result = {};
  let lines = data.split(/[\r\n]+/);
  let section = result;
  lines.forEach(function (line) {
    //comments
    if (rgx.comment.test(line)) return;
    //params
    if (rgx.param.test(line)) {
      let match = line.match(rgx.param);
      section[match[1]] = match[2];
      return;
    }
    //sections
    if (rgx.section.test(line)) {
      section = result
      let match = line.match(rgx.section);
      for (let subSection of match[1].split(".")) {
        !section[subSection] && (section[subSection] = {});
        section = section[subSection];
      }
      return;
    }
  });
  return result;
}
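A quick usage sketch showing the nested sections (the ini content is made up):

let iniText = [
  '; nested-section demo',
  '[app]',
  'name = demo',
  '[app.database]',
  'host = localhost',
  'port = 5432'
].join('\n');

let parsed = parseINI(iniText);
console.log(parsed.app.name);          // "demo"
console.log(parsed.app.database.host); // "localhost"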
