A while ago I had a requirement to parse a file and then process its contents. At the time I simply read the entire file into memory and parsed it there, without ever considering large files. So how should a large file be parsed?
Input: the file contains multiple JSON objects, laid out one after another
Output: the parsed JSON data
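For illustration, assume input.txt looks roughly like this (the concrete fields here are made up), with the JSON objects simply following one another, possibly separated by whitespace or newlines:

{"id": 1, "name": "foo"}
{"id": 2, "name": "bar"} {"id": 3, "name": "baz"}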
Code:
let fs = require('fs');

// timestamped logger, used to trace the stream events
let log = (str) => {
  console.log(`${new Date().toLocaleString()} ${str}`);
};

let readStream = fs.createReadStream('./input.txt', { encoding: 'utf-8' });

let chunkTotal = '', // data read so far that has not been fully parsed yet
  res = [],          // every JSON object parsed out of the file
  reg = /(}\s*{)/g;  // boundary between two adjacent JSON objects

console.time('parse');

readStream.on('readable', () => {
  log('readable triggered');
  let chunk;
  while ((chunk = readStream.read()) !== null) {
    log(`read triggered, chunk length ${chunk.length}, current res length ${res.length}`);
    chunkTotal += chunk;
    let regRes,
      matchedIndex = 0,
      srcIndex = 0;
    // cut the buffer at every "}{" boundary and parse each piece
    while ((regRes = reg.exec(chunkTotal))) {
      matchedIndex = regRes.index;
      let json = chunkTotal.slice(srcIndex, matchedIndex + 1);
      try {
        res.push(JSON.parse(json.trim()));
      } catch (e) {
        console.log(json);
      }
      srcIndex = matchedIndex + 1;
    }
    // keep the unparsed tail for the next chunk; slicing at srcIndex (instead of
    // matchedIndex + 1) avoids dropping a character when no boundary matched at all
    chunkTotal = chunkTotal.slice(srcIndex).trim();
  }
  // the leftover buffer may itself already be one complete JSON object
  let json;
  try {
    json = JSON.parse(chunkTotal.trim());
    res.push(json);
    chunkTotal = '';
  } catch (e) {}
});

readStream.on('end', () => {
  log(`parsed ${res.length} objects in total`);
  console.timeEnd('parse');
});
When I actually ran it, the program got slower and slower:
by the time it had parsed 1,000,000+ JSON records it was unbearably slow;
but when I changed the code to only count the JSON objects it could parse, without storing the parsed data, it finished in no time.
Could it be that the high memory usage is slowing down garbage collection?
Could multiple processes be used to handle a single large file?
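One way to test the memory/GC suspicion while the parser runs is to log the V8 heap size periodically. A small helper like the following (my own addition, not part of the original code) could be called every N parsed objects:

// Hypothetical helper: print the current V8 heap usage.
const logHeap = () => {
  const { heapUsed, heapTotal } = process.memoryUsage();
  console.log(`heapUsed ${(heapUsed / 1048576).toFixed(1)} MB / heapTotal ${(heapTotal / 1048576).toFixed(1)} MB`);
};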
It turned out the regex was what caused the efficiency drop; reading the file line by line with readline is the better approach.
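For reference, a minimal readline-based sketch could look like the one below. It assumes each JSON object sits on its own line in input.txt (if the objects are merely concatenated, the file would first have to be rewritten that way), and it only counts the objects instead of keeping them all in memory:

const fs = require('fs');
const readline = require('readline');

// Sketch only: assumes one complete JSON object per line in input.txt.
const rl = readline.createInterface({
  input: fs.createReadStream('./input.txt', { encoding: 'utf-8' }),
  crlfDelay: Infinity, // treat \r\n as a single line break
});

let count = 0;
console.time('parse');

rl.on('line', (line) => {
  line = line.trim();
  if (!line) return; // skip blank lines
  try {
    JSON.parse(line); // process the object here instead of storing it
    count++;
  } catch (e) {
    console.log(`unparseable line: ${line}`);
  }
});

rl.on('close', () => {
  console.log(`parsed ${count} objects in total`);
  console.timeEnd('parse');
});

readline only buffers the stream a little at a time and hands out one line per event, so memory usage stays flat no matter how large the file is.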