Implemented:

- Automatic chunk size adjustment.
  Large files are now processed more efficiently.
- The manual chunk configurations (Minimum chunk size and LongLine Threshold) have been removed.

Improved:
- Remote chunk retrieval logic has been sped up.

Fixed:
- Process handling in the boot sequence.
vorotamoroz
2022-09-05 16:53:22 +09:00
parent c9daa1b47d
commit 8112a07210
3 changed files with 31 additions and 61 deletions

View File

@@ -615,7 +615,8 @@ export class LocalPouchDB {
  let processed = 0;
  let made = 0;
  let skiped = 0;
- let pieceSize = MAX_DOC_SIZE_BIN * Math.max(this.settings.customChunkSize, 1);
+ const maxChunkSize = MAX_DOC_SIZE_BIN * Math.max(this.settings.customChunkSize, 1);
+ let pieceSize = maxChunkSize;
  let plainSplit = false;
  let cacheUsed = 0;
  const userpasswordHash = this.h32Raw(new TextEncoder().encode(this.settings.passphrase));
@@ -624,20 +625,11 @@ export class LocalPouchDB {
      plainSplit = true;
  }
+ const minimumChunkSize = Math.min(Math.max(40, ~~(note.data.length / 100)), maxChunkSize);
+ if (pieceSize < minimumChunkSize) pieceSize = minimumChunkSize;
  const newLeafs: EntryLeaf[] = [];
  // To keep low bandwith and database size,
  // Dedup pieces on database.
- // from 0.1.10, for best performance. we use markdown delimiters
- // 1. \n[^\n]{longLineThreshold}[^\n]*\n -> long sentence shuld break.
- // 2. \n\n shold break
- // 3. \r\n\r\n should break
- // 4. \n# should break.
- let minimumChunkSize = this.settings.minimumChunkSize;
- if (minimumChunkSize < 10) minimumChunkSize = 10;
- let longLineThreshold = this.settings.longLineThreshold;
- if (longLineThreshold < 100) longLineThreshold = 100;
- const pieces = splitPieces2(note.data, pieceSize, plainSplit, minimumChunkSize, longLineThreshold);
+ const pieces = splitPieces2(note.data, pieceSize, plainSplit, minimumChunkSize, 0);
  for (const piece of pieces()) {
      processed++;
      let leafid = "";
@@ -1380,8 +1372,21 @@ export class LocalPouchDB {
  if (remoteChunks.rows.some(e => "error" in e)) {
      return false;
  }
+ const remoteChunkItems = remoteChunks.rows.map(e => e.doc);
+ const max = remoteChunkItems.length;
+ let last = 0;
+ // Chunks should be ordered by as we requested.
+ function findChunk(key: string) {
+     const offset = last;
+     for (let i = 0; i < max; i++) {
+         const idx = (offset + i) % max;
+         last = i;
+         if (remoteChunkItems[idx]._id == key) return remoteChunkItems[idx];
+     }
+     throw Error("Chunk collecting error");
+ }
  // Merge them
- const chunkMap: { [key: string]: EntryDoc } = remoteChunks.rows.reduce((p, c) => ({ ...p, [c.key]: c.doc }), {})
- return localChunks.rows.map(e => ("error" in e) ? (chunkMap[e.key]) : e.doc);
+ return localChunks.rows.map(e => ("error" in e) ? (findChunk(e.key)) : e.doc);
  }
}
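
This is the retrieval speed-up from the changelog: the removed reduce rebuilt the accumulator object with a spread on every row, which is O(n²) over n chunks, while the replacement scans circularly from the previous hit, exploiting that the bulk fetch returns rows in the order they were requested. A standalone sketch of that lookup with simplified types; note it records the matched index rather than the loop counter, a slight variation on the diff's `last = i` bookkeeping:

// Sketch of the resumable circular scan, with simplified types.
type ChunkDoc = { _id: string };

function makeOrderedFinder(items: ChunkDoc[]) {
    const max = items.length;
    let last = 0; // index of the previous hit; the next scan starts here
    return function findChunk(key: string): ChunkDoc {
        for (let i = 0; i < max; i++) {
            // Wrap around from the previous hit, so keys requested in
            // order are found after an O(1) amortized scan.
            const idx = (last + i) % max;
            if (items[idx]._id == key) {
                last = idx;
                return items[idx];
            }
        }
        throw new Error("Chunk collecting error");
    };
}

// Rows come back in request order, so each lookup is a short scan:
const find = makeOrderedFinder([{ _id: "a" }, { _id: "b" }, { _id: "c" }]);
find("a"); // scans from index 0, hits immediately
find("b"); // resumes at the previous hit, hits on the next step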

View File

@@ -599,43 +599,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
      })
  )
- containerLocalDatabaseEl.createEl("div", {
-     text: sanitizeHTMLToDom(`Advanced settings<br>
-     Configuration of how LiveSync makes chunks from the file.`),
- });
- new Setting(containerLocalDatabaseEl)
-     .setName("Minimum chunk size")
-     .setDesc("(letters), minimum chunk size.")
-     .addText((text) => {
-         text.setPlaceholder("")
-             .setValue(this.plugin.settings.minimumChunkSize + "")
-             .onChange(async (value) => {
-                 let v = Number(value);
-                 if (isNaN(v) || v < 10 || v > 1000) {
-                     v = 10;
-                 }
-                 this.plugin.settings.minimumChunkSize = v;
-                 await this.plugin.saveSettings();
-             });
-         text.inputEl.setAttribute("type", "number");
-     });
- new Setting(containerLocalDatabaseEl)
-     .setName("LongLine Threshold")
-     .setDesc("(letters), If the line is longer than this, make the line to chunk")
-     .addText((text) => {
-         text.setPlaceholder("")
-             .setValue(this.plugin.settings.longLineThreshold + "")
-             .onChange(async (value) => {
-                 let v = Number(value);
-                 if (isNaN(v) || v < 10 || v > 1000) {
-                     v = 10;
-                 }
-                 this.plugin.settings.longLineThreshold = v;
-                 await this.plugin.saveSettings();
-             });
-         text.inputEl.setAttribute("type", "number");
-     });
  let newDatabaseName = this.plugin.settings.additionalSuffixOfDatabaseName + "";
  new Setting(containerLocalDatabaseEl)
      .setName("Database suffix")
View File

@@ -1618,18 +1618,20 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
  const count = objects.length;
  Logger(procedurename);
  let i = 0;
- // let lastTicks = performance.now() + 2000;
- // let workProcs = 0;
  const p = Parallels();
  const limit = 10;
  Logger(`${procedurename} exec.`);
  for (const v of objects) {
-     // workProcs++;
      if (!this.localDatabase.isReady) throw Error("Database is not ready!");
-     p.add(callback(v).then(() => {
+     const addProc = (p: () => Promise<void>): Promise<unknown> => {
+         return p();
+     }
+     p.add(addProc(async () => {
+         try {
+             await callback(v);
              i++;
-             if (i % 100 == 0) {
+             if (i % 50 == 0) {
                  const notify = `${procedurename} : ${i}/${count}`;
                  if (showingNotice) {
                      Logger(notify, LOG_LEVEL.NOTICE, "syncAll");
@@ -1638,13 +1640,11 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
              }
              this.setStatusBarText(notify);
          }
-     }).catch(ex => {
+     } catch (ex) {
          Logger(`Error while ${procedurename}`, LOG_LEVEL.NOTICE);
          Logger(ex);
-     }).finally(() => {
-         // workProcs--;
-     })
-     );
+     }
+ }));
      await p.wait(limit);
  }
  await p.all();
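
The loop now wraps each item in one async closure with a try/catch instead of a .then()/.catch()/.finally() chain, reports progress every 50 items instead of 100, and throttles with p.wait(limit) so at most ten tasks are in flight. A self-contained sketch of that pattern; this Parallels is a hypothetical minimal stand-in, not the plugin's actual helper:

// A minimal bounded-parallel runner in the spirit of the loop above.
// This `Parallels` is a hypothetical stand-in for the plugin's helper.
function Parallels() {
    const running = new Set<Promise<unknown>>();
    return {
        // Track a started task; tasks are expected to catch their own errors.
        add(task: Promise<unknown>) {
            running.add(task);
            task.finally(() => running.delete(task));
        },
        // Block only while `limit` or more tasks are still in flight.
        async wait(limit: number) {
            while (running.size >= limit) await Promise.race(running);
        },
        // Drain whatever is still running.
        async all() {
            await Promise.allSettled(running);
        },
    };
}

async function runAll<T>(objects: T[], callback: (v: T) => Promise<void>) {
    const p = Parallels();
    const limit = 10;
    let i = 0;
    for (const v of objects) {
        // One async closure with try/catch replaces .then/.catch/.finally.
        p.add((async () => {
            try {
                await callback(v);
                i++;
                if (i % 50 == 0) console.log(`progress: ${i}/${objects.length}`);
            } catch (ex) {
                console.error("Error while processing an item", ex);
            }
        })());
        await p.wait(limit);
    }
    await p.all();
}

Keeping the try/catch inside the closure means a failing item is logged but never rejects the pooled promise, so the Promise.race inside wait() cannot throw.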
@@ -1660,6 +1660,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
await runAll("UPDATE STORAGE", onlyInDatabase, async (e) => {
Logger(`Check or pull from db:${e}`);
await this.pullFile(e, filesStorage, false, null, false);
Logger(`Check or pull from db:${e} OK`);
});
}
if (!initialScan) {