New Feature:

- Skip conflict check during replication

Fixed:
- Rewrote the replication reflection algorithm.
This commit is contained in:
vorotamoroz
2022-06-13 17:36:26 +09:00
parent 728dabce60
commit 46ff17fdf3
8 changed files with 183 additions and 14 deletions

View File

@@ -1,7 +1,7 @@
{
"id": "obsidian-livesync",
"name": "Self-hosted LiveSync",
"version": "0.11.0",
"version": "0.11.1",
"minAppVersion": "0.9.12",
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"author": "vorotamoroz",

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "obsidian-livesync",
"version": "0.11.0",
"version": "0.11.1",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "obsidian-livesync",
"version": "0.11.0",
"version": "0.11.1",
"license": "MIT",
"dependencies": {
"diff-match-patch": "^1.0.5",

View File

@@ -1,6 +1,6 @@
{
"name": "obsidian-livesync",
"version": "0.11.0",
"version": "0.11.1",
"description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"main": "main.js",
"type": "module",

View File

@@ -253,7 +253,7 @@ export class LocalPouchDB {
Logger("Conversion completed!", LOG_LEVEL.NOTICE);
old.destroy(); // delete the old database.
this.isReady = true;
return nextSeq();
return await nextSeq();
} else {
throw new Error("Conversion failed!");
}
@@ -265,7 +265,7 @@ export class LocalPouchDB {
return false;
}
} else {
return nextSeq();
return await nextSeq();
}
}

View File

@@ -710,6 +710,24 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
})
);
new Setting(containerSyncSettingEl)
.setName("Skip old files on sync")
.setDesc("Skip old incoming if incoming changes older than storage.")
.addToggle((toggle) =>
toggle.setValue(this.plugin.settings.skipOlderFilesOnSync).onChange(async (value) => {
this.plugin.settings.skipOlderFilesOnSync = value;
await this.plugin.saveSettings();
})
);
new Setting(containerSyncSettingEl)
.setName("Check conflict only on opening file.")
.setDesc("Do not check conflict while replication")
.addToggle((toggle) =>
toggle.setValue(this.plugin.settings.checkConflictOnlyOnOpen).onChange(async (value) => {
this.plugin.settings.checkConflictOnlyOnOpen = value;
await this.plugin.saveSettings();
})
);
containerSyncSettingEl.createEl("div", {
text: sanitizeHTMLToDom(`Advanced settings<br>
If you reached the payload size limit when using IBM Cloudant, please set batch size and batch limit to a lower value.`),
@@ -1119,8 +1137,8 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
const containerCorruptedDataEl = containerEl.createDiv();
containerCorruptedDataEl.createEl("h3", { text: "Corrupted data" });
containerCorruptedDataEl.createEl("h3", { text: "Corrupted or missing data" });
containerCorruptedDataEl.createEl("h4", { text: "Corrupted" });
if (Object.keys(this.plugin.localDatabase.corruptedEntries).length > 0) {
const cx = containerCorruptedDataEl.createEl("div", { text: "If you have copy of these items on any device, simply edit once or twice. Or not, delete this. sorry.." });
for (const k in this.plugin.localDatabase.corruptedEntries) {
@@ -1149,6 +1167,38 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
} else {
containerCorruptedDataEl.createEl("div", { text: "There is no corrupted data." });
}
containerCorruptedDataEl.createEl("h4", { text: "Missing or waiting" });
if (Object.keys(this.plugin.queuedFiles).length > 0) {
const cx = containerCorruptedDataEl.createEl("div", {
text: "These files have missing or waiting chunks. Perhaps almost chunks will be found in a while after replication. But if there're no chunk, you have to restore database entry from existed file by hitting the button below.",
});
const files = [...new Set([...this.plugin.queuedFiles.map((e) => e.entry._id)])];
for (const k of files) {
const xx = cx.createEl("div", { text: `${id2path(k)}` });
const ba = xx.createEl("button", { text: `Delete this` }, (e) => {
e.addEventListener("click", async () => {
await this.plugin.localDatabase.deleteDBEntry(k);
xx.remove();
});
});
ba.addClass("mod-warning");
xx.createEl("button", { text: `Restore from file` }, (e) => {
e.addEventListener("click", async () => {
const f = await this.app.vault.getFiles().filter((e) => path2id(e.path) == k);
if (f.length == 0) {
Logger("Not found in vault", LOG_LEVEL.NOTICE);
return;
}
await this.plugin.updateIntoDB(f[0]);
xx.remove();
});
});
xx.addClass("mod-warning");
}
} else {
containerCorruptedDataEl.createEl("div", { text: "There is no missing or waiting chunk." });
}
applyDisplayEnabled();
addScreenElement("70", containerCorruptedDataEl);
changeDisplay("0");

Submodule src/lib updated: b031e4e69d...ae989051b5

View File

@@ -923,12 +923,118 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
const doc = change;
const file = targetFile;
await this.doc2storage_modify(doc, file);
if (!this.settings.checkConflictOnlyOnOpen) {
this.queueConflictedCheck(file);
} else {
const af = app.workspace.getActiveFile();
if (af && af.path == file.path) {
this.queueConflictedCheck(file);
}
}
} else {
Logger(`${id2path(change._id)} is already exist as the folder`);
}
}
// Incoming file entries that arrived via replication but are still waiting
// for one or more of their chunk documents before they can be applied.
queuedFiles: {
    entry: EntryBody; // the file's metadata document received from the remote DB
    missingChildren: string[]; // chunk ids not yet present in the local database
    timeout?: number; // epoch ms deadline; past this we warn that chunks never arrived
    done?: boolean; // applied or superseded — pruned by procQueuedFiles
    warned?: boolean; // timeout warning already emitted for this entry
}[] = [];
// How long (ms) to wait for a missing chunk before warning the user.
// Reset each time another missing chunk for the same entry arrives.
chunkWaitTimeout = 60000;
// Persist the ids of still-pending queued files (per vault) so the queue
// can be reconstructed by loadQueuedFiles() after a restart.
async saveQueuedFiles() {
    const storageKey = "obsidian-livesync-queuefiles-" + this.app.vault.getName();
    const pendingIds = [];
    for (const item of this.queuedFiles) {
        if (!item.done) pendingIds.push(item.entry._id);
    }
    localStorage.setItem(storageKey, JSON.stringify(pendingIds));
}
// Restore the pending-file queue saved by saveQueuedFiles(): re-fetch each
// saved id from the local database and re-queue it via parseIncomingDoc,
// skipping ids that are already queued.
async loadQueuedFiles() {
    const lsname = "obsidian-livesync-queuefiles-" + this.app.vault.getName();
    // Fall back to an empty list when nothing was persisted for this vault.
    const ids = JSON.parse(localStorage.getItem(lsname) || "[]") as string[];
    const ret = await this.localDatabase.localDatabase.allDocs({ keys: ids, include_docs: true });
    for (const doc of ret.rows) {
        // allDocs returns a row per requested key even when missing; only rows
        // with a doc are usable. Skip anything already in the in-memory queue.
        if (doc.doc && !this.queuedFiles.some((e) => e.entry._id == doc.doc._id)) {
            await this.parseIncomingDoc(doc.doc as PouchDB.Core.ExistingDocument<EntryBody & PouchDB.Core.AllDocsMeta>);
        }
    }
}
// Walk the queue: apply entries whose chunks have all arrived, warn (once)
// about entries whose chunk wait deadline has passed, then prune completed
// entries and persist the remaining queue.
async procQueuedFiles() {
    // Serialize queue processing under a named lock so concurrent completion
    // events (parseIncomingChunk / parseIncomingDoc) don't interleave.
    // NOTE(review): second argument `true` presumably collapses duplicate
    // pending invocations — confirm against runWithLock's definition.
    await runWithLock("procQueue", true, async () => {
        // fixed: saveQueuedFiles is async — await it instead of dropping the promise.
        await this.saveQueuedFiles();
        for (const queue of this.queuedFiles) {
            if (queue.done) continue;
            const now = new Date().getTime();
            if (queue.missingChildren.length == 0) {
                // Every chunk is present: reflect the change into storage.
                queue.done = true;
                Logger(`Applying ${queue.entry._id} (${queue.entry._rev}) change...`);
                await this.handleDBChanged(queue.entry);
                // fixed: skip the timeout check — the original fell through and
                // could warn "Timed out" about an entry it had just applied.
                continue;
            }
            if (now > queue.timeout) {
                if (!queue.warned) Logger(`Timed out: ${queue.entry._id} could not collect ${queue.missingChildren.length} chunks. plugin keeps watching, but you have to check the file after the replication.`, LOG_LEVEL.NOTICE);
                queue.warned = true;
            }
        }
        // Drop completed entries and persist the surviving queue.
        this.queuedFiles = this.queuedFiles.filter((e) => !e.done);
        await this.saveQueuedFiles();
    });
}
// Called for every chunk ("h:"-prefixed) document arriving via replication:
// cross each waiting queue entry off for this chunk, and when an entry has
// collected all of its chunks, mark older queued revisions of the same file
// as superseded and kick off queue processing.
parseIncomingChunk(chunk: PouchDB.Core.ExistingDocument<EntryDoc>) {
    const now = new Date().getTime();
    let isNewFileCompleted = false;
    for (const queue of this.queuedFiles) {
        if (queue.done) continue;
        if (queue.missingChildren.indexOf(chunk._id) !== -1) {
            queue.missingChildren = queue.missingChildren.filter((e) => e != chunk._id);
            // Progress was made — extend the wait deadline for the rest.
            queue.timeout = now + this.chunkWaitTimeout;
        }
        if (queue.missingChildren.length == 0) {
            // This entry is complete: retire any queued entry for the same file
            // with an older mtime, so only the newest revision gets applied.
            for (const e of this.queuedFiles) {
                if (e.entry._id == queue.entry._id && e.entry.mtime < queue.entry.mtime) {
                    e.done = true;
                }
            }
            isNewFileCompleted = true;
        }
    }
    // NOTE(review): fire-and-forget — procQueuedFiles serializes itself via a
    // lock, but its promise (and any rejection) is dropped here; confirm intended.
    if (isNewFileCompleted) this.procQueuedFiles();
}
// Queue an incoming file document from replication. Optionally skips documents
// older than the file already in the vault; otherwise determines which chunk
// children are still missing locally and either queues the entry to wait for
// them or processes it immediately.
async parseIncomingDoc(doc: PouchDB.Core.ExistingDocument<EntryBody>) {
    const skipOldFile = this.settings.skipOlderFilesOnSync;
    if (skipOldFile) {
        const info = this.app.vault.getAbstractFileByPath(id2path(doc._id));
        if (info && info instanceof TFile) {
            // Compare at second granularity (mtimes are ms) so sub-second
            // differences don't count as "newer".
            const localMtime = ~~((info as TFile).stat.mtime / 1000);
            const docMtime = ~~(doc.mtime / 1000);
            if (localMtime >= docMtime) {
                Logger(`${doc._id} Skipped, older than storage.`, LOG_LEVEL.VERBOSE);
                return;
            }
        }
    }
    const now = new Date().getTime();
    const newQueue = {
        entry: doc,
        missingChildren: [] as string[],
        timeout: now + this.chunkWaitTimeout,
    };
    if ("children" in doc) {
        // Chunked entry: probe the local DB for every child id; rows that come
        // back with "error" are chunks we don't have yet.
        const c = await this.localDatabase.localDatabase.allDocs({ keys: doc.children, include_docs: false });
        const missing = c.rows.filter((e) => "error" in e).map((e) => e.key);
        if (missing.length) Logger(`${doc._id}(${doc._rev}) Queued (waiting ${missing.length} items)`, LOG_LEVEL.VERBOSE);
        newQueue.missingChildren = missing;
        this.queuedFiles.push(newQueue);
        this.saveQueuedFiles();
    } else {
        // No children to wait for — queue and process right away.
        // NOTE(review): saveQueuedFiles/procQueuedFiles promises are dropped
        // here (fire-and-forget); confirm intended.
        this.queuedFiles.push(newQueue);
        this.saveQueuedFiles();
        this.procQueuedFiles();
    }
}
// Timer id for the periodic sync interval; null while periodic sync is inactive.
periodicSyncHandler: number = null;
//---> Sync
@@ -942,14 +1048,15 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
continue;
}
if (change._id.startsWith("h:")) {
await this.parseIncomingChunk(change);
continue;
}
if (change._id == SYNCINFO_ID) {
continue;
}
if (change.type != "leaf" && change.type != "versioninfo" && change.type != "milestoneinfo" && change.type != "nodeinfo") {
Logger("replication change arrived", LOG_LEVEL.VERBOSE);
await this.handleDBChanged(change);
await this.parseIncomingDoc(change);
continue;
}
if (change.type == "versioninfo") {
if (change.version > VER) {
@@ -1086,9 +1193,17 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
waiting = " " + this.batchFileChange.map((e) => "🛫").join("");
waiting = waiting.replace(/(🛫){10}/g, "🚀");
}
let queued = "";
const queue = Object.entries(this.queuedFiles).filter((e) => !e[1].warned);
const queuedCount = queue.length;
if (queuedCount) {
const pieces = queue.map((e) => e[1].missingChildren).reduce((prev, cur) => prev + cur.length, 0);
queued = ` 🧩 ${queuedCount} (${pieces})`;
}
const procs = getProcessingCounts();
const procsDisp = procs == 0 ? "" : `${procs}`;
const message = `Sync:${w}${sent}${arrived}${waiting}${procsDisp}`;
const message = `Sync:${w}${sent}${arrived}${waiting}${procsDisp}${queued}`;
const locks = getLocks();
const pendingTask = locks.pending.length ? `\nPending:${locks.pending.join(", ")}` : "";
const runningTask = locks.running.length ? `\nRunning:${locks.running.join(", ")}` : "";
@@ -1129,6 +1244,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
if (this.settings.autoSweepPlugins) {
await this.sweepPlugin(false);
}
await this.loadQueuedFiles();
this.localDatabase.openReplication(this.settings, false, showMessage, this.parseReplicationResult);
}
@@ -1168,6 +1284,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
if (showingNotice) {
notice = NewNotice("Initializing", 0);
}
const filesStorage = this.app.vault.getFiles();
const filesStorageName = filesStorage.map((e) => e.path);
const wf = await this.localDatabase.localDatabase.allDocs();
@@ -1555,6 +1672,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
});
if (isNotChanged) return;
await this.localDatabase.putDBEntry(d);
this.queuedFiles = this.queuedFiles.map((e) => ({ ...e, ...(e.entry._id == d._id ? { done: true } : {}) }));
Logger("put database:" + fullpath + "(" + datatype + ") ");
if (this.settings.syncOnSave && !this.suspended) {
@@ -1632,7 +1750,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
endkey: `ps:${this.deviceAndVaultName}.`,
include_docs: true,
});
Logger("OLD DOCS.", LOG_LEVEL.VERBOSE);
// Logger("OLD DOCS.", LOG_LEVEL.VERBOSE);
// sweep current plugin.
// @ts-ignore
const pl = this.app.plugins;

View File

@@ -134,6 +134,7 @@ const connectRemoteCouchDB = async (uri: string, auth: { username: string; passw
// return await fetch(url, opts);
},
};
const db: PouchDB.Database<EntryDoc> = new PouchDB<EntryDoc>(uri, conf);
if (passphrase && typeof passphrase === "string") {
enableEncryption(db, passphrase);