diff --git a/src/lib b/src/lib index 3b20495..fb30708 160000 --- a/src/lib +++ b/src/lib @@ -1 +1 @@ -Subproject commit 3b20495ec95c10f198f192b7df21ec8f98abb258 +Subproject commit fb3070851f4eead9461869b0dbbc497f17129d07 diff --git a/src/main.ts b/src/main.ts index 8f58837..ecfcc37 100644 --- a/src/main.ts +++ b/src/main.ts @@ -1350,7 +1350,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin const docMtime = ~~(doc.mtime / 1000); //TODO: some margin required. if (localMtime >= docMtime) { - Logger(`${doc._id} Skipped, older than storage.`, LOG_LEVEL.VERBOSE); + Logger(`${path} (${doc._id}, ${doc._rev}) Skipped, older than storage.`, LOG_LEVEL.VERBOSE); return; } } @@ -1361,12 +1361,12 @@ export default class ObsidianLiveSyncPlugin extends Plugin missingChildren: [] as string[], timeout: now + this.chunkWaitTimeout, }; - // If `Read chunks online` is enabled, retrieve chunks from the remote CouchDB directly. + // If `Read chunks online` is disabled, chunks should be transferred before here. + // However, in some cases, chunks arrive after that. So, if missing chunks exist, we have to wait for them. 
if ((!this.settings.readChunksOnline) && "children" in doc) { - const c = await this.localDatabase.allDocsRaw({ keys: doc.children, include_docs: false }); - const missing = c.rows.filter((e) => "error" in e).map((e) => e.key); - // fetch from remote - if (missing.length > 0) Logger(`${doc._id}(${doc._rev}) Queued (waiting ${missing.length} items)`, LOG_LEVEL.VERBOSE); + const c = await this.localDatabase.collectChunksWithCache(doc.children) + const missing = c.filter((e) => !e.chunk).map((e) => e.id); + if (missing.length > 0) Logger(`${path} (${doc._id}, ${doc._rev}) Queued (waiting ${missing.length} items)`, LOG_LEVEL.VERBOSE); newQueue.missingChildren = missing; this.queuedFiles.push(newQueue); } else { @@ -1509,27 +1509,25 @@ export default class ObsidianLiveSyncPlugin extends Plugin return; } - logHideTimer: NodeJS.Timeout = null; setStatusBarText(message: string = null, log: string = null) { if (!this.statusBar) return; const newMsg = typeof message == "string" ? message : this.lastMessage; const newLog = typeof log == "string" ? 
log : this.lastLog; if (`${this.lastMessage}-${this.lastLog}` != `${newMsg}-${newLog}`) { - this.statusBar.setText(newMsg.split("\n")[0]); + scheduleTask("update-display", 50, () => { + this.statusBar.setText(newMsg.split("\n")[0]); - if (this.settings.showStatusOnEditor) { - const root = activeDocument.documentElement; - const q = root.querySelectorAll(`.CodeMirror-wrap,.cm-s-obsidian>.cm-editor,.canvas-wrapper`); - q.forEach(e => e.setAttr("data-log", '' + (newMsg + "\n" + newLog) + '')) - } else { - const root = activeDocument.documentElement; - const q = root.querySelectorAll(`.CodeMirror-wrap,.cm-s-obsidian>.cm-editor,.canvas-wrapper`); - q.forEach(e => e.setAttr("data-log", '')) - } - if (this.logHideTimer != null) { - clearTimeout(this.logHideTimer); - } - this.logHideTimer = setTimeout(() => this.setStatusBarText(null, ""), 3000); + if (this.settings.showStatusOnEditor) { + const root = activeDocument.documentElement; + const q = root.querySelectorAll(`.CodeMirror-wrap,.cm-s-obsidian>.cm-editor,.canvas-wrapper`); + q.forEach(e => e.setAttr("data-log", '' + (newMsg + "\n" + newLog) + '')) + } else { + const root = activeDocument.documentElement; + const q = root.querySelectorAll(`.CodeMirror-wrap,.cm-s-obsidian>.cm-editor,.canvas-wrapper`); + q.forEach(e => e.setAttr("data-log", '')) + } + }, true); + scheduleTask("log-hide", 3000, () => this.setStatusBarText(null, "")); this.lastMessage = newMsg; this.lastLog = newLog; } @@ -2137,16 +2135,10 @@ Or if you are sure know what had been happened, we can unlock the database from conflictedCheckFiles: FilePath[] = []; // queueing the conflicted file check - conflictedCheckTimer: number; - queueConflictedCheck(file: TFile) { this.conflictedCheckFiles = this.conflictedCheckFiles.filter((e) => e != file.path); this.conflictedCheckFiles.push(getPathFromTFile(file)); - if (this.conflictedCheckTimer != null) { - window.clearTimeout(this.conflictedCheckTimer); - } - this.conflictedCheckTimer = window.setTimeout(async () 
=> { - this.conflictedCheckTimer = null; + scheduleTask("check-conflict", 100, async () => { const checkFiles = JSON.parse(JSON.stringify(this.conflictedCheckFiles)) as FilePath[]; for (const filename of checkFiles) { try { @@ -2158,7 +2150,7 @@ Or if you are sure know what had been happened, we can unlock the database from Logger(ex); } } - }, 100); + }); } async showIfConflicted(filename: FilePathWithPrefix) { diff --git a/src/utils.ts b/src/utils.ts index 4eb1186..3ccdfcb 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -44,7 +44,10 @@ export function getPathFromTFile(file: TAbstractFile) { } const tasks: { [key: string]: ReturnType } = {}; -export function scheduleTask(key: string, timeout: number, proc: (() => Promise | void)) { +export function scheduleTask(key: string, timeout: number, proc: (() => Promise | void), skipIfTaskExist?: boolean) { + if (skipIfTaskExist && key in tasks) { + return; + } cancelTask(key); tasks[key] = setTimeout(async () => { delete tasks[key]; @@ -663,6 +666,14 @@ export const remoteDatabaseCleanup = async (plugin: ObsidianLiveSyncPlugin, dryR return Number.parseInt((info as any)?.sizes?.[key] ?? 
0); } await runWithLock("clean-up:remote", true, async () => { + const CHUNK_SIZE = 100; + function makeChunkedArrayFromArray(items: T[]): T[][] { + const chunked = []; + for (let i = 0; i < items.length; i += CHUNK_SIZE) { + chunked.push(items.slice(i, i + CHUNK_SIZE)); + } + return chunked; + } try { const ret = await plugin.replicator.connectRemoteCouchDBWithSetting(plugin.settings, plugin.isMobile); if (typeof ret === "string") { @@ -701,14 +712,17 @@ export const remoteDatabaseCleanup = async (plugin: ObsidianLiveSyncPlugin, dryR return; } Logger(`Deleting unreferenced chunks: ${removeItems}`, LOG_LEVEL.NOTICE, "clean-up-db"); - const rets = await _requestToCouchDBFetch( - `${plugin.settings.couchDB_URI}/${plugin.settings.couchDB_DBNAME}`, - plugin.settings.couchDB_USER, - plugin.settings.couchDB_PASSWORD, - "_purge", - payload, "POST"); - // const result = await rets(); - Logger(JSON.stringify(await rets.json()), LOG_LEVEL.VERBOSE); + const buffer = makeChunkedArrayFromArray(Object.entries(payload)); + for (const chunkedPayload of buffer) { + const rets = await _requestToCouchDBFetch( + `${plugin.settings.couchDB_URI}/${plugin.settings.couchDB_DBNAME}`, + plugin.settings.couchDB_USER, + plugin.settings.couchDB_PASSWORD, + "_purge", + chunkedPayload.reduce((p, c) => ({ ...p, [c[0]]: c[1] }), {}), "POST"); + // const result = await rets(); + Logger(JSON.stringify(await rets.json()), LOG_LEVEL.VERBOSE); + } Logger(`Compacting database...`, LOG_LEVEL.NOTICE, "clean-up-db"); await db.compact(); const endInfo = await db.info();