Mirror of https://github.com/vrtmrz/obsidian-livesync.git
synced 2026-03-10 20:08:48 +00:00
Compare commits
7 Commits
| SHA1 |
|---|
| c2491fdfad |
| 06a6e391e8 |
| f99475f6b7 |
| 109fc00b9d |
| c071d822e1 |
| d2de5b4710 |
| cf5ecd8922 |
@@ -1,7 +1,7 @@
 {
     "id": "obsidian-livesync",
     "name": "Self-hosted LiveSync",
-    "version": "0.21.2",
+    "version": "0.21.5",
     "minAppVersion": "0.9.12",
     "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "author": "vorotamoroz",
4 package-lock.json (generated)
@@ -1,12 +1,12 @@
 {
   "name": "obsidian-livesync",
-  "version": "0.21.2",
+  "version": "0.21.5",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "obsidian-livesync",
-      "version": "0.21.2",
+      "version": "0.21.5",
       "license": "MIT",
       "dependencies": {
         "diff-match-patch": "^1.0.5",
@@ -1,6 +1,6 @@
 {
   "name": "obsidian-livesync",
-  "version": "0.21.2",
+  "version": "0.21.5",
   "description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
   "main": "main.js",
   "type": "module",
@@ -436,7 +436,7 @@ export class HiddenFileSync extends LiveSyncCommands {
                 type: "newnote",
             };
         } else {
-            if (isDocContentSame(old.data, content) && !forceWrite) {
+            if (await isDocContentSame(old.data, content) && !forceWrite) {
                 // Logger(`STORAGE --> DB:${file.path}: (hidden) Not changed`, LOG_LEVEL_VERBOSE);
                 return;
             }
@@ -560,7 +560,7 @@ export class HiddenFileSync extends LiveSyncCommands {
         } else {
             const contentBin = await this.plugin.vaultAccess.adapterReadBinary(filename);
             const content = await encodeBinary(contentBin);
-            if (isDocContentSame(content, fileOnDB.data) && !force) {
+            if (await isDocContentSame(content, fileOnDB.data) && !force) {
                 // Logger(`STORAGE <-- DB:${filename}: skipped (hidden) Not changed`, LOG_LEVEL_VERBOSE);
                 return true;
             }
@@ -228,7 +228,7 @@ export class PluginAndTheirSettings extends LiveSyncCommands {
         if (old !== false) {
             const oldData = { data: old.data, deleted: old._deleted };
             const newData = { data: d.data, deleted: d._deleted };
-            if (isDocContentSame(oldData.data, newData.data) && oldData.deleted == newData.deleted) {
+            if (await isDocContentSame(oldData.data, newData.data) && oldData.deleted == newData.deleted) {
                 Logger(`Nothing changed:${m.name}`);
                 return;
             }
@@ -66,10 +66,7 @@

         for (const revInfo of reversedRevs) {
             if (revInfo.status == "available") {
-                const doc =
-                    (!isPlain && showDiffInfo) || (checkStorageDiff && revInfo.rev == docA._rev)
-                        ? await db.getDBEntry(path, { rev: revInfo.rev }, false, false, true)
-                        : await db.getDBEntryMeta(path, { rev: revInfo.rev }, true);
+                const doc = (!isPlain && showDiffInfo) || (checkStorageDiff && revInfo.rev == docA._rev) ? await db.getDBEntry(path, { rev: revInfo.rev }, false, false, true) : await db.getDBEntryMeta(path, { rev: revInfo.rev }, true);
                 if (doc === false) continue;
                 const rev = revInfo.rev;

@@ -112,11 +109,11 @@
             let result = false;
             if (isPlainText(docA.path)) {
                 const data = await plugin.vaultAccess.adapterRead(abs);
-                result = isDocContentSame(data, doc.data);
+                result = await isDocContentSame(data, doc.data);
             } else {
                 const data = await plugin.vaultAccess.adapterReadBinary(abs);
                 const dataEEncoded = createBinaryBlob(data);
-                result = isDocContentSame(dataEEncoded, doc.data);
+                result = await isDocContentSame(dataEEncoded, doc.data);
             }
             if (result) {
                 diffDetail += " ⚖️";
@@ -1813,7 +1813,7 @@ ${stringifyYaml(pluginConfig)}`;
                 .setClass("wizardHidden")
                 .addDropdown((dropdown) =>
                     dropdown
-                        .addOptions({ "": "Old Algorithm", "xxhash32": "xxhash32 (Fast)", "xxhash64": "xxhash64 (Fastest)" } as Record<HashAlgorithm, string>)
+                        .addOptions({ "": "Old Algorithm", "xxhash32": "xxhash32 (Fast)", "xxhash64": "xxhash64 (Fastest)", "sha1": "Fallback (Without WebAssembly)" } as Record<HashAlgorithm, string>)
                         .setValue(this.plugin.settings.hashAlg)
                         .onChange(async (value) => {
                             this.plugin.settings.hashAlg = value as HashAlgorithm;
@@ -1,6 +1,7 @@
 import { type App, TFile, type DataWriteOptions, TFolder, TAbstractFile } from "./deps";
 import { serialized } from "./lib/src/lock";
 import type { FilePath } from "./lib/src/types";
+import { createBinaryBlob, isDocContentSame } from "./lib/src/utils";
 function getFileLockKey(file: TFile | TFolder | string) {
     return `fl:${typeof (file) == "string" ? file : file.path}`;
 }
@@ -65,9 +66,22 @@ export class SerializedFileAccess {

     async vaultModify(file: TFile, data: string | ArrayBuffer | Uint8Array, options?: DataWriteOptions) {
         if (typeof (data) === "string") {
-            return await serialized(getFileLockKey(file), () => this.app.vault.modify(file, data, options));
+            return await serialized(getFileLockKey(file), async () => {
+                const oldData = await this.app.vault.read(file);
+                if (data === oldData) return false
+                await this.app.vault.modify(file, data, options)
+                return true;
+            }
+            );
         } else {
-            return await serialized(getFileLockKey(file), () => this.app.vault.modifyBinary(file, toArrayBuffer(data), options));
+            return await serialized(getFileLockKey(file), async () => {
+                const oldData = await this.app.vault.readBinary(file);
+                if (isDocContentSame(createBinaryBlob(oldData), createBinaryBlob(data))) {
+                    return false;
+                }
+                await this.app.vault.modifyBinary(file, toArrayBuffer(data), options)
+                return true;
+            });
         }
     }
     async vaultCreate(path: string, data: string | ArrayBuffer | Uint8Array, options?: DataWriteOptions): Promise<TFile> {
2 src/lib
Submodule src/lib updated: 7e79c27035...724c3c364a
41 src/main.ts
@@ -33,7 +33,7 @@ import { GlobalHistoryView, VIEW_TYPE_GLOBAL_HISTORY } from "./GlobalHistoryView
 import { LogPaneView, VIEW_TYPE_LOG } from "./LogPaneView";
 import { mapAllTasksWithConcurrencyLimit, processAllTasksWithConcurrencyLimit } from "./lib/src/task";
 import { LRUCache } from "./lib/src/LRUCache";
-import { SerializedFileAccess } from "./SerializedFileAccess";
+import { SerializedFileAccess } from "./SerializedFileAccess.ts";

 setNoticeClass(Notice);

@@ -1306,17 +1306,22 @@ Note: We can always able to read V1 format. It will be progressively converted.
         await this.ensureDirectoryEx(path);
         try {
             let outFile;
+            let isChanged = true;
             if (mode == "create") {
                 const normalizedPath = normalizePath(path);
                 await this.vaultAccess.vaultCreate(normalizedPath, writeData, { ctime: doc.ctime, mtime: doc.mtime, });
                 outFile = this.vaultAccess.getAbstractFileByPath(normalizedPath) as TFile;
             } else {
-                await this.vaultAccess.vaultModify(file, writeData, { ctime: doc.ctime, mtime: doc.mtime });
+                isChanged = await this.vaultAccess.vaultModify(file, writeData, { ctime: doc.ctime, mtime: doc.mtime });
                 outFile = this.vaultAccess.getAbstractFileByPath(getPathFromTFile(file)) as TFile;
             }
-            Logger(msg + path);
-            this.vaultAccess.touch(outFile);
-            this.app.vault.trigger(mode, outFile);
+            if (isChanged) {
+                Logger(msg + path);
+                this.vaultAccess.touch(outFile);
+                this.app.vault.trigger(mode, outFile);
+            } else {
+                Logger(msg + "Skipped, the file is the same: " + path, LOG_LEVEL_VERBOSE);
+            }

         } catch (ex) {
             Logger(msg + "ERROR, Could not write: " + path, LOG_LEVEL_NOTICE);
@@ -1370,12 +1375,12 @@ Note: We can always able to read V1 format. It will be progressively converted.
         try {
             const releaser = await semaphore.acquire(1);
             serialized(`dbchanged-${path}`, async () => {
-                Logger(`Applying ${path} (${entry._id}: ${entry._rev}) change...`, LOG_LEVEL_VERBOSE);
+                Logger(`Applying ${path} (${entry._id.substring(0, 8)}: ${entry._rev?.substring(0, 5)}) change...`, LOG_LEVEL_VERBOSE);
                 await this.handleDBChangedAsync(entry);
-                Logger(`Applied ${path} (${entry._id}:${entry._rev}) change...`);
+                Logger(`Applied ${path} (${entry._id.substring(0, 8)}:${entry._rev?.substring(0, 5)}) change...`);
             }).finally(() => { releaser(); });
         } catch (ex) {
-            Logger(`Failed to apply the change of ${path} (${entry._id}:${entry._rev})`);
+            Logger(`Failed to apply the change of ${path} (${entry._id.substring(0, 8)}:${entry._rev?.substring(0, 5)})`);
         }
     } while (this.queuedEntries.length > 0);
 } finally {
@@ -1462,10 +1467,10 @@ Note: We can always able to read V1 format. It will be progressively converted.
             } else if (isValidPath(this.getPath(queue.entry))) {
                 this.handleDBChanged(queue.entry);
             } else {
-                Logger(`Skipped: ${queue.entry._id}`, LOG_LEVEL_VERBOSE);
+                Logger(`Skipped: ${queue.entry._id.substring(0, 8)}`, LOG_LEVEL_VERBOSE);
             }
         } else if (now > queue.timeout) {
-            if (!queue.warned) Logger(`Timed out: ${queue.entry._id} could not collect ${queue.missingChildren.length} chunks. plugin keeps watching, but you have to check the file after the replication.`, LOG_LEVEL_NOTICE);
+            if (!queue.warned) Logger(`Timed out: ${queue.entry._id.substring(0, 8)} could not collect ${queue.missingChildren.length} chunks. plugin keeps watching, but you have to check the file after the replication.`, LOG_LEVEL_NOTICE);
             queue.warned = true;
             continue;
         }
@@ -1501,11 +1506,11 @@ Note: We can always able to read V1 format. It will be progressively converted.
         const skipOldFile = this.settings.skipOlderFilesOnSync && false; //patched temporary.
         // Do not handle internal files if the feature has not been enabled.
         if (isInternalMetadata(doc._id) && !this.settings.syncInternalFiles) {
-            Logger(`Skipped: ${path} (${doc._id}, ${doc._rev}) Hidden file sync is disabled.`, LOG_LEVEL_VERBOSE);
+            Logger(`Skipped: ${path} (${doc._id.substring(0, 8)}, ${doc._rev?.substring(0, 10)}) Hidden file sync is disabled.`, LOG_LEVEL_VERBOSE);
             return;
         }
         if (isCustomisationSyncMetadata(doc._id) && !this.settings.usePluginSync) {
-            Logger(`Skipped: ${path} (${doc._id}, ${doc._rev}) Customization sync is disabled.`, LOG_LEVEL_VERBOSE);
+            Logger(`Skipped: ${path} (${doc._id.substring(0, 8)}, ${doc._rev?.substring(0, 10)}) Customization sync is disabled.`, LOG_LEVEL_VERBOSE);
             return;
         }
         // It is better for your own safety, not to handle the following files
@@ -1528,7 +1533,7 @@ Note: We can always able to read V1 format. It will be progressively converted.
             const docMtime = ~~(doc.mtime / 1000);
             //TODO: some margin required.
             if (localMtime >= docMtime) {
-                Logger(`${path} (${doc._id}, ${doc._rev}) Skipped, older than storage.`, LOG_LEVEL_VERBOSE);
+                Logger(`${path} (${doc._id.substring(0, 8)}, ${doc._rev?.substring(0, 10)}) Skipped, older than storage.`, LOG_LEVEL_VERBOSE);
                 return;
             }
         }
@@ -1544,7 +1549,7 @@ Note: We can always able to read V1 format. It will be progressively converted.
         if ((!this.settings.readChunksOnline) && "children" in doc) {
             const c = await this.localDatabase.collectChunksWithCache(doc.children as DocumentID[]);
             const missing = c.filter((e) => e.chunk === false).map((e) => e.id);
-            if (missing.length > 0) Logger(`${path} (${doc._id}, ${doc._rev}) Queued (waiting ${missing.length} items)`, LOG_LEVEL_VERBOSE);
+            if (missing.length > 0) Logger(`${path} (${doc._id.substring(0, 8)}, ${doc._rev?.substring(0, 10)}) Queued (waiting ${missing.length} items)`, LOG_LEVEL_VERBOSE);
             newQueue.missingChildren = missing;
             this.queuedFiles.push(newQueue);
         } else {
@@ -2265,12 +2270,14 @@ Or if you are sure know what had been happened, we can unlock the database from
         const file = this.vaultAccess.getAbstractFileByPath(stripAllPrefixes(path)) as TFile;
         if (file) {
-            await this.vaultAccess.vaultModify(file, p);
-            await this.updateIntoDB(file);
+            if (await this.vaultAccess.vaultModify(file, p)) {
+                await this.updateIntoDB(file);
+            }
         } else {
             const newFile = await this.vaultAccess.vaultCreate(path, p);
             await this.updateIntoDB(newFile);
         }
         // ?
         await this.pullFile(path);
         Logger(`Automatically merged (sensible) :${path}`, LOG_LEVEL_INFO);
         return true;
@@ -2562,7 +2569,7 @@ Or if you are sure know what had been happened, we can unlock the database from
             const oldData = { data: old.data, deleted: old._deleted || old.deleted };
             const newData = { data: d.data, deleted: d._deleted || d.deleted };
             if (oldData.deleted != newData.deleted) return false;
-            if (!isDocContentSame(old.data, newData.data)) return false;
+            if (!await isDocContentSame(old.data, newData.data)) return false;
             Logger(msg + "Skipped (not changed) " + fullPath + ((d._deleted || d.deleted) ? " (deleted)" : ""), LOG_LEVEL_VERBOSE);
             return true;
             // d._rev = old._rev;
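The repeated one-line `isDocContentSame` edits in the hunks above all add `await` at the call sites, which suggests the helper (updated in the `src/lib` submodule) now resolves asynchronously; a bare call inside `if (...)` would then always be truthy. A minimal sketch of why the `await` matters, using a hypothetical stand-in named `isDocContentSameLike` rather than the library's actual implementation:

```typescript
// Illustrative only: a stand-in for the library's comparison helper,
// which these hunks suggest now returns a Promise<boolean>.
async function isDocContentSameLike(a: string, b: string): Promise<boolean> {
    return a === b;
}

async function shouldSkipWrite(oldData: string, newData: string): Promise<boolean> {
    // Without `await`, the returned Promise itself is truthy, so the
    // "not changed" branch would be taken even when the content differs.
    return await isDocContentSameLike(oldData, newData);
}

// Example: resolves to true, so the write would be skipped.
shouldSkipWrite("same", "same").then((skip) => console.log(skip));
```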
12 updates.md
@@ -6,6 +6,18 @@ It will be addressed soon. Please be patient if you are using filesystem-livesyn


 #### Version history
+- 0.21.5
+  - Improved:
+    - Now all revisions are shown only by their first few letters.
+    - Now document IDs are shown in the log by their first 8 letters.
+  - Fixed:
+    - A check before modifying files has been implemented.
+    - Content change detection has been improved.
+- 0.21.4
+  - This release has been skipped.
+- 0.21.3
+  - Implemented:
+    - Now we can use SHA1 as a fallback hash function.
 - 0.21.2
   - IMPORTANT NOTICE: **0.21.1 CONTAINS A BUG WHILE REBUILDING THE DATABASE. IF YOU HAVE REBUILT, PLEASE MAKE SURE THAT ALL FILES ARE SANE.**
   - This has been fixed in this version.
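The 0.21.5 notes above correspond to two small patterns repeated throughout the hunks in this compare: log only a short prefix of document IDs and revisions, and skip the write (and its downstream events) when the new content matches what is already on disk. A minimal, self-contained sketch of both; the helper names `shortId` and `modifyIfChanged` and the `VaultLike` interface are invented here for illustration, not the plugin's actual API:

```typescript
// Pattern 1: truncate IDs and revisions for logging, mirroring
// `entry._id.substring(0, 8)` / `entry._rev?.substring(0, 5)` in the hunks above.
function shortId(id: string, rev?: string): string {
    return `${id.substring(0, 8)}:${rev?.substring(0, 5) ?? "?"}`;
}

// Pattern 2: read first, write only when the content differs, and report
// whether a write actually happened (as vaultModify now does in this release).
interface VaultLike {
    read(path: string): Promise<string>;
    write(path: string, data: string): Promise<void>;
}

async function modifyIfChanged(vault: VaultLike, path: string, data: string): Promise<boolean> {
    const oldData = await vault.read(path);
    if (oldData === data) return false; // unchanged: skip the write and downstream events
    await vault.write(path, data);
    return true;
}

console.log(shortId("f1e2d3c4b5a69788", "5-abcdef")); // "f1e2d3c4:5-abc"
```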