mirror of
https://github.com/vrtmrz/obsidian-livesync.git
synced 2026-03-10 11:58:48 +00:00
Compare commits
10 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b42152db5e | ||
|
|
171cfc0a38 | ||
|
|
d2787bdb6a | ||
|
|
58845276e7 | ||
|
|
a2cc093a9e | ||
|
|
fec203a751 | ||
|
|
1a06837769 | ||
|
|
18d1ce8ec8 | ||
|
|
2221d8c4e8 | ||
|
|
08548f8630 |
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "obsidian-livesync",
|
||||
"name": "Self-hosted LiveSync",
|
||||
"version": "0.25.9",
|
||||
"version": "0.25.13",
|
||||
"minAppVersion": "0.9.12",
|
||||
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
|
||||
"author": "vorotamoroz",
|
||||
|
||||
1206
package-lock.json
generated
1206
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "obsidian-livesync",
|
||||
"version": "0.25.9",
|
||||
"version": "0.25.13",
|
||||
"description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
|
||||
"main": "main.js",
|
||||
"type": "module",
|
||||
@@ -92,10 +92,10 @@
|
||||
"fflate": "^0.8.2",
|
||||
"idb": "^8.0.3",
|
||||
"minimatch": "^10.0.1",
|
||||
"octagonal-wheels": "^0.1.37",
|
||||
"octagonal-wheels": "^0.1.38",
|
||||
"qrcode-generator": "^1.4.4",
|
||||
"svelte-check": "^4.1.7",
|
||||
"trystero": "^0.21.5",
|
||||
"trystero": "^0.21.7",
|
||||
"xxhash-wasm-102": "npm:xxhash-wasm@^1.0.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ export class HiddenFileSync extends LiveSyncCommands implements IObsidianModule
|
||||
return this.plugin.kvDB;
|
||||
}
|
||||
getConflictedDoc(path: FilePathWithPrefix, rev: string) {
|
||||
return this.plugin.localDatabase.getConflictedDoc(path, rev);
|
||||
return this.plugin.managers.conflictManager.getConflictedDoc(path, rev);
|
||||
}
|
||||
onunload() {
|
||||
this.periodicInternalFileScanProcessor?.disable();
|
||||
@@ -699,7 +699,7 @@ Offline Changed files: ${processFiles.length}`;
|
||||
revFrom._revs_info
|
||||
?.filter((e) => e.status == "available" && Number(e.rev.split("-")[0]) < conflictedRevNo)
|
||||
.first()?.rev ?? "";
|
||||
const result = await this.plugin.localDatabase.mergeObject(
|
||||
const result = await this.plugin.managers.conflictManager.mergeObject(
|
||||
doc.path,
|
||||
commonBase,
|
||||
doc._rev,
|
||||
|
||||
@@ -1,9 +1,27 @@
|
||||
import { sizeToHumanReadable } from "octagonal-wheels/number";
|
||||
import { LOG_LEVEL_NOTICE, type MetaEntry } from "../../lib/src/common/types";
|
||||
import {
|
||||
EntryTypes,
|
||||
LOG_LEVEL_INFO,
|
||||
LOG_LEVEL_NOTICE,
|
||||
LOG_LEVEL_VERBOSE,
|
||||
type DocumentID,
|
||||
type EntryDoc,
|
||||
type EntryLeaf,
|
||||
type MetaEntry,
|
||||
} from "../../lib/src/common/types";
|
||||
import { getNoFromRev } from "../../lib/src/pouchdb/LiveSyncLocalDB";
|
||||
import type { IObsidianModule } from "../../modules/AbstractObsidianModule";
|
||||
import { LiveSyncCommands } from "../LiveSyncCommands";
|
||||
import { serialized } from "octagonal-wheels/concurrency/lock_v2";
|
||||
import { arrayToChunkedArray } from "octagonal-wheels/collection";
|
||||
const DB_KEY_SEQ = "gc-seq";
|
||||
const DB_KEY_CHUNK_SET = "chunk-set";
|
||||
const DB_KEY_DOC_USAGE_MAP = "doc-usage-map";
|
||||
type ChunkID = DocumentID;
|
||||
type NoteDocumentID = DocumentID;
|
||||
type Rev = string;
|
||||
|
||||
type ChunkUsageMap = Map<NoteDocumentID, Map<Rev, Set<ChunkID>>>;
|
||||
export class LocalDatabaseMaintenance extends LiveSyncCommands implements IObsidianModule {
|
||||
$everyOnload(): Promise<boolean> {
|
||||
return Promise.resolve(true);
|
||||
@@ -262,4 +280,213 @@ Note: **Make sure to synchronise all devices before deletion.**
|
||||
this.clearHash();
|
||||
}
|
||||
}
|
||||
|
||||
async scanUnusedChunks() {
|
||||
const kvDB = this.plugin.kvDB;
|
||||
const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();
|
||||
const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();
|
||||
const KEEP_MAX_REVS = 10;
|
||||
const unusedSet = new Set<DocumentID>([...chunkSet]);
|
||||
for (const [, revIdMap] of chunkUsageMap) {
|
||||
const sortedRevId = [...revIdMap.entries()].sort((a, b) => getNoFromRev(b[0]) - getNoFromRev(a[0]));
|
||||
if (sortedRevId.length > KEEP_MAX_REVS) {
|
||||
// If we have more revisions than we want to keep, we need to delete the extras
|
||||
}
|
||||
const keepRevID = sortedRevId.slice(0, KEEP_MAX_REVS);
|
||||
keepRevID.forEach((e) => e[1].forEach((ee) => unusedSet.delete(ee)));
|
||||
}
|
||||
return {
|
||||
chunkSet,
|
||||
chunkUsageMap,
|
||||
unusedSet,
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Track changes in the database and update the chunk usage map for garbage collection.
|
||||
* Note that this only able to perform without Fetch chunks on demand.
|
||||
*/
|
||||
async trackChanges(fromStart: boolean = false, showNotice: boolean = false) {
|
||||
if (!this.isAvailable()) return;
|
||||
const logLevel = showNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
|
||||
const kvDB = this.plugin.kvDB;
|
||||
|
||||
const previousSeq = fromStart ? "" : await kvDB.get<string>(DB_KEY_SEQ);
|
||||
const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();
|
||||
|
||||
const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();
|
||||
|
||||
const db = this.localDatabase.localDatabase;
|
||||
const verbose = (msg: string) => this._verbose(msg);
|
||||
|
||||
const processDoc = async (doc: EntryDoc, isDeleted: boolean) => {
|
||||
if (!("children" in doc)) {
|
||||
return;
|
||||
}
|
||||
const id = doc._id;
|
||||
const rev = doc._rev!;
|
||||
const deleted = doc._deleted || isDeleted;
|
||||
const softDeleted = doc.deleted;
|
||||
const children = (doc.children || []) as DocumentID[];
|
||||
if (!chunkUsageMap.has(id)) {
|
||||
chunkUsageMap.set(id, new Map<Rev, Set<ChunkID>>());
|
||||
}
|
||||
for (const chunkId of children) {
|
||||
if (deleted) {
|
||||
chunkUsageMap.get(id)!.delete(rev);
|
||||
// chunkSet.add(chunkId as DocumentID);
|
||||
} else {
|
||||
if (softDeleted) {
|
||||
//TODO: Soft delete
|
||||
chunkUsageMap.get(id)!.set(rev, (chunkUsageMap.get(id)!.get(rev) || new Set()).add(chunkId));
|
||||
} else {
|
||||
chunkUsageMap.get(id)!.set(rev, (chunkUsageMap.get(id)!.get(rev) || new Set()).add(chunkId));
|
||||
}
|
||||
}
|
||||
}
|
||||
verbose(
|
||||
`Tracking chunk: ${id}/${rev} (${doc?.path}), deleted: ${deleted ? "yes" : "no"} Soft-Deleted:${softDeleted ? "yes" : "no"}`
|
||||
);
|
||||
return await Promise.resolve();
|
||||
};
|
||||
// let saveQueue = 0;
|
||||
const saveState = async (seq: string | number) => {
|
||||
await kvDB.set(DB_KEY_SEQ, seq);
|
||||
await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
|
||||
await kvDB.set(DB_KEY_DOC_USAGE_MAP, chunkUsageMap);
|
||||
};
|
||||
|
||||
const processDocRevisions = async (doc: EntryDoc) => {
|
||||
try {
|
||||
const oldRevisions = await db.get(doc._id, { revs: true, revs_info: true, conflicts: true });
|
||||
const allRevs = oldRevisions._revs_info?.length || 0;
|
||||
const info = (oldRevisions._revs_info || [])
|
||||
.filter((e) => e.status == "available" && e.rev != doc._rev)
|
||||
.filter((info) => !chunkUsageMap.get(doc._id)?.has(info.rev));
|
||||
const infoLength = info.length;
|
||||
this._log(`Found ${allRevs} old revisions for ${doc._id} . ${infoLength} items to check `);
|
||||
if (info.length > 0) {
|
||||
const oldDocs = await Promise.all(
|
||||
info
|
||||
.filter((revInfo) => revInfo.status == "available")
|
||||
.map((revInfo) => db.get(doc._id, { rev: revInfo.rev }))
|
||||
).then((docs) => docs.filter((doc) => doc));
|
||||
for (const oldDoc of oldDocs) {
|
||||
await processDoc(oldDoc as EntryDoc, false);
|
||||
}
|
||||
}
|
||||
} catch (ex) {
|
||||
if ((ex as any)?.status == 404) {
|
||||
this._log(`No revisions found for ${doc._id}`, LOG_LEVEL_VERBOSE);
|
||||
} else {
|
||||
this._log(`Error finding revisions for ${doc._id}`);
|
||||
this._verbose(ex);
|
||||
}
|
||||
}
|
||||
};
|
||||
const processChange = async (doc: EntryDoc, isDeleted: boolean, seq: string | number) => {
|
||||
if (doc.type === EntryTypes.CHUNK) {
|
||||
if (isDeleted) return;
|
||||
chunkSet.add(doc._id);
|
||||
} else if ("children" in doc) {
|
||||
await processDoc(doc, isDeleted);
|
||||
await serialized("x-process-doc", async () => await processDocRevisions(doc));
|
||||
}
|
||||
};
|
||||
// Track changes
|
||||
let i = 0;
|
||||
await db
|
||||
.changes({
|
||||
since: previousSeq || "",
|
||||
live: false,
|
||||
conflicts: true,
|
||||
include_docs: true,
|
||||
style: "all_docs",
|
||||
return_docs: false,
|
||||
})
|
||||
.on("change", async (change) => {
|
||||
// handle change
|
||||
await processChange(change.doc!, change.deleted ?? false, change.seq);
|
||||
if (i++ % 100 == 0) {
|
||||
await saveState(change.seq);
|
||||
}
|
||||
})
|
||||
.on("complete", async (info) => {
|
||||
await saveState(info.last_seq);
|
||||
});
|
||||
|
||||
// Track all changed docs and new-leafs;
|
||||
|
||||
const result = await this.scanUnusedChunks();
|
||||
|
||||
const message = `Total chunks: ${result.chunkSet.size}\nUnused chunks: ${result.unusedSet.size}`;
|
||||
this._log(message, logLevel);
|
||||
}
|
||||
async performGC(showingNotice = false) {
|
||||
if (!this.isAvailable()) return;
|
||||
await this.trackChanges(false, showingNotice);
|
||||
const title = "Are all devices synchronised?";
|
||||
const confirmMessage = `This function deletes unused chunks from the device. If there are differences between devices, some chunks may be missing when resolving conflicts.
|
||||
Be sure to synchronise before executing.
|
||||
|
||||
However, if you have deleted them, you may be able to recover them by performing Hatch -> Recreate missing chunks for all files.
|
||||
|
||||
Are you ready to delete unused chunks?`;
|
||||
|
||||
const logLevel = showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
|
||||
|
||||
const BUTTON_OK = `Yes, delete chunks`;
|
||||
const BUTTON_CANCEL = "Cancel";
|
||||
|
||||
const result = await this.plugin.confirm.askSelectStringDialogue(
|
||||
confirmMessage,
|
||||
[BUTTON_OK, BUTTON_CANCEL] as const,
|
||||
{
|
||||
title,
|
||||
defaultAction: BUTTON_CANCEL,
|
||||
}
|
||||
);
|
||||
if (result !== BUTTON_OK) {
|
||||
this._log("User cancelled chunk deletion", logLevel);
|
||||
return;
|
||||
}
|
||||
const { unusedSet, chunkSet } = await this.scanUnusedChunks();
|
||||
const deleteChunks = await this.database.allDocs({
|
||||
keys: [...unusedSet],
|
||||
include_docs: true,
|
||||
});
|
||||
for (const chunk of deleteChunks.rows) {
|
||||
if ((chunk as any)?.value?.deleted) {
|
||||
chunkSet.delete(chunk.key as DocumentID);
|
||||
}
|
||||
}
|
||||
const deleteDocs = deleteChunks.rows
|
||||
.filter((e) => "doc" in e)
|
||||
.map((e) => ({
|
||||
...(e as any).doc!,
|
||||
_deleted: true,
|
||||
}));
|
||||
|
||||
this._log(`Deleting chunks: ${deleteDocs.length}`, logLevel);
|
||||
const deleteChunkBatch = arrayToChunkedArray(deleteDocs, 100);
|
||||
let successCount = 0;
|
||||
let errored = 0;
|
||||
for (const batch of deleteChunkBatch) {
|
||||
const results = await this.database.bulkDocs(batch as EntryLeaf[]);
|
||||
for (const result of results) {
|
||||
if ("ok" in result) {
|
||||
chunkSet.delete(result.id as DocumentID);
|
||||
successCount++;
|
||||
} else {
|
||||
this._log(`Failed to delete doc: ${result.id}`, LOG_LEVEL_VERBOSE);
|
||||
errored++;
|
||||
}
|
||||
}
|
||||
this._log(`Deleting chunks: ${successCount} `, logLevel, "gc-preforming");
|
||||
}
|
||||
const message = `Garbage Collection completed.
|
||||
Success: ${successCount}, Errored: ${errored}`;
|
||||
this._log(message, logLevel);
|
||||
const kvDB = this.plugin.kvDB;
|
||||
await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
|
||||
}
|
||||
}
|
||||
|
||||
2
src/lib
2
src/lib
Submodule src/lib updated: 172e7ec61d...f21001fcb2
@@ -84,6 +84,7 @@ import { ModuleLiveSyncMain } from "./modules/main/ModuleLiveSyncMain.ts";
|
||||
import { ModuleExtraSyncObsidian } from "./modules/extraFeaturesObsidian/ModuleExtraSyncObsidian.ts";
|
||||
import { LocalDatabaseMaintenance } from "./features/LocalDatabaseMainte/CmdLocalDatabaseMainte.ts";
|
||||
import { P2PReplicator } from "./features/P2PSync/CmdP2PReplicator.ts";
|
||||
import type { LiveSyncManagers } from "./lib/src/managers/LiveSyncManagers.ts";
|
||||
|
||||
function throwShouldBeOverridden(): never {
|
||||
throw new Error("This function should be overridden by the module.");
|
||||
@@ -211,6 +212,7 @@ export default class ObsidianLiveSyncPlugin
|
||||
|
||||
settings!: ObsidianLiveSyncSettings;
|
||||
localDatabase!: LiveSyncLocalDB;
|
||||
managers!: LiveSyncManagers;
|
||||
simpleStore!: SimpleStore<CheckPointInfo>;
|
||||
replicator!: LiveSyncAbstractReplicator;
|
||||
confirm!: Confirm;
|
||||
@@ -580,6 +582,11 @@ export default class ObsidianLiveSyncPlugin
|
||||
$everyBeforeReplicate(showMessage: boolean): Promise<boolean> {
|
||||
return InterceptiveEvery;
|
||||
}
|
||||
|
||||
$$canReplicate(showMessage: boolean = false): Promise<boolean> {
|
||||
throwShouldBeOverridden();
|
||||
}
|
||||
|
||||
$$replicate(showMessage: boolean = false): Promise<boolean | void> {
|
||||
throwShouldBeOverridden();
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import { LiveSyncLocalDB } from "../../lib/src/pouchdb/LiveSyncLocalDB.ts";
|
||||
import { initializeStores } from "../../common/stores.ts";
|
||||
import { AbstractModule } from "../AbstractModule.ts";
|
||||
import type { ICoreModule } from "../ModuleTypes.ts";
|
||||
import { LiveSyncManagers } from "../../lib/src/managers/LiveSyncManagers.ts";
|
||||
|
||||
export class ModuleLocalDatabaseObsidian extends AbstractModule implements ICoreModule {
|
||||
$everyOnloadStart(): Promise<boolean> {
|
||||
@@ -14,7 +15,21 @@ export class ModuleLocalDatabaseObsidian extends AbstractModule implements ICore
|
||||
}
|
||||
const vaultName = this.core.$$getVaultName();
|
||||
this._log($msg("moduleLocalDatabase.logWaitingForReady"));
|
||||
const getDB = () => this.core.localDatabase.localDatabase;
|
||||
const getSettings = () => this.core.settings;
|
||||
this.core.managers = new LiveSyncManagers({
|
||||
get database() {
|
||||
return getDB();
|
||||
},
|
||||
getActiveReplicator: () => this.core.replicator,
|
||||
id2path: this.core.$$id2path.bind(this.core),
|
||||
path2id: this.core.$$path2id.bind(this.core),
|
||||
get settings() {
|
||||
return getSettings();
|
||||
},
|
||||
});
|
||||
this.core.localDatabase = new LiveSyncLocalDB(vaultName, this.core);
|
||||
|
||||
initializeStores(vaultName);
|
||||
return await this.localDatabase.initializeDatabase();
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import {
|
||||
type EntryLeaf,
|
||||
type LoadedEntry,
|
||||
type MetaEntry,
|
||||
type RemoteType,
|
||||
} from "../../lib/src/common/types";
|
||||
import { QueueProcessor } from "octagonal-wheels/concurrency/processor";
|
||||
import {
|
||||
@@ -38,7 +39,8 @@ const KEY_REPLICATION_ON_EVENT = "replicationOnEvent";
|
||||
const REPLICATION_ON_EVENT_FORECASTED_TIME = 5000;
|
||||
|
||||
export class ModuleReplicator extends AbstractModule implements ICoreModule {
|
||||
_replicatorType?: string;
|
||||
_replicatorType?: RemoteType;
|
||||
|
||||
$everyOnloadAfterLoadSettings(): Promise<boolean> {
|
||||
eventHub.onEvent(EVENT_FILE_SAVED, () => {
|
||||
if (this.settings.syncOnSave && !this.core.$$isSuspended()) {
|
||||
@@ -91,6 +93,10 @@ export class ModuleReplicator extends AbstractModule implements ICoreModule {
|
||||
|
||||
async $everyBeforeReplicate(showMessage: boolean): Promise<boolean> {
|
||||
// Checking salt
|
||||
if (!this.core.managers.networkManager.isOnline) {
|
||||
this._log("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
|
||||
return false;
|
||||
}
|
||||
// Showing message is false: that because be shown here. (And it is a fatal error, no way to hide it).
|
||||
if (!(await this.ensureReplicatorPBKDF2Salt(false))) {
|
||||
Logger("Failed to initialise the encryption key, preventing replication.", LOG_LEVEL_NOTICE);
|
||||
@@ -167,25 +173,42 @@ Even if you choose to clean up, you will see this option again if you exit Obsid
|
||||
}
|
||||
});
|
||||
}
|
||||
async $$_replicate(showMessage: boolean = false): Promise<boolean | void> {
|
||||
//--?
|
||||
if (!this.core.$$isReady()) return;
|
||||
|
||||
async $$canReplicate(showMessage: boolean = false): Promise<boolean> {
|
||||
if (!this.core.$$isReady()) {
|
||||
Logger(`Not ready`);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (isLockAcquired("cleanup")) {
|
||||
Logger($msg("Replicator.Message.Cleaned"), LOG_LEVEL_NOTICE);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (this.settings.versionUpFlash != "") {
|
||||
Logger($msg("Replicator.Message.VersionUpFlash"), LOG_LEVEL_NOTICE);
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(await this.core.$everyCommitPendingFileEvent())) {
|
||||
Logger($msg("Replicator.Message.Pending"), LOG_LEVEL_NOTICE);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!this.core.managers.networkManager.isOnline) {
|
||||
this._log("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
|
||||
return false;
|
||||
}
|
||||
if (!(await this.core.$everyBeforeReplicate(showMessage))) {
|
||||
Logger($msg("Replicator.Message.SomeModuleFailed"), LOG_LEVEL_NOTICE);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
async $$_replicate(showMessage: boolean = false): Promise<boolean | void> {
|
||||
const checkBeforeReplicate = await this.$$canReplicate(showMessage);
|
||||
if (!checkBeforeReplicate) return false;
|
||||
|
||||
//<-- Here could be an module.
|
||||
const ret = await this.core.replicator.openReplication(this.settings, false, showMessage, false);
|
||||
|
||||
@@ -15,15 +15,22 @@ export class ModuleReplicatorCouchDB extends AbstractModule implements ICoreModu
|
||||
return Promise.resolve(new LiveSyncCouchDBReplicator(this.core));
|
||||
}
|
||||
$everyAfterResumeProcess(): Promise<boolean> {
|
||||
if (!this.core.$$isSuspended) return Promise.resolve(true);
|
||||
if (!this.core.$$isReady) return Promise.resolve(true);
|
||||
if (this.settings.remoteType != REMOTE_MINIO && this.settings.remoteType != REMOTE_P2P) {
|
||||
// If LiveSync enabled, open replication
|
||||
if (this.settings.liveSync) {
|
||||
fireAndForget(() => this.core.replicator.openReplication(this.settings, true, false, false));
|
||||
}
|
||||
// If sync on start enabled, open replication
|
||||
if (!this.settings.liveSync && this.settings.syncOnStart) {
|
||||
// Possibly ok as if only share the result
|
||||
fireAndForget(() => this.core.replicator.openReplication(this.settings, false, false, false));
|
||||
const LiveSyncEnabled = this.settings.liveSync;
|
||||
const continuous = LiveSyncEnabled;
|
||||
const eventualOnStart = !LiveSyncEnabled && this.settings.syncOnStart;
|
||||
|
||||
// If enabled LiveSync or on start, open replication
|
||||
if (LiveSyncEnabled || eventualOnStart) {
|
||||
// And note that we do not open the conflict detection dialogue directly during this process.
|
||||
// This should be raised explicitly if needed.
|
||||
fireAndForget(async () => {
|
||||
const canReplicate = await this.core.$$canReplicate(false);
|
||||
if (!canReplicate) return;
|
||||
void this.core.replicator.openReplication(this.settings, continuous, false, false);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,10 @@ import { $msg } from "src/lib/src/common/i18n.ts";
|
||||
|
||||
export class ModuleCheckRemoteSize extends AbstractModule implements ICoreModule {
|
||||
async $allScanStat(): Promise<boolean> {
|
||||
if (this.core.managers.networkManager.isOnline === false) {
|
||||
this._log("Network is offline, skipping remote size check.", LOG_LEVEL_INFO);
|
||||
return true;
|
||||
}
|
||||
this._log($msg("moduleCheckRemoteSize.logCheckingStorageSizes"), LOG_LEVEL_VERBOSE);
|
||||
if (this.settings.notifyThresholdOfRemoteStorageSize < 0) {
|
||||
const message = $msg("moduleCheckRemoteSize.msgSetDBCapacity");
|
||||
|
||||
@@ -11,7 +11,7 @@ import { type UXFileInfo } from "../../../lib/src/common/types.ts";
|
||||
function getFileLockKey(file: TFile | TFolder | string | UXFileInfo) {
|
||||
return `fl:${typeof file == "string" ? file : file.path}`;
|
||||
}
|
||||
function toArrayBuffer(arr: Uint8Array | ArrayBuffer | DataView): ArrayBufferLike {
|
||||
function toArrayBuffer(arr: Uint8Array<ArrayBuffer> | ArrayBuffer | DataView<ArrayBuffer>): ArrayBuffer {
|
||||
if (arr instanceof Uint8Array) {
|
||||
return arr.buffer;
|
||||
}
|
||||
@@ -77,7 +77,11 @@ export class SerializedFileAccess {
|
||||
return await processReadFile(file, () => this.app.vault.adapter.readBinary(path));
|
||||
}
|
||||
|
||||
async adapterWrite(file: TFile | string, data: string | ArrayBuffer | Uint8Array, options?: DataWriteOptions) {
|
||||
async adapterWrite(
|
||||
file: TFile | string,
|
||||
data: string | ArrayBuffer | Uint8Array<ArrayBuffer>,
|
||||
options?: DataWriteOptions
|
||||
) {
|
||||
const path = file instanceof TFile ? file.path : file;
|
||||
if (typeof data === "string") {
|
||||
return await processWriteFile(file, () => this.app.vault.adapter.write(path, data, options));
|
||||
@@ -106,7 +110,7 @@ export class SerializedFileAccess {
|
||||
return await processReadFile(file, () => this.app.vault.readBinary(file));
|
||||
}
|
||||
|
||||
async vaultModify(file: TFile, data: string | ArrayBuffer | Uint8Array, options?: DataWriteOptions) {
|
||||
async vaultModify(file: TFile, data: string | ArrayBuffer | Uint8Array<ArrayBuffer>, options?: DataWriteOptions) {
|
||||
if (typeof data === "string") {
|
||||
return await processWriteFile(file, async () => {
|
||||
const oldData = await this.app.vault.read(file);
|
||||
@@ -131,7 +135,7 @@ export class SerializedFileAccess {
|
||||
}
|
||||
async vaultCreate(
|
||||
path: string,
|
||||
data: string | ArrayBuffer | Uint8Array,
|
||||
data: string | ArrayBuffer | Uint8Array<ArrayBuffer>,
|
||||
options?: DataWriteOptions
|
||||
): Promise<TFile> {
|
||||
if (typeof data === "string") {
|
||||
|
||||
@@ -228,7 +228,9 @@ export class ModuleMigration extends AbstractModule implements ICoreModule {
|
||||
// Check local database for compromised chunks
|
||||
const localCompromised = await countCompromisedChunks(this.localDatabase.localDatabase);
|
||||
const remote = this.core.$$getReplicator();
|
||||
const remoteCompromised = await remote.countCompromisedChunks();
|
||||
const remoteCompromised = this.core.managers.networkManager.isOnline
|
||||
? await remote.countCompromisedChunks()
|
||||
: 0;
|
||||
if (localCompromised === false) {
|
||||
Logger(`Failed to count compromised chunks in local database`, LOG_LEVEL_NOTICE);
|
||||
return false;
|
||||
|
||||
@@ -106,6 +106,9 @@ export class ModuleObsidianAPI extends AbstractObsidianModule implements IObsidi
|
||||
if (!isValidRemoteCouchDBURI(uri)) return "Remote URI is not valid";
|
||||
if (uri.toLowerCase() != uri) return "Remote URI and database name could not contain capital letters.";
|
||||
if (uri.indexOf(" ") !== -1) return "Remote URI and database name could not contain spaces.";
|
||||
if (!this.core.managers.networkManager.isOnline) {
|
||||
return "Network is offline";
|
||||
}
|
||||
// let authHeader = await this._authHeader.getAuthorizationHeader(auth);
|
||||
|
||||
const conf: PouchDB.HttpAdapter.HttpAdapterConfiguration = {
|
||||
|
||||
@@ -25,8 +25,8 @@ export class ConflictResolveModal extends Modal {
|
||||
title: string = "Conflicting changes";
|
||||
|
||||
pluginPickMode: boolean = false;
|
||||
localName: string = "Use Base";
|
||||
remoteName: string = "Use Conflicted";
|
||||
localName: string = "Base";
|
||||
remoteName: string = "Conflicted";
|
||||
offEvent?: ReturnType<typeof eventHub.onEvent>;
|
||||
|
||||
constructor(app: App, filename: string, diff: diff_result, pluginPickMode?: boolean, remoteName?: string) {
|
||||
@@ -36,8 +36,8 @@ export class ConflictResolveModal extends Modal {
|
||||
this.pluginPickMode = pluginPickMode || false;
|
||||
if (this.pluginPickMode) {
|
||||
this.title = "Pick a version";
|
||||
this.remoteName = `Use ${remoteName || "Remote"}`;
|
||||
this.localName = "Use Local";
|
||||
this.remoteName = `${remoteName || "Remote"}`;
|
||||
this.localName = "Local";
|
||||
}
|
||||
// Send cancel signal for the previous merge dialogue
|
||||
// if not there, simply be ignored.
|
||||
@@ -93,12 +93,13 @@ export class ConflictResolveModal extends Modal {
|
||||
const date2 =
|
||||
new Date(this.result.right.mtime).toLocaleString() + (this.result.right.deleted ? " (Deleted)" : "");
|
||||
div2.innerHTML = `
|
||||
<span class='deleted'>A:${date1}</span><br /><span class='added'>B:${date2}</span><br>
|
||||
<span class='deleted'><span class='conflict-dev-name'>${this.localName}</span>: ${date1}</span><br>
|
||||
<span class='added'><span class='conflict-dev-name'>${this.remoteName}</span>: ${date2}</span><br>
|
||||
`;
|
||||
contentEl.createEl("button", { text: this.localName }, (e) =>
|
||||
contentEl.createEl("button", { text: `Use ${this.localName}` }, (e) =>
|
||||
e.addEventListener("click", () => this.sendResponse(this.result.right.rev))
|
||||
).style.marginRight = "4px";
|
||||
contentEl.createEl("button", { text: this.remoteName }, (e) =>
|
||||
contentEl.createEl("button", { text: `Use ${this.remoteName}` }, (e) =>
|
||||
e.addEventListener("click", () => this.sendResponse(this.result.left.rev))
|
||||
).style.marginRight = "4px";
|
||||
if (!this.pluginPickMode) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { type IObsidianModule, AbstractObsidianModule } from "../AbstractObsidianModule.ts";
|
||||
// import { PouchDB } from "../../lib/src/pouchdb/pouchdb-browser";
|
||||
import { EVENT_REQUEST_RELOAD_SETTING_TAB, EVENT_SETTING_SAVED, eventHub } from "../../common/events";
|
||||
import { EVENT_REQUEST_RELOAD_SETTING_TAB, EVENT_SETTING_SAVED, eventHub } from "../../common/events.ts";
|
||||
import {
|
||||
type BucketSyncSetting,
|
||||
ChunkAlgorithmNames,
|
||||
@@ -11,8 +11,8 @@ import {
|
||||
SALT_OF_PASSPHRASE,
|
||||
} from "../../lib/src/common/types";
|
||||
import { LOG_LEVEL_NOTICE, LOG_LEVEL_URGENT } from "octagonal-wheels/common/logger";
|
||||
import { $msg, setLang } from "../../lib/src/common/i18n";
|
||||
import { isCloudantURI } from "../../lib/src/pouchdb/utils_couchdb";
|
||||
import { $msg, setLang } from "../../lib/src/common/i18n.ts";
|
||||
import { isCloudantURI } from "../../lib/src/pouchdb/utils_couchdb.ts";
|
||||
import { getLanguage } from "obsidian";
|
||||
import { SUPPORTED_I18N_LANGS, type I18N_LANGS } from "../../lib/src/common/rosetta.ts";
|
||||
import { decryptString, encryptString } from "@/lib/src/encryption/stringEncryption.ts";
|
||||
@@ -23,8 +23,7 @@ export class ModuleObsidianSettings extends AbstractObsidianModule implements IO
|
||||
const obsidianLanguage = getLanguage();
|
||||
if (
|
||||
SUPPORTED_I18N_LANGS.indexOf(obsidianLanguage) !== -1 && // Check if the language is supported
|
||||
obsidianLanguage != this.settings.displayLanguage && // Check if the language is different from the current setting
|
||||
this.settings.displayLanguage != ""
|
||||
obsidianLanguage != this.settings.displayLanguage // Check if the language is different from the current setting
|
||||
) {
|
||||
// Check if the current setting is not empty (Means migrated or installed).
|
||||
this.settings.displayLanguage = obsidianLanguage as I18N_LANGS;
|
||||
|
||||
@@ -158,36 +158,40 @@ export function paneMaintenance(
|
||||
)
|
||||
.addOnUpdate(this.onlyOnMinIO);
|
||||
});
|
||||
void addPanel(paneEl, "Garbage Collection (Beta)", (e) => e, this.onlyOnP2POrCouchDB).then((paneEl) => {
|
||||
void addPanel(paneEl, "Garbage Collection (Beta2)", (e) => e, this.onlyOnP2POrCouchDB).then((paneEl) => {
|
||||
new Setting(paneEl)
|
||||
.setName("Remove all orphaned chunks")
|
||||
.setDesc("Remove all orphaned chunks from the local database.")
|
||||
.setName("Scan garbage")
|
||||
.setDesc("Scan for garbage chunks in the database.")
|
||||
.addButton((button) =>
|
||||
button
|
||||
.setButtonText("Remove")
|
||||
.setWarning()
|
||||
.setButtonText("Scan")
|
||||
// .setWarning()
|
||||
.setDisabled(false)
|
||||
.onClick(async () => {
|
||||
await this.plugin
|
||||
.getAddOn<LocalDatabaseMaintenance>(LocalDatabaseMaintenance.name)
|
||||
?.removeUnusedChunks();
|
||||
?.trackChanges(false, true);
|
||||
})
|
||||
);
|
||||
|
||||
new Setting(paneEl)
|
||||
.setName("Resurrect deleted chunks")
|
||||
.setDesc(
|
||||
"If you have deleted chunks before fully synchronised and missed some chunks, you possibly can resurrect them."
|
||||
)
|
||||
.addButton((button) =>
|
||||
button.setButtonText("Rescan").onClick(async () => {
|
||||
await this.plugin
|
||||
.getAddOn<LocalDatabaseMaintenance>(LocalDatabaseMaintenance.name)
|
||||
?.trackChanges(true, true);
|
||||
})
|
||||
);
|
||||
new Setting(paneEl)
|
||||
.setName("Collect garbage")
|
||||
.setDesc("Remove all unused chunks from the local database.")
|
||||
.addButton((button) =>
|
||||
button
|
||||
.setButtonText("Try resurrect")
|
||||
.setButtonText("Collect")
|
||||
.setWarning()
|
||||
.setDisabled(false)
|
||||
.onClick(async () => {
|
||||
await this.plugin
|
||||
.getAddOn<LocalDatabaseMaintenance>(LocalDatabaseMaintenance.name)
|
||||
?.resurrectChunks();
|
||||
?.performGC(true);
|
||||
})
|
||||
);
|
||||
new Setting(paneEl)
|
||||
@@ -205,6 +209,41 @@ export function paneMaintenance(
|
||||
})
|
||||
);
|
||||
});
|
||||
void addPanel(paneEl, "Garbage Collection (Old and Experimental)", (e) => e, this.onlyOnP2POrCouchDB).then(
|
||||
(paneEl) => {
|
||||
new Setting(paneEl)
|
||||
.setName("Remove all orphaned chunks")
|
||||
.setDesc("Remove all orphaned chunks from the local database.")
|
||||
.addButton((button) =>
|
||||
button
|
||||
.setButtonText("Remove")
|
||||
.setWarning()
|
||||
.setDisabled(false)
|
||||
.onClick(async () => {
|
||||
await this.plugin
|
||||
.getAddOn<LocalDatabaseMaintenance>(LocalDatabaseMaintenance.name)
|
||||
?.removeUnusedChunks();
|
||||
})
|
||||
);
|
||||
|
||||
new Setting(paneEl)
|
||||
.setName("Resurrect deleted chunks")
|
||||
.setDesc(
|
||||
"If you have deleted chunks before fully synchronised and missed some chunks, you possibly can resurrect them."
|
||||
)
|
||||
.addButton((button) =>
|
||||
button
|
||||
.setButtonText("Try resurrect")
|
||||
.setWarning()
|
||||
.setDisabled(false)
|
||||
.onClick(async () => {
|
||||
await this.plugin
|
||||
.getAddOn<LocalDatabaseMaintenance>(LocalDatabaseMaintenance.name)
|
||||
?.resurrectChunks();
|
||||
})
|
||||
);
|
||||
}
|
||||
);
|
||||
void addPanel(paneEl, "Rebuilding Operations (Local)").then((paneEl) => {
|
||||
new Setting(paneEl)
|
||||
.setName("Fetch from remote")
|
||||
|
||||
@@ -12,6 +12,11 @@
|
||||
background-color: var(--text-muted);
|
||||
}
|
||||
|
||||
.conflict-dev-name {
|
||||
display: inline-block;
|
||||
min-width: 5em;
|
||||
}
|
||||
|
||||
.op-scrollable {
|
||||
overflow-y: scroll;
|
||||
/* min-height: 280px; */
|
||||
|
||||
132
updates.md
132
updates.md
@@ -1,3 +1,47 @@
|
||||
## 0.25.13
|
||||
|
||||
1st September, 2025
|
||||
|
||||
### Fixed
|
||||
- Conflict resolving dialogue now properly displays the changeset name instead of A or B (#691).
|
||||
|
||||
## 0.25.12
|
||||
|
||||
29th August, 2025
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed an issue with automatic synchronisation starting (#702).
|
||||
|
||||
## 0.25.11
|
||||
|
||||
28th August, 2025
|
||||
|
||||
### Fixed
|
||||
|
||||
- Automatic translation detection on the first launch now works correctly (#630).
|
||||
- No errors are shown during synchronisations in offline (if not explicitly requested) (#699).
|
||||
- Some previously missing checks during automatic synchronisation now work correctly.
|
||||
|
||||
## 0.25.10
|
||||
|
||||
26th August, 2025
|
||||
|
||||
### New experimental feature
|
||||
|
||||
- We can perform Garbage Collection (Beta2) without rebuilding the entire database, and also fetch the database.
|
||||
- Note that this feature is very experimental and should be used with caution.
|
||||
- This feature requires disabling `Fetch chunks on demand`.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Resetting the bucket now properly clears all uploaded files.
|
||||
|
||||
### Refactored
|
||||
|
||||
- Some files have been moved to better reflect their purpose and improve maintainability.
|
||||
- The extensive LiveSyncLocalDB has been split into separate files for each role.
|
||||
|
||||
## 0.25.9
|
||||
|
||||
20th August, 2025
|
||||
@@ -30,94 +74,6 @@
|
||||
|
||||
- Type errors have been corrected.
|
||||
|
||||
## 0.25.7
|
||||
|
||||
15th August, 2025
|
||||
|
||||
**Since the release of 0.25.6, there are two large problems. Please update immediately.**
|
||||
|
||||
- We may have corrupted some documents during the migration process. **Please check your documents on the wizard.**
|
||||
- Due to a chunk ID assignment issue, some data has not been encrypted. **Please rebuild the database using Rebuild Everything** if you have enabled E2EE.
|
||||
|
||||
**_So, If you have enabled E2EE, please perform `Rebuild everything`. If not, please check your documents on the wizard._**
|
||||
|
||||
In the next version, insecure chunk detection will be implemented.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Off-loaded chunking has been fixed to ensure proper functionality (#693).
|
||||
- Chunk document ID assignment has been fixed.
|
||||
- Replication prevention message during version up detection has been improved (#686).
|
||||
- `Keep A` and `Keep B` on Conflict resolving dialogue has been renamed to `Use Base` and `Use Conflicted` (#691).
|
||||
|
||||
### Improved
|
||||
|
||||
- Documents whose metadata and content size do not match are now detected, reported, and prevented from being applied to the storage.
|
||||
- This behaviour can be configured in `Patch` -> `Edge case addressing (Behaviour)` -> `Process files even if seems to be corrupted`
|
||||
- Note: this toggle is for the direct-database-manipulation users.
|
||||
|
||||
### New Features
|
||||
|
||||
- `Scan for Broken files` has been implemented on `Hatch` -> `TroubleShooting`.
|
||||
|
||||
### Refactored
|
||||
|
||||
- Off-loaded processes have been refactored for the better maintainability.
|
||||
- Files prefixed `bg.worker` now work on the worker threads.
|
||||
- Files prefixed `bgWorker.` now also control these worker threads. (I know what you want to say... I will rename them).
|
||||
- Removed unused code.
|
||||
|
||||
## ~~0.25.5~~ 0.25.6
|
||||
|
||||
(0.25.5 has been withdrawn due to a bug in the `Fetch chunks on demand` feature).
|
||||
|
||||
9th August, 2025
|
||||
|
||||
### Fixed
|
||||
|
||||
- Storage scanning no longer occurs when `Suspend file watching` is enabled (including boot-sequence).
|
||||
- This change improves safety when troubleshooting or fetching the remote database.
|
||||
- `Fetch chunks on demand` is now working again (if you installed 0.25.5, other versions are not affected).
|
||||
|
||||
### Improved
|
||||
|
||||
- Saving notes and files now consumes less memory.
|
||||
- Data is no longer fully buffered in memory and written at once; instead, it is now written in each over-2MB increments.
|
||||
- Chunk caching is now more efficient.
|
||||
- Chunks are now managed solely by their count (still maintained as LRU). If memory usage becomes excessive, they will be automatically released by the system-runtime.
|
||||
- Reverse-indexing is also no longer used. It is performed by scanning the caches, which also acts as WeakRef thinning.
|
||||
- Both of them (may) are effective for #692, #680, and some more.
|
||||
|
||||
### Changed
|
||||
|
||||
- `Incubate Chunks in Document` (also known as `Eden`) is now fully sunset.
|
||||
- Existing chunks can still be read, but new ones will no longer be created.
|
||||
- The `Compute revisions for chunks` setting has also been removed.
|
||||
- This feature is now always enabled and is no longer configurable (restoring the original behaviour).
|
||||
- As mentioned, `Memory cache size (by total characters)` has been removed.
|
||||
- The `Memory cache size (by total items)` setting is now the only option available (but it has 10x ratio compared to the previous version).
|
||||
|
||||
### Refactored
|
||||
|
||||
- A significant refactoring of the core codebase is underway.
|
||||
- This is part of our ongoing efforts to improve code maintainability, readability, and to unify interfaces.
|
||||
- Previously, complex files posed a risk due to a low bus factor. Fortunately, as our devices have become faster and more capable, we can now write code that is clearer and more maintainable (And not so much costs on performance).
|
||||
- Hashing functions have been refactored into the `HashManager` class and its derived classes.
|
||||
- Chunk splitting functions have been refactored into the `ContentSplitterCore` class and its derived classes.
|
||||
- Change tracking functions have been refactored into the `ChangeManager` class.
|
||||
- Chunk read/write functions have been refactored into the `ChunkManager` class.
|
||||
- Fetching chunks on demand is now handled separately from the `ChunkManager` and chunk reading functions. Chunks are queued by the `ChunkManager` and then processed by the `ChunkFetcher`, simplifying the process and reducing unnecessary complexity.
|
||||
- Then, local database access via `LiveSyncLocalDB` has been refactored to use the new classes.
|
||||
- References to external sources from `commonlib` have been corrected.
|
||||
- Type definitions in `types.ts` have been refined.
|
||||
- Unit tests are being added incrementally.
|
||||
- I am using `Deno` for testing, to simplify testing and coverage reporting.
|
||||
- While this is not identical to the Obsidian environment, `jest` may also have limitations. It is certainly better than having no tests.
|
||||
- In other words, recent manual scenario testing has highlighted some shortcomings.
|
||||
- `pouchdb-test`, used for testing PouchDB with Deno, has been added, utilising the `memory` adapter.
|
||||
|
||||
Side note: Although class-oriented programming is sometimes considered an outdated style, I have come to re-evaluate it as valuable from the perspectives of maintainability and readability.
|
||||
|
||||
## 0.25.0
|
||||
|
||||
19th July, 2025 (beta1 in 0.25.0-beta1, 13th July, 2025)
|
||||
|
||||
@@ -11,6 +11,96 @@ As a result, this is the first time in a while that forward compatibility has be
|
||||
|
||||
---
|
||||
|
||||
|
||||
## 0.25.7
|
||||
|
||||
15th August, 2025
|
||||
|
||||
**Since the release of 0.25.6, there are two large problems. Please update immediately.**
|
||||
|
||||
- We may have corrupted some documents during the migration process. **Please check your documents on the wizard.**
|
||||
- Due to a chunk ID assignment issue, some data has not been encrypted. **Please rebuild the database using Rebuild Everything** if you have enabled E2EE.
|
||||
|
||||
**_So, If you have enabled E2EE, please perform `Rebuild everything`. If not, please check your documents on the wizard._**
|
||||
|
||||
In the next version, insecure chunk detection will be implemented.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Off-loaded chunking has been fixed to ensure proper functionality (#693).
|
||||
- Chunk document ID assignment has been fixed.
|
||||
- Replication prevention message during version up detection has been improved (#686).
|
||||
- `Keep A` and `Keep B` on Conflict resolving dialogue has been renamed to `Use Base` and `Use Conflicted` (#691).
|
||||
|
||||
### Improved
|
||||
|
||||
- Documents whose metadata and content size do not match are now detected, reported, and prevented from being applied to the storage.
|
||||
- This behaviour can be configured in `Patch` -> `Edge case addressing (Behaviour)` -> `Process files even if seems to be corrupted`
|
||||
- Note: this toggle is for the direct-database-manipulation users.
|
||||
|
||||
### New Features
|
||||
|
||||
- `Scan for Broken files` has been implemented on `Hatch` -> `TroubleShooting`.
|
||||
|
||||
### Refactored
|
||||
|
||||
- Off-loaded processes have been refactored for the better maintainability.
|
||||
- Files prefixed `bg.worker` now work on the worker threads.
|
||||
- Files prefixed `bgWorker.` now also control these worker threads. (I know what you want to say... I will rename them).
|
||||
- Removed unused code.
|
||||
|
||||
|
||||
## ~~0.25.5~~ 0.25.6
|
||||
|
||||
(0.25.5 has been withdrawn due to a bug in the `Fetch chunks on demand` feature).
|
||||
|
||||
9th August, 2025
|
||||
|
||||
### Fixed
|
||||
|
||||
- Storage scanning no longer occurs when `Suspend file watching` is enabled (including boot-sequence).
|
||||
- This change improves safety when troubleshooting or fetching the remote database.
|
||||
- `Fetch chunks on demand` is now working again (if you installed 0.25.5, other versions are not affected).
|
||||
|
||||
### Improved
|
||||
|
||||
- Saving notes and files now consumes less memory.
|
||||
- Data is no longer fully buffered in memory and written at once; instead, it is now written in each over-2MB increments.
|
||||
- Chunk caching is now more efficient.
|
||||
- Chunks are now managed solely by their count (still maintained as LRU). If memory usage becomes excessive, they will be automatically released by the system-runtime.
|
||||
- Reverse-indexing is also no longer used. It is performed by scanning the caches, which also acts as WeakRef thinning.
|
||||
- Both of them (may) are effective for #692, #680, and some more.
|
||||
|
||||
### Changed
|
||||
|
||||
- `Incubate Chunks in Document` (also known as `Eden`) is now fully sunset.
|
||||
- Existing chunks can still be read, but new ones will no longer be created.
|
||||
- The `Compute revisions for chunks` setting has also been removed.
|
||||
- This feature is now always enabled and is no longer configurable (restoring the original behaviour).
|
||||
- As mentioned, `Memory cache size (by total characters)` has been removed.
|
||||
- The `Memory cache size (by total items)` setting is now the only option available (but it has 10x ratio compared to the previous version).
|
||||
|
||||
### Refactored
|
||||
|
||||
- A significant refactoring of the core codebase is underway.
|
||||
- This is part of our ongoing efforts to improve code maintainability, readability, and to unify interfaces.
|
||||
- Previously, complex files posed a risk due to a low bus factor. Fortunately, as our devices have become faster and more capable, we can now write code that is clearer and more maintainable (And not so much costs on performance).
|
||||
- Hashing functions have been refactored into the `HashManager` class and its derived classes.
|
||||
- Chunk splitting functions have been refactored into the `ContentSplitterCore` class and its derived classes.
|
||||
- Change tracking functions have been refactored into the `ChangeManager` class.
|
||||
- Chunk read/write functions have been refactored into the `ChunkManager` class.
|
||||
- Fetching chunks on demand is now handled separately from the `ChunkManager` and chunk reading functions. Chunks are queued by the `ChunkManager` and then processed by the `ChunkFetcher`, simplifying the process and reducing unnecessary complexity.
|
||||
- Then, local database access via `LiveSyncLocalDB` has been refactored to use the new classes.
|
||||
- References to external sources from `commonlib` have been corrected.
|
||||
- Type definitions in `types.ts` have been refined.
|
||||
- Unit tests are being added incrementally.
|
||||
- I am using `Deno` for testing, to simplify testing and coverage reporting.
|
||||
- While this is not identical to the Obsidian environment, `jest` may also have limitations. It is certainly better than having no tests.
|
||||
- In other words, recent manual scenario testing has highlighted some shortcomings.
|
||||
- `pouchdb-test`, used for testing PouchDB with Deno, has been added, utilising the `memory` adapter.
|
||||
|
||||
Side note: Although class-oriented programming is sometimes considered an outdated style, I have come to re-evaluate it as valuable from the perspectives of maintainability and readability.
|
||||
|
||||
## 0.25.4
|
||||
|
||||
29th July, 2025
|
||||
|
||||
Reference in New Issue
Block a user