Compare commits

...

12 Commits

Author SHA1 Message Date
vorotamoroz
b35052a485 bump 2022-09-05 16:55:35 +09:00
vorotamoroz
c367d35e09 Target ES2018 2022-09-05 16:55:29 +09:00
vorotamoroz
2a5078cdbb bump 2022-09-05 16:54:06 +09:00
vorotamoroz
8112a07210 Implemented:
- Auto chunk size adjusting.
  Now our large files are processed more efficiently
- These configurations have been removed.

Improved
- Remote chunk retrieving logic has been sped up.

Fixed
- Fixed process handling of boot sequence
2022-09-05 16:53:22 +09:00
vorotamoroz
c9daa1b47d Fixed issue of importing configurations. 2022-09-04 01:16:29 +09:00
vorotamoroz
73ac93e8c5 bump 2022-09-04 01:08:09 +09:00
vorotamoroz
8d2b9eff37 Improved:
- New test items have been added to `Check database configuration`
2022-09-04 01:08:02 +09:00
vorotamoroz
0ee32a2147 bump 2022-09-03 16:44:51 +09:00
vorotamoroz
ac3c78e198 Fixed
- Could not retrieve files if synchronisation has been interrupted or failed
2022-09-03 16:43:59 +09:00
vorotamoroz
0da1e3d9c8 bump 2022-08-30 15:24:38 +09:00
vorotamoroz
8f021a3c93 Improved:
- Use local chunks in preference to remote ones if present.
2022-08-30 15:24:26 +09:00
vorotamoroz
6db0743096 Update release.yml 2022-08-29 16:50:53 +09:00
10 changed files with 100 additions and 77 deletions

View File

@@ -22,7 +22,7 @@ jobs:
- name: Get Version
id: version
run: |
echo "::set-output name=tag::$(git describe --abbrev=0)"
echo "::set-output name=tag::$(git describe --abbrev=0 --tags)"
# Build the plugin
- name: Build
id: build

View File

@@ -29,7 +29,7 @@ esbuild
external: ["obsidian", "electron", ...builtins],
format: "cjs",
watch: !prod,
target: "es2015",
target: "es2018",
logLevel: "info",
sourcemap: prod ? false : "inline",
treeShaking: true,

View File

@@ -1,7 +1,7 @@
{
"id": "obsidian-livesync",
"name": "Self-hosted LiveSync",
"version": "0.14.0",
"version": "0.14.6",
"minAppVersion": "0.9.12",
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"author": "vorotamoroz",

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "obsidian-livesync",
"version": "0.14.0",
"version": "0.14.6",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "obsidian-livesync",
"version": "0.14.0",
"version": "0.14.6",
"license": "MIT",
"dependencies": {
"diff-match-patch": "^1.0.5",

View File

@@ -1,6 +1,6 @@
{
"name": "obsidian-livesync",
"version": "0.14.0",
"version": "0.14.6",
"description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"main": "main.js",
"type": "module",

View File

@@ -411,11 +411,11 @@ export class LocalPouchDB {
let children: string[] = [];
if (this.settings.readChunksOnline) {
const items = await this.fetchLeafFromRemote(obj.children);
const items = await this.CollectChunks(obj.children);
if (items) {
for (const v of items) {
if (v.doc && v.doc.type == "leaf") {
children.push(v.doc.data);
if (v && v.type == "leaf") {
children.push(v.data);
} else {
if (!opt) {
Logger(`Chunks of ${obj._id} are not valid.`, LOG_LEVEL.NOTICE);
@@ -615,7 +615,8 @@ export class LocalPouchDB {
let processed = 0;
let made = 0;
let skiped = 0;
let pieceSize = MAX_DOC_SIZE_BIN * Math.max(this.settings.customChunkSize, 1);
const maxChunkSize = MAX_DOC_SIZE_BIN * Math.max(this.settings.customChunkSize, 1);
let pieceSize = maxChunkSize;
let plainSplit = false;
let cacheUsed = 0;
const userpasswordHash = this.h32Raw(new TextEncoder().encode(this.settings.passphrase));
@@ -624,20 +625,11 @@ export class LocalPouchDB {
plainSplit = true;
}
const minimumChunkSize = Math.min(Math.max(40, ~~(note.data.length / 100)), maxChunkSize);
if (pieceSize < minimumChunkSize) pieceSize = minimumChunkSize;
const newLeafs: EntryLeaf[] = [];
// To keep low bandwith and database size,
// Dedup pieces on database.
// from 0.1.10, for best performance. we use markdown delimiters
// 1. \n[^\n]{longLineThreshold}[^\n]*\n -> long sentence shuld break.
// 2. \n\n shold break
// 3. \r\n\r\n should break
// 4. \n# should break.
let minimumChunkSize = this.settings.minimumChunkSize;
if (minimumChunkSize < 10) minimumChunkSize = 10;
let longLineThreshold = this.settings.longLineThreshold;
if (longLineThreshold < 100) longLineThreshold = 100;
const pieces = splitPieces2(note.data, pieceSize, plainSplit, minimumChunkSize, longLineThreshold);
const pieces = splitPieces2(note.data, pieceSize, plainSplit, minimumChunkSize, 0);
for (const piece of pieces()) {
processed++;
let leafid = "";
@@ -1357,16 +1349,44 @@ export class LocalPouchDB {
}
return true;
}
async fetchLeafFromRemote(ids: string[], showResult = false) {
// Collect chunks from both local and remote.
async CollectChunks(ids: string[], showResult = false) {
// Fetch local chunks.
const localChunks = await this.localDatabase.allDocs({ keys: ids, include_docs: true });
const missingChunks = localChunks.rows.filter(e => "error" in e).map(e => e.key);
// If we have enough chunks, return them.
if (missingChunks.length == 0) {
return localChunks.rows.map(e => e.doc);
}
// Fetching remote chunks.
const ret = await connectRemoteCouchDBWithSetting(this.settings, this.isMobile);
if (typeof (ret) === "string") {
Logger(`Could not connect to server.${ret} `, showResult ? LOG_LEVEL.NOTICE : LOG_LEVEL.INFO, "fetch");
return;
return false;
}
const leafs = await ret.db.allDocs({ keys: ids, include_docs: true });
return leafs.rows;
}
const remoteChunks = await ret.db.allDocs({ keys: missingChunks, include_docs: true });
if (remoteChunks.rows.some(e => "error" in e)) {
return false;
}
const remoteChunkItems = remoteChunks.rows.map(e => e.doc);
const max = remoteChunkItems.length;
let last = 0;
// Chunks should be ordered by as we requested.
function findChunk(key: string) {
const offset = last;
for (let i = 0; i < max; i++) {
const idx = (offset + i) % max;
last = i;
if (remoteChunkItems[idx]._id == key) return remoteChunkItems[idx];
}
throw Error("Chunk collecting error");
}
// Merge them
return localChunks.rows.map(e => ("error" in e) ? (findChunk(e.key)) : e.doc);
}
}

View File

@@ -469,6 +469,22 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
} else {
addResult("✔ httpd.enable_cors is ok.");
}
// If the server is not cloudant, configure request size
if (!this.plugin.settings.couchDB_URI.contains(".cloudantnosqldb.")) {
// REQUEST SIZE
if (Number(responseConfig?.chttpd?.max_http_request_size ?? 0) < 4294967296) {
addResult("❗ chttpd.max_http_request_size is low)");
addConfigFixButton("Set chttpd.max_http_request_size", "chttpd/max_http_request_size", "4294967296");
} else {
addResult("✔ chttpd.max_http_request_size is ok.");
}
if (Number(responseConfig?.couchdb?.max_document_size ?? 0) < 50000000) {
addResult("❗ couchdb.max_document_size is low)");
addConfigFixButton("Set couchdb.max_document_size", "couchdb/max_document_size", "50000000");
} else {
addResult("✔ couchdb.max_document_size is ok.");
}
}
// CORS check
// checking connectivity for mobile
if (responseConfig?.cors?.credentials != "true") {
@@ -583,43 +599,7 @@ export class ObsidianLiveSyncSettingTab extends PluginSettingTab {
})
)
containerLocalDatabaseEl.createEl("div", {
text: sanitizeHTMLToDom(`Advanced settings<br>
Configuration of how LiveSync makes chunks from the file.`),
});
new Setting(containerLocalDatabaseEl)
.setName("Minimum chunk size")
.setDesc("(letters), minimum chunk size.")
.addText((text) => {
text.setPlaceholder("")
.setValue(this.plugin.settings.minimumChunkSize + "")
.onChange(async (value) => {
let v = Number(value);
if (isNaN(v) || v < 10 || v > 1000) {
v = 10;
}
this.plugin.settings.minimumChunkSize = v;
await this.plugin.saveSettings();
});
text.inputEl.setAttribute("type", "number");
});
new Setting(containerLocalDatabaseEl)
.setName("LongLine Threshold")
.setDesc("(letters), If the line is longer than this, make the line to chunk")
.addText((text) => {
text.setPlaceholder("")
.setValue(this.plugin.settings.longLineThreshold + "")
.onChange(async (value) => {
let v = Number(value);
if (isNaN(v) || v < 10 || v > 1000) {
v = 10;
}
this.plugin.settings.longLineThreshold = v;
await this.plugin.saveSettings();
});
text.inputEl.setAttribute("type", "number");
});
let newDatabaseName = this.plugin.settings.additionalSuffixOfDatabaseName + "";
new Setting(containerLocalDatabaseEl)
.setName("Database suffix")

View File

@@ -402,6 +402,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
}
}
let initDB;
this.settings = newSettingW;
await this.saveSettings();
if (keepLocalDB == "no") {
this.resetLocalOldDatabase();
@@ -1617,18 +1618,20 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
const count = objects.length;
Logger(procedurename);
let i = 0;
// let lastTicks = performance.now() + 2000;
// let workProcs = 0;
const p = Parallels();
const limit = 10;
Logger(`${procedurename} exec.`);
for (const v of objects) {
// workProcs++;
if (!this.localDatabase.isReady) throw Error("Database is not ready!");
p.add(callback(v).then(() => {
const addProc = (p: () => Promise<void>): Promise<unknown> => {
return p();
}
p.add(addProc(async () => {
try {
await callback(v);
i++;
if (i % 100 == 0) {
if (i % 50 == 0) {
const notify = `${procedurename} : ${i}/${count}`;
if (showingNotice) {
Logger(notify, LOG_LEVEL.NOTICE, "syncAll");
@@ -1637,13 +1640,11 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
}
this.setStatusBarText(notify);
}
}).catch(ex => {
} catch (ex) {
Logger(`Error while ${procedurename}`, LOG_LEVEL.NOTICE);
Logger(ex);
}).finally(() => {
// workProcs--;
})
);
}
}));
await p.wait(limit);
}
await p.all();
@@ -1659,6 +1660,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
await runAll("UPDATE STORAGE", onlyInDatabase, async (e) => {
Logger(`Check or pull from db:${e}`);
await this.pullFile(e, filesStorage, false, null, false);
Logger(`Check or pull from db:${e} OK`);
});
}
if (!initialScan) {
@@ -1928,7 +1930,7 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
async pullFile(filename: string, fileList?: TFile[], force?: boolean, rev?: string, waitForReady = true) {
const targetFile = this.app.vault.getAbstractFileByPath(id2path(filename));
if (!this.isTargetFile(targetFile)) return;
if (!this.isTargetFile(id2path(filename))) return;
if (targetFile == null) {
//have to create;
const doc = await this.localDatabase.getDBEntry(filename, rev ? { rev: rev } : null, false, waitForReady);

View File

@@ -2,12 +2,12 @@
"compilerOptions": {
"baseUrl": ".",
"module": "ESNext",
"target": "ES6",
"target": "ES2018",
"allowJs": true,
"noImplicitAny": true,
"moduleResolution": "node",
// "importsNotUsedAsValues": "error",
"importHelpers": true,
"importHelpers": false,
"alwaysStrict": true,
"lib": ["es2018", "DOM", "ES5", "ES6", "ES7"]
},

View File

@@ -1,3 +1,24 @@
### 0.14.1
- The target selecting filter was implemented.
Now we can set what files are synchronised by regular expression.
- We can configure the size of chunks.
We can use larger chunks to improve performance.
(This feature can not be used with IBM Cloudant)
- Read chunks online.
Now we can synchronise only metadata and retrieve chunks on demand. It reduces local database size and time for replication.
- Added this note.
- Use local chunks in preference to remote ones if present.
#### Recommended configuration for Self-hosted CouchDB
- Set chunk size to around 100 to 250 (10MB - 25MB per chunk)
- *Set batch size to 100 and batch limit to 20 (0.14.2)*
- Be sure to keep `Read chunks online` checked.
#### Minors
- 0.14.2 Fixed issue about retrieving files if synchronisation has been interrupted or failed
- 0.14.3 New test items have been added to `Check database configuration`.
- 0.14.4 Fixed issue of importing configurations.
### 0.13.0
- The metadata of the deleted files will be kept on the database by default. If you want to delete this as the previous version, please turn on `Delete metadata of deleted files.`. And, if you have upgraded from the older version, please ensure every device has been upgraded.
@@ -14,4 +35,4 @@
- Now, we can synchronise hidden files that conflicted on each devices.
- We can search for conflicting docs.
- Pending processes can now be run at any time.
- Performance improved on synchronising large numbers of files at once.
- Performance improved on synchronising large numbers of files at once.