Merge pull request #843 from andrewleech/daemon-sync

cli: implement continuous sync daemon mode
vorotamoroz
2026-05-13 18:53:59 +09:00
committed by GitHub
21 changed files with 1702 additions and 124 deletions

61
package-lock.json generated
View File

@@ -16,11 +16,13 @@
"@smithy/protocol-http": "^5.3.9",
"@smithy/querystring-builder": "^4.2.9",
"@trystero-p2p/nostr": "^0.23.0",
"chokidar": "^4.0.0",
"commander": "^14.0.3",
"diff-match-patch": "^1.0.5",
"fflate": "^0.8.2",
"idb": "^8.0.3",
"markdown-it": "^14.1.1",
"micromatch": "^4.0.0",
"minimatch": "^10.2.2",
"octagonal-wheels": "^0.1.45",
"pouchdb-adapter-leveldb": "^9.0.0",
@@ -38,6 +40,7 @@
"@types/deno": "^2.5.0",
"@types/diff-match-patch": "^1.0.36",
"@types/markdown-it": "^14.1.2",
"@types/micromatch": "^4.0.10",
"@types/node": "^24.10.13",
"@types/pouchdb": "^6.4.2",
"@types/pouchdb-adapter-http": "^6.1.6",
@@ -984,7 +987,6 @@
"integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.29.0",
"@babel/generator": "^7.29.0",
@@ -2378,7 +2380,8 @@
"resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz",
"integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/@minhducsun2002/leb128": {
"version": "1.0.0",
@@ -4224,7 +4227,6 @@
"integrity": "sha512-ou/d51QSdTyN26D7h6dSpusAKaZkAiGM55/AKYi+9AGZw7q85hElbjK3kEyzXHhLSnRISHOYzVge6x0jRZ7DXA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@sveltejs/vite-plugin-svelte-inspector": "^5.0.0",
"deepmerge": "^4.3.1",
@@ -4298,6 +4300,13 @@
"@babel/types": "^7.0.0"
}
},
"node_modules/@types/braces": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/@types/braces/-/braces-3.0.5.tgz",
"integrity": "sha512-SQFof9H+LXeWNz8wDe7oN5zu7ket0qwMu5vZubW4GCJ8Kkeh6nBWUz87+KTz/G3Kqsrp0j/W253XJb3KMEeg3w==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/chai": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
@@ -4417,6 +4426,16 @@
"dev": true,
"license": "MIT"
},
"node_modules/@types/micromatch": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/@types/micromatch/-/micromatch-4.0.10.tgz",
"integrity": "sha512-5jOhFDElqr4DKTrTEbnW8DZ4Hz5LRUEmyrGpCMrD/NphYv3nUnaF08xmSLx1rGGnyEs/kFnhiw6dCgcDqMr5PQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/braces": "*"
}
},
"node_modules/@types/minimatch": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz",
@@ -4738,7 +4757,6 @@
"integrity": "sha512-klQbnPAAiGYFyI02+znpBRLyjL4/BrBd0nyWkdC0s/6xFLkXYQ8OoRrSkqacS1ddVxf/LDyODIKbQ5TgKAf/Fg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.56.1",
"@typescript-eslint/types": "8.56.1",
@@ -4943,7 +4961,6 @@
"integrity": "sha512-gjjrFC4+kPVK/fN9URDJWrssU5Gqh8Az8pKG/NSfQ2V+ky8b/y1BgBg0Ug13+hOGp5pzInonmGRPn7vOgSLgzA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@blazediff/core": "1.9.1",
"@vitest/mocker": "4.1.1",
@@ -4967,7 +4984,6 @@
"integrity": "sha512-dtVSBZZha2k/7P7EAXXrEAoxuIKl8Yv9f2Dk4GN/DGfmhf4DQvkvu+57okR2wq/gan1xppKjL/aBxK/kbYrbGw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/browser": "4.1.1",
"@vitest/mocker": "4.1.1",
@@ -5409,7 +5425,6 @@
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -6123,7 +6138,6 @@
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"license": "MIT",
"dependencies": {
"fill-range": "^7.1.1"
@@ -6152,7 +6166,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.9.0",
"caniuse-lite": "^1.0.30001759",
@@ -6385,7 +6398,6 @@
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz",
"integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==",
"dev": true,
"license": "MIT",
"dependencies": {
"readdirp": "^4.0.1"
@@ -6648,7 +6660,8 @@
"resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz",
"integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/cross-spawn": {
"version": "7.0.6",
@@ -7441,7 +7454,6 @@
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"peer": true,
"bin": {
"esbuild": "bin/esbuild"
},
@@ -7555,7 +7567,6 @@
"integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -8255,7 +8266,6 @@
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"license": "MIT",
"dependencies": {
"to-regex-range": "^5.0.1"
@@ -9358,7 +9368,6 @@
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.12.0"
@@ -9695,7 +9704,6 @@
"integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"jiti": "lib/jiti-cli.mjs"
}
@@ -10409,7 +10417,6 @@
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"dev": true,
"license": "MIT",
"dependencies": {
"braces": "^3.0.3",
@@ -11119,7 +11126,6 @@
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8.6"
@@ -11203,7 +11209,6 @@
"integrity": "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==",
"dev": true,
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"playwright-core": "1.58.2"
},
@@ -11270,7 +11275,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -11296,7 +11300,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"lilconfig": "^3.1.1"
},
@@ -11943,7 +11946,6 @@
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz",
"integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 14.18.0"
@@ -12956,7 +12958,8 @@
"resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.3.tgz",
"integrity": "sha512-i/n8VsZydrugj3Iuzll8+x/00GH2vnYsk1eomD8QiRrSAeW6ItbCQDtfXCeJHd0iwiNagqjQkvpvREEPtW3IoQ==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/sublevel-pouchdb": {
"version": "9.0.0",
@@ -13025,7 +13028,6 @@
"integrity": "sha512-0a/huwc8e2es+7KFi70esqsReRfRbrT8h1cJSY/+z1lF0yKM6TT+//HYu28Yxstr50H7ifaqZRDGd0KuKDxP7w==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@jridgewell/remapping": "^2.3.4",
"@jridgewell/sourcemap-codec": "^1.5.0",
@@ -13336,7 +13338,6 @@
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -13358,7 +13359,6 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-number": "^7.0.0"
@@ -13455,7 +13455,6 @@
"integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "~0.27.0",
"get-tsconfig": "^4.7.5"
@@ -14086,7 +14085,6 @@
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"dev": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -14236,7 +14234,6 @@
"integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.27.0",
"fdir": "^6.5.0",
@@ -14873,7 +14870,6 @@
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -14907,7 +14903,6 @@
"integrity": "sha512-yF+o4POL41rpAzj5KVILUxm1GCjKnELvaqmU9TLLUbMfDzuN0UpUR9uaDs+mCtjPe+uYPksXDRLQGGPvj1cTmA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/expect": "4.1.1",
"@vitest/mocker": "4.1.1",
@@ -15015,7 +15010,8 @@
"resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz",
"integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/wait-port": {
"version": "1.1.0",
@@ -15667,7 +15663,6 @@
"integrity": "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==",
"dev": true,
"license": "ISC",
"peer": true,
"bin": {
"yaml": "bin.mjs"
},

View File

@@ -69,6 +69,7 @@
"@types/deno": "^2.5.0",
"@types/diff-match-patch": "^1.0.36",
"@types/markdown-it": "^14.1.2",
"@types/micromatch": "^4.0.10",
"@types/node": "^24.10.13",
"@types/pouchdb": "^6.4.2",
"@types/pouchdb-adapter-http": "^6.1.6",
@@ -133,11 +134,13 @@
"@smithy/protocol-http": "^5.3.9",
"@smithy/querystring-builder": "^4.2.9",
"@trystero-p2p/nostr": "^0.23.0",
"chokidar": "^4.0.0",
"commander": "^14.0.3",
"diff-match-patch": "^1.0.5",
"fflate": "^0.8.2",
"idb": "^8.0.3",
"markdown-it": "^14.1.1",
"micromatch": "^4.0.0",
"minimatch": "^10.2.2",
"octagonal-wheels": "^0.1.45",
"pouchdb-adapter-leveldb": "^9.0.0",

View File

@@ -92,39 +92,39 @@ livesync-cli ./my-db pull folder/note.md ./note.md
## Installation
### Build from source
```bash
# Clone with submodules, because the shared core lives in src/lib
git clone --recurse-submodules <repository-url>
cd obsidian-livesync
# If you already cloned without submodules, run this once instead
git submodule update --init --recursive
# Install dependencies from the repository root
npm install
# Build the CLI from its package directory
cd src/apps/cli
npm run build
```
If `src/lib` is missing, `npm run build` now stops early with a targeted message
instead of a low-level Vite `ENOENT` error.
Run the CLI:
```bash
# Run with npm script (from repository root)
npm run --silent cli -- [database-path] [command] [args...]
# Run the built executable directly
node src/apps/cli/dist/index.cjs [database-path] [command] [args...]
```
### Docker
A Docker image is provided for headless / server deployments. Build from the repository root:
```bash
docker build -f src/apps/cli/Dockerfile -t livesync-cli .
@@ -297,9 +297,11 @@ Options:
--force, -f Overwrite existing file on init-settings
--verbose, -v Enable verbose logging
--debug, -d Enable debug logging (includes verbose)
--interval <N>, -i <N> (daemon only) Poll CouchDB every N seconds instead of using the _changes feed
--help, -h Show this help message
Commands:
daemon (default) Run mirror scan then continuously sync CouchDB <-> local filesystem
init-settings [path] Create settings JSON from DEFAULT_SETTINGS
sync Run one replication cycle and exit
p2p-peers <timeout> Show discovered peers as [peer]<TAB><peer-id><TAB><peer-name>
@@ -406,6 +408,86 @@ In other words, it performs the following actions:
Note: `mirror` does not respect file deletions. If a file is deleted in storage, it will be restored on the next `mirror` run. To delete a file, use the `rm` command instead. This is a little inconvenient, but it is intentional behaviour (if we handled deletions automatically in `mirror`, we would have to guard against a ton of edge cases).
##### daemon
`daemon` is the default command when no command is specified. It runs an initial mirror scan and then continuously syncs changes in both directions:
- **CouchDB → local filesystem**: via the `_changes` feed (LiveSync mode, default) or periodic polling (`--interval N`).
- **local filesystem → CouchDB**: via chokidar file watching. Any file created, modified, or deleted in the vault directory is pushed to CouchDB.
In **LiveSync mode** the `_changes` feed delivers remote changes as they arrive, with sub-second latency. In **polling mode** (`--interval N`) the CLI polls CouchDB every N seconds. Use polling mode if your CouchDB instance does not support long-lived HTTP connections, or if you need predictable network usage.
The daemon exits cleanly on `SIGINT` or `SIGTERM`.
```bash
# LiveSync mode (default — _changes feed, near-real-time)
livesync-cli /path/to/vault
# Polling mode — poll every 60 seconds
livesync-cli /path/to/vault --interval 60
```
### .livesync/ignore
Place a `.livesync/ignore` file in your vault root to exclude files from sync in both directions (local → CouchDB and CouchDB → local).
**Format:**
- Lines beginning with `#` are comments.
- Blank lines are ignored.
- All other lines are [minimatch](https://github.com/isaacs/minimatch) glob patterns, relative to the vault root.
- The directive `import: .gitignore` (exactly this string) reads `.gitignore` from the vault root and merges its non-comment, non-blank lines into the ignore rules.
- Negation patterns (lines starting with `!`) are not supported and will cause an error on load.
**Example `.livesync/ignore`:**
```
# Ignore temporary files
*.tmp
*.swp
# Ignore build output
build/
dist/
# Merge patterns from .gitignore
import: .gitignore
```
Patterns apply in both directions: the chokidar watcher will not emit events for matched files, and the `isTargetFile` filter will exclude them from CouchDB → local sync.
Changes to this file require a daemon restart to take effect.
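The shipped `IgnoreRules` implementation is not reproduced in this README; the following is a minimal sketch of how the documented format could be parsed and matched, assuming the `minimatch` package this PR adds as a dependency (function names are illustrative):
```typescript
import { readFile } from "fs/promises";
import * as path from "path";
import { minimatch } from "minimatch";

// Hypothetical parser for the documented .livesync/ignore format.
async function loadIgnorePatterns(vaultRoot: string): Promise<string[]> {
    const raw = await readFile(path.join(vaultRoot, ".livesync/ignore"), "utf-8");
    const patterns: string[] = [];
    for (const line of raw.split(/\r?\n/)) {
        const entry = line.trim();
        if (entry === "" || entry.startsWith("#")) continue; // blank lines and comments
        if (entry.startsWith("!")) {
            throw new Error(`Negation patterns are not supported: ${entry}`);
        }
        if (entry === "import: .gitignore") {
            // Merge non-comment, non-blank lines from the vault's .gitignore.
            const gitignore = await readFile(path.join(vaultRoot, ".gitignore"), "utf-8");
            patterns.push(
                ...gitignore
                    .split(/\r?\n/)
                    .map((l) => l.trim())
                    .filter((l) => l !== "" && !l.startsWith("#"))
            );
            continue;
        }
        patterns.push(entry);
    }
    return patterns;
}

// A relative path is ignored when any pattern matches it. The real implementation
// may additionally expand directory patterns such as "build/" to "build/**".
function shouldIgnore(relativePath: string, patterns: string[]): boolean {
    return patterns.some((p) => minimatch(relativePath, p));
}
```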
### Systemd Installation
The `deploy/` directory contains a systemd unit template and an install script.
**Automated install (user service, recommended):**
```bash
bash src/apps/cli/deploy/install.sh --vault /path/to/vault
```
**With polling interval:**
```bash
bash src/apps/cli/deploy/install.sh --vault /path/to/vault --interval 60
```
**System-wide install** (requires root / sudo for `/etc/systemd/system/`):
```bash
bash src/apps/cli/deploy/install.sh --system --vault /path/to/vault
```
The script:
1. Builds the CLI (`npm install` + `npm run build`).
2. Installs the binary to `~/.local/bin/livesync-cli` (user) or `/usr/local/bin/livesync-cli` (system).
3. Writes the unit file to `~/.config/systemd/user/livesync-cli.service` (user) or `/etc/systemd/system/livesync-cli.service` (system).
4. Runs `systemctl [--user] daemon-reload && systemctl [--user] enable --now livesync-cli`.
**Manual setup** — if you prefer to manage the unit yourself, copy `deploy/livesync-cli.service`, replace `LIVESYNC_BIN` and `LIVESYNC_VAULT_PATH` with the actual binary path and vault path, then install to the appropriate systemd directory.
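For reference, a substituted user-mode unit ends up looking like the following (the home directory and interval are illustrative; the template later in this diff also sets `TimeoutStartSec`, journald output, and `LimitNOFILE`):
```
# ~/.config/systemd/user/livesync-cli.service (after substitution)
[Unit]
Description=Self-hosted LiveSync CLI Daemon
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart="/home/alice/.local/bin/livesync-cli" "/home/alice/vault" --interval 60
Restart=on-failure
RestartSec=10

[Install]
WantedBy=default.target
```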
### Planned options
- `--immediate`: Perform sync after the command (e.g. `push`, `pull`, `put`, `rm`).

View File

@@ -39,12 +39,6 @@ export class NodeFileSystemAdapter implements IFileSystemAdapter<NodeFile, NodeF
async getAbstractFileByPath(p: FilePath | string): Promise<NodeFile | null> {
const pathStr = this.normalisePath(p);
const cached = this.fileCache.get(pathStr);
if (cached) {
return cached;
}
return await this.refreshFile(pathStr);
}
@@ -112,6 +106,7 @@ export class NodeFileSystemAdapter implements IFileSystemAdapter<NodeFile, NodeF
this.fileCache.set(pathStr, file);
return file;
} catch {
// Evict so a deleted file is not returned by subsequent cache scans.
this.fileCache.delete(pathStr);
return null;
}

View File

@@ -15,7 +15,12 @@ export class NodeVaultAdapter implements IVaultAdapter<NodeFile> {
}
async read(file: NodeFile): Promise<string> {
return await fs.readFile(this.resolvePath(file.path), "utf-8");
const content = await fs.readFile(this.resolvePath(file.path), "utf-8");
// Correct stale stat.size — chokidar stats may be from a poll before the final write.
// The downstream document integrity check compares stat.size to content length, so
// they must agree or other clients reject the file as corrupted.
file.stat.size = Buffer.byteLength(content, "utf-8");
return content;
}
async cachedRead(file: NodeFile): Promise<string> {
@@ -25,6 +30,8 @@ export class NodeVaultAdapter implements IVaultAdapter<NodeFile> {
async readBinary(file: NodeFile): Promise<ArrayBuffer> {
const buffer = await fs.readFile(this.resolvePath(file.path));
// Same correction as read() — ensure stat.size matches actual byte length.
file.stat.size = buffer.length;
return buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength) as ArrayBuffer;
}

View File

@@ -0,0 +1,312 @@
import { describe, expect, it, vi, beforeEach, afterEach } from "vitest";
import { runCommand } from "./runCommand";
import type { CLIOptions } from "./types";
// Mock performFullScan so daemon tests don't require a real CouchDB connection.
vi.mock("@lib/serviceFeatures/offlineScanner", () => ({
performFullScan: vi.fn(async () => true),
}));
// Mock UnresolvedErrorManager to avoid event-hub side effects.
vi.mock("@lib/services/base/UnresolvedErrorManager", () => ({
UnresolvedErrorManager: class UnresolvedErrorManager {
showError() {}
clearError() {}
clearErrors() {}
},
}));
import * as offlineScanner from "@lib/serviceFeatures/offlineScanner";
function createCoreMock() {
return {
services: {
control: {
activated: Promise.resolve(),
applySettings: vi.fn(async () => {}),
},
setting: {
applyPartial: vi.fn(async () => {}),
currentSettings: vi.fn(() => ({ liveSync: true, syncOnStart: false })),
},
replication: {
replicate: vi.fn(async () => true),
},
appLifecycle: {
onUnload: {
addHandler: vi.fn(),
},
},
},
serviceModules: {
fileHandler: {
dbToStorage: vi.fn(async () => true),
storeFileToDB: vi.fn(async () => true),
},
storageAccess: {
readFileAuto: vi.fn(async () => ""),
writeFileAuto: vi.fn(async () => {}),
},
databaseFileAccess: {
fetch: vi.fn(async () => undefined),
},
},
} as any;
}
function makeDaemonOptions(interval?: number): CLIOptions {
return {
command: "daemon",
commandArgs: [],
databasePath: "/tmp/vault",
verbose: false,
force: false,
interval,
};
}
const baseContext = {
vaultPath: "/tmp/vault",
settingsPath: "/tmp/vault/.livesync/settings.json",
originalSyncSettings: {
liveSync: true,
syncOnStart: false,
periodicReplication: false,
syncOnSave: false,
syncOnEditorSave: false,
syncOnFileOpen: false,
syncAfterMerge: false,
},
} as any;
describe("daemon command", () => {
beforeEach(() => {
vi.restoreAllMocks();
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
it("calls performFullScan during startup", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(offlineScanner.performFullScan).toHaveBeenCalledTimes(1);
});
it("returns false when performFullScan fails", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(false);
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(false);
});
it("polling mode: calls setTimeout when interval option is set", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const setTimeoutSpy = vi.spyOn(globalThis, "setTimeout");
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
expect(setTimeoutSpy).toHaveBeenCalledTimes(1);
// Interval should be in milliseconds (30s → 30000ms)
expect(setTimeoutSpy).toHaveBeenCalledWith(expect.any(Function), 30000);
});
it("polling mode: applies settings with suspendFileWatching=false before setting interval", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(10), { ...baseContext, core });
expect(core.services.setting.applyPartial).toHaveBeenCalledWith(
expect.objectContaining({ suspendFileWatching: false }),
true
);
expect(core.services.control.applySettings).toHaveBeenCalledTimes(1);
});
it("liveSync mode: calls applyPartial and applySettings", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(core.services.setting.applyPartial).toHaveBeenCalledWith(
expect.objectContaining({
...baseContext.originalSyncSettings,
suspendFileWatching: false,
}),
true
);
expect(core.services.control.applySettings).toHaveBeenCalledTimes(1);
});
it("liveSync mode: logs warning when both liveSync and syncOnStart are false", async () => {
const core = createCoreMock();
core.services.setting.currentSettings = vi.fn(() => ({
liveSync: false,
syncOnStart: false,
}));
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(true);
const warningCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("liveSync and syncOnStart are both disabled")
);
expect(warningCalls.length).toBeGreaterThan(0);
});
it("liveSync mode: no warning when liveSync is true", async () => {
const core = createCoreMock();
core.services.setting.currentSettings = vi.fn(() => ({
liveSync: true,
syncOnStart: false,
}));
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
await runCommand(makeDaemonOptions(), { ...baseContext, core });
const warningCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("liveSync and syncOnStart are both disabled")
);
expect(warningCalls.length).toBe(0);
});
it("calls replicate before performFullScan", async () => {
const core = createCoreMock();
const callOrder: string[] = [];
core.services.replication.replicate = vi.fn(async () => {
callOrder.push("replicate");
return true;
});
vi.mocked(offlineScanner.performFullScan).mockImplementation(async () => {
callOrder.push("performFullScan");
return true;
});
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(callOrder).toEqual(["replicate", "performFullScan"]);
});
it("returns false when initial replication fails", async () => {
const core = createCoreMock();
core.services.replication.replicate = vi.fn(async () => false);
vi.mocked(offlineScanner.performFullScan).mockClear();
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(false);
// performFullScan should NOT have been called
expect(offlineScanner.performFullScan).not.toHaveBeenCalled();
});
it("polling mode: registers onUnload handler that clears timeout", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(10), { ...baseContext, core });
// onUnload handler should have been registered
expect(core.services.appLifecycle.onUnload.addHandler).toHaveBeenCalledTimes(1);
const handler = core.services.appLifecycle.onUnload.addHandler.mock.calls[0][0];
// Get the timeout ID that was created
const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout");
await handler();
expect(clearTimeoutSpy).toHaveBeenCalledTimes(1);
});
it("polling backoff: interval escalates on failure, caps at 300000ms, then halves on recovery", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
vi.spyOn(console, "error").mockImplementation(() => {});
// startup replicate (call 1) succeeds; poll calls 2-7 fail; call 8 succeeds.
let callCount = 0;
core.services.replication.replicate = vi.fn(async () => {
callCount++;
if (callCount === 1) return true; // initial startup replicate
if (callCount <= 7) throw new Error("network failure");
return true; // recovery
});
const baseMs = 30 * 1000;
const setTimeoutSpy = vi.spyOn(globalThis, "setTimeout");
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
// After runCommand returns the first setTimeout has been scheduled.
// setTimeoutSpy.mock.calls[0] is the initial schedule (baseMs).
expect(setTimeoutSpy.mock.calls[0][1]).toBe(baseMs);
// Advance through 6 failure polls. After each failure the next setTimeout
// should be scheduled with a larger (or capped) interval.
// formula: min(base * 2^n, 300000). base=30000ms.
// failure 1: 30000*2=60000, failure 2: 30000*4=120000,
// failure 3: 30000*8=240000, failure 4: 30000*16=480000→capped, 5→cap, 6→cap
const expectedIntervals = [
baseMs * 2, // after failure 1: 60000
baseMs * 4, // after failure 2: 120000
baseMs * 8, // after failure 3: 240000
300_000, // after failure 4 (would be 480000, capped)
300_000, // after failure 5 (cap)
300_000, // after failure 6 (cap)
];
for (const expected of expectedIntervals) {
const prevCallCount = setTimeoutSpy.mock.calls.length;
await vi.advanceTimersByTimeAsync(setTimeoutSpy.mock.calls[prevCallCount - 1][1] as number);
const newCallCount = setTimeoutSpy.mock.calls.length;
expect(newCallCount).toBeGreaterThan(prevCallCount);
expect(setTimeoutSpy.mock.calls[newCallCount - 1][1]).toBe(expected);
}
// Now trigger the success poll — interval should halve each time toward base.
// After failure 6, consecutiveFailures=6, currentIntervalMs=300000.
// On success: consecutiveFailures=5, currentIntervalMs=150000.
const prevCallCount = setTimeoutSpy.mock.calls.length;
await vi.advanceTimersByTimeAsync(setTimeoutSpy.mock.calls[prevCallCount - 1][1] as number);
const afterSuccessCallCount = setTimeoutSpy.mock.calls.length;
expect(afterSuccessCallCount).toBeGreaterThan(prevCallCount);
// The interval after one success should be halved (300000 / 2 = 150000).
expect(setTimeoutSpy.mock.calls[afterSuccessCallCount - 1][1]).toBe(150_000);
});
it("polling error handling: replicate rejection is caught and console.error is called", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
// Make replicate succeed on the initial call (startup), then fail on the poll.
let callCount = 0;
core.services.replication.replicate = vi.fn(async () => {
callCount++;
if (callCount === 1) return true; // startup replicate
throw new Error("network failure");
});
const intervalMs = 30 * 1000;
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
// Advance time to trigger the first poll callback and flush its async work.
await vi.advanceTimersByTimeAsync(intervalMs);
// No unhandled rejection — the error was caught internally.
const errorCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("Poll error")
);
expect(errorCalls.length).toBeGreaterThan(0);
});
});

View File

@@ -15,6 +15,96 @@ export async function runCommand(options: CLIOptions, context: CLICommandContext
await core.services.control.activated;
if (options.command === "daemon") {
const log = (msg: unknown) => console.error(`[Daemon] ${msg}`);
// Skip the config mismatch dialog — the daemon cannot resolve it interactively
// and the default "Dismiss" action would block replication. The daemon should
// accept whatever configuration the remote has.
await core.services.setting.applyPartial({ disableCheckingConfigMismatch: true }, true);
// 1. Replicate CouchDB → local PouchDB so the mirror scan has content to work with.
log("Replicating from CouchDB...");
const replResult = await core.services.replication.replicate(true);
if (!replResult) {
console.error("[Daemon] Initial CouchDB replication failed, cannot continue");
return false;
}
log("CouchDB replication complete");
// 2. Mirror scan to reconcile PouchDB ↔ local filesystem.
const errorManager = new UnresolvedErrorManager(core.services.appLifecycle);
log("Running mirror scan...");
const scanOk = await performFullScan(core as any, log, errorManager, false, true);
if (!scanOk) {
console.error("[Daemon] Mirror scan failed, cannot continue");
return false;
}
log("Mirror scan complete");
// 3. Re-enable sync.
const restoreSyncSettings = async () => {
await core.services.setting.applyPartial({
...context.originalSyncSettings,
suspendFileWatching: false,
}, true);
// applySettings fires the full lifecycle: onSuspending → onResumed.
// ModuleReplicatorCouchDB starts continuous replication on onResumed
// via fireAndForget.
await core.services.control.applySettings();
// Lifecycle events (onSuspending) may re-enable suspension flags.
// Clear them explicitly after the lifecycle completes. applyPartial
// with true is a direct store write — it does not re-trigger lifecycle.
await core.services.setting.applyPartial({
suspendFileWatching: false,
suspendParseReplicationResult: false,
}, true);
};
if (options.interval) {
log(`Polling mode: syncing every ${options.interval}s`);
await restoreSyncSettings();
const baseIntervalMs = options.interval * 1000;
let currentIntervalMs = baseIntervalMs;
let consecutiveFailures = 0;
const maxIntervalMs = 5 * 60 * 1000; // 5 minutes cap
const poll = async () => {
try {
await core.services.replication.replicate(true);
if (consecutiveFailures > 0) {
consecutiveFailures--;
currentIntervalMs = Math.max(currentIntervalMs / 2, baseIntervalMs);
log(`Replication recovered`);
}
} catch (err) {
consecutiveFailures++;
currentIntervalMs = Math.min(baseIntervalMs * Math.pow(2, consecutiveFailures), maxIntervalMs);
console.error(`[Daemon] Poll error (${consecutiveFailures} consecutive):`, err);
if (consecutiveFailures >= 5) {
console.error(`[Daemon] Warning: ${consecutiveFailures} consecutive failures, backing off to ${Math.round(currentIntervalMs / 1000)}s`);
}
}
pollTimer = setTimeout(poll, currentIntervalMs);
};
let pollTimer: ReturnType<typeof setTimeout> = setTimeout(poll, currentIntervalMs);
core.services.appLifecycle.onUnload.addHandler(async () => {
clearTimeout(pollTimer);
return true;
});
} else {
log("LiveSync mode: restoring sync settings and starting _changes feed");
await restoreSyncSettings();
// The applySettings() lifecycle fires onResumed → ModuleReplicatorCouchDB which
// starts continuous replication via fireAndForget(openReplication). Don't call
// openReplication directly — it races with the handler and causes dedup/termination.
log("LiveSync active");
const currentSettings = core.services.setting.currentSettings();
if (!currentSettings.liveSync && !currentSettings.syncOnStart) {
console.error("[Daemon] Warning: liveSync and syncOnStart are both disabled in settings. " +
"No sync will occur. Set liveSync=true in your settings file for continuous sync, " +
"or use --interval for polling mode.");
}
}
return true;
}

View File

@@ -1,5 +1,6 @@
import { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import type { ObsidianLiveSyncSettings } from "@lib/common/types";
export type CLICommand =
| "daemon"
@@ -29,15 +30,18 @@ export interface CLIOptions {
force?: boolean;
command: CLICommand;
commandArgs: string[];
interval?: number;
}
export interface CLICommandContext {
databasePath: string;
core: LiveSyncBaseCore<ServiceContext, any>;
settingsPath: string;
originalSyncSettings: Pick<ObsidianLiveSyncSettings, "liveSync" | "syncOnStart" | "periodicReplication" | "syncOnSave" | "syncOnEditorSave" | "syncOnFileOpen" | "syncAfterMerge">;
}
export const VALID_COMMANDS = new Set([
"daemon",
"sync",
"p2p-peers",
"p2p-sync",

187
src/apps/cli/deploy/install.sh Executable file
View File

@@ -0,0 +1,187 @@
#!/usr/bin/env bash
# install.sh — install livesync-cli as a systemd service
#
# Usage:
# install.sh [--user] [--system] [--vault <path>] [--interval <N>]
#
# Defaults: user install, prompts for vault path if not supplied.
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "$SCRIPT_DIR/../../.." && pwd)"
CLI_DIR="$REPO_ROOT/src/apps/cli"
SERVICE_TEMPLATE="$SCRIPT_DIR/livesync-cli.service"
# ── Argument parsing ────────────────────────────────────────────────────────
INSTALL_MODE="user"
VAULT_PATH=""
INTERVAL=""
FORCE=0
while [[ $# -gt 0 ]]; do
case "$1" in
--user)
INSTALL_MODE="user"
shift
;;
--system)
INSTALL_MODE="system"
shift
;;
--vault)
if [[ -z "${2:-}" ]]; then
echo "Error: --vault requires a path argument" >&2
exit 1
fi
VAULT_PATH="$2"
shift 2
;;
--interval)
if [[ -z "${2:-}" ]]; then
echo "Error: --interval requires a numeric argument" >&2
exit 1
fi
INTERVAL="$2"
if ! [[ "$INTERVAL" =~ ^[1-9][0-9]*$ ]]; then
echo "Error: --interval requires a positive integer, got '$INTERVAL'" >&2
exit 1
fi
shift 2
;;
--force|-f)
FORCE=1
shift
;;
--help|-h)
cat <<EOF
Usage: install.sh [--user|--system] [--vault <path>] [--interval <N>] [--force]
--user Install as a user systemd service (default, ~/.config/systemd/user/)
--system Install as a system systemd service (/etc/systemd/system/)
--vault Path to the vault directory (prompted if omitted)
--interval Poll CouchDB every N seconds instead of using the _changes feed
--force Overwrite existing service unit without prompting
EOF
exit 0
;;
*)
echo "Error: Unknown argument: $1" >&2
exit 1
;;
esac
done
# ── Vault path ──────────────────────────────────────────────────────────────
if [[ -z "$VAULT_PATH" ]]; then
if [ ! -t 0 ]; then
echo "Error: --vault is required in non-interactive mode" >&2
exit 1
fi
printf 'Vault path: '
read -r VAULT_PATH
fi
_orig_vault="$VAULT_PATH"
if ! VAULT_PATH="$(cd -- "$VAULT_PATH" 2>/dev/null && pwd)"; then
echo "Error: vault directory does not exist: $_orig_vault" >&2
exit 1
fi
echo "[INFO] Vault: $VAULT_PATH"
echo "[INFO] Install mode: $INSTALL_MODE"
# ── Build ────────────────────────────────────────────────────────────────────
echo "[INFO] Building CLI from $REPO_ROOT..."
(cd "$REPO_ROOT" && npm install --silent)
(cd "$CLI_DIR" && npm run build)
BUILT_CJS="$CLI_DIR/dist/index.cjs"
if [[ ! -f "$BUILT_CJS" ]]; then
echo "Error: build output not found: $BUILT_CJS" >&2
exit 1
fi
# ── Install binary ───────────────────────────────────────────────────────────
if [[ "$INSTALL_MODE" == "user" ]]; then
BIN_DIR="$HOME/.local/bin"
UNIT_DIR="$HOME/.config/systemd/user"
SYSTEMCTL_FLAGS="--user"
else
BIN_DIR="/usr/local/bin"
UNIT_DIR="/etc/systemd/system"
SYSTEMCTL_FLAGS=""
fi
mkdir -p "$BIN_DIR"
LIVESYNC_BIN="$BIN_DIR/livesync-cli"
LIVESYNC_JS="$BIN_DIR/livesync-cli.js"
# Copy the CJS bundle so the wrapper is self-contained and independent of the
# build directory location.
cp "$BUILT_CJS" "$LIVESYNC_JS"
# Write a bash wrapper that invokes node on the installed bundle.
cat > "$LIVESYNC_BIN" <<WRAPPER
#!/usr/bin/env bash
exec node "$LIVESYNC_JS" "\$@"
WRAPPER
chmod +x "$LIVESYNC_BIN"
echo "[INFO] Installed bundle: $LIVESYNC_JS"
echo "[INFO] Installed binary: $LIVESYNC_BIN"
# ── Write systemd unit ───────────────────────────────────────────────────────
mkdir -p "$UNIT_DIR"
UNIT_PATH="$UNIT_DIR/livesync-cli.service"
EXEC_START="\"$LIVESYNC_BIN\" \"$VAULT_PATH\""
if [[ -n "$INTERVAL" ]]; then
EXEC_START="\"$LIVESYNC_BIN\" \"$VAULT_PATH\" --interval $INTERVAL"
fi
# Check for existing service and offer to overwrite.
if [[ -f "$UNIT_PATH" ]] && [[ "$FORCE" -eq 0 ]]; then
if [ ! -t 0 ]; then
echo "Error: service unit already exists at $UNIT_PATH; use --force to overwrite" >&2
exit 1
fi
printf 'Service unit already exists at %s. Overwrite? [y/N]: ' "$UNIT_PATH"
read -r CONFIRM
case "$CONFIRM" in
[yY]|[yY][eE][sS]) : ;;
*)
echo "[INFO] Aborted. Existing unit left in place."
exit 0
;;
esac
fi
# In awk gsub(), '&' in the replacement means "matched text"; escape any literal '&'
# in path variables before passing them as awk replacement strings.
AWK_BIN="${LIVESYNC_BIN//&/\\&}"
AWK_VAULT="${VAULT_PATH//&/\\&}"
awk -v bin="$AWK_BIN" -v vault="$AWK_VAULT" -v exec_start="ExecStart=$EXEC_START" \
'/^ExecStart=/ { print exec_start; next } {gsub("LIVESYNC_BIN", bin); gsub("LIVESYNC_VAULT_PATH", vault); print}' \
"$SERVICE_TEMPLATE" > "$UNIT_PATH"
echo "[INFO] Installed unit: $UNIT_PATH"
# ── Enable service ───────────────────────────────────────────────────────────
if ! command -v systemctl >/dev/null 2>&1; then
echo "[WARN] systemctl not found — skipping service activation"
echo "[INFO] To enable manually, copy $UNIT_PATH to the correct systemd directory and run:"
echo " systemctl $SYSTEMCTL_FLAGS daemon-reload"
echo " systemctl $SYSTEMCTL_FLAGS enable --now livesync-cli"
exit 0
fi
# shellcheck disable=SC2086
systemctl $SYSTEMCTL_FLAGS daemon-reload
# shellcheck disable=SC2086
systemctl $SYSTEMCTL_FLAGS enable --now livesync-cli
echo ""
echo "[Done] livesync-cli service installed and started."
echo ""
# shellcheck disable=SC2086
systemctl $SYSTEMCTL_FLAGS status livesync-cli --no-pager || true

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Self-hosted LiveSync CLI Daemon
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=LIVESYNC_BIN LIVESYNC_VAULT_PATH
Restart=on-failure
RestartSec=10
TimeoutStartSec=300
StandardOutput=journal
StandardError=journal
LimitNOFILE=65536
[Install]
WantedBy=default.target

View File

@@ -26,6 +26,7 @@ import { VALID_COMMANDS } from "./commands/types";
import type { CLICommand, CLIOptions } from "./commands/types";
import { getPathFromUXFileInfo } from "@lib/common/typeUtils";
import { stripAllPrefixes } from "@lib/string_and_binary/path";
import { IgnoreRules } from "./serviceModules/IgnoreRules";
const SETTINGS_FILE = ".livesync/settings.json";
ensureGlobalNodeLocalStorage();
@@ -43,7 +44,8 @@ Arguments:
database-path Path to the local database directory
Commands:
daemon (default) Run mirror scan then continuously sync CouchDB <-> local filesystem
sync Run one replication cycle and exit
p2p-peers <timeout> Show discovered peers as [peer]\t<peer-id>\t<peer-name>
p2p-sync <peer> <timeout>
Sync with the specified peer-id or peer-name
@@ -60,24 +62,30 @@ Commands:
rm <path> Mark a file as deleted in local database
resolve <path> <rev> Resolve conflicts by keeping <rev> and deleting others
mirror [vault-path] Mirror database contents to the local file system (vault-path defaults to database-path)
Options:
--interval <N>, -i <N> (daemon only) Poll CouchDB every N seconds instead of using the _changes feed
Examples:
livesync-cli ./my-database Run daemon (LiveSync mode)
livesync-cli ./my-database --interval 30 Run daemon (polling every 30s)
livesync-cli ./my-database sync
livesync-cli ./my-database p2p-peers 5
livesync-cli ./my-database p2p-sync my-peer-name 15
livesync-cli ./my-database p2p-host
livesync-cli ./my-database --settings ./custom-settings.json push ./note.md folder/note.md
livesync-cli ./my-database pull folder/note.md ./exports/note.md
livesync-cli ./my-database pull-rev folder/note.md ./exports/note.old.md 3-abcdef
livesync-cli ./my-database setup "obsidian://setuplivesync?settings=..."
echo "Hello" | livesync-cli ./my-database put notes/hello.md
livesync-cli ./my-database cat notes/hello.md
livesync-cli ./my-database cat-rev notes/hello.md 3-abcdef
livesync-cli ./my-database ls notes/
livesync-cli ./my-database info notes/hello.md
livesync-cli ./my-database rm notes/hello.md
livesync-cli ./my-database resolve notes/hello.md 3-abcdef
livesync-cli init-settings ./data.json
livesync-cli ./my-database --verbose
`);
}
@@ -94,6 +102,7 @@ export function parseArgs(): CLIOptions {
let verbose = false;
let debug = false;
let force = false;
let interval: number | undefined;
let command: CLICommand = "daemon";
const commandArgs: string[] = [];
@@ -110,6 +119,21 @@ export function parseArgs(): CLIOptions {
settingsPath = args[i];
break;
}
case "--interval":
case "-i": {
i++;
if (!args[i]) {
console.error(`Error: Missing value for ${token}`);
process.exit(1);
}
const n = parseInt(args[i], 10);
if (!Number.isInteger(n) || n <= 0) {
console.error(`Error: --interval requires a positive integer, got '${args[i]}'`);
process.exit(1);
}
interval = n;
break;
}
case "--debug":
case "-d":
// debugging automatically enables verbose logging, as it is intended for debugging issues.
@@ -164,6 +188,7 @@ export function parseArgs(): CLIOptions {
force,
command,
commandArgs,
interval,
};
}
@@ -197,6 +222,9 @@ async function createDefaultSettingsFile(options: CLIOptions) {
export async function main() {
const options = parseArgs();
if (options.interval && options.command !== "daemon") {
console.error(`Warning: --interval is only used in daemon mode, ignored for '${options.command}'`);
}
const avoidStdoutNoise =
options.command === "cat" ||
options.command === "cat-rev" ||
@@ -248,6 +276,20 @@ export async function main() {
infoLog(`Settings: ${settingsPath}`);
infoLog("");
// For daemon and mirror mode, load ignore rules before the core is constructed so that
// chokidar's ignored option is populated when beginWatch() fires during onLoad().
const watchEnabled = options.command === "daemon";
const vaultPath =
options.command === "mirror" && options.commandArgs[0]
? path.resolve(options.commandArgs[0])
: databasePath;
let ignoreRules: IgnoreRules | undefined;
if (options.command === "daemon" || options.command === "mirror") {
ignoreRules = new IgnoreRules(vaultPath);
await ignoreRules.load();
}
// Create service context and hub
const context = new NodeServiceContext(databasePath);
const serviceHubInstance = new NodeServiceHub<NodeServiceContext>(databasePath, context);
@@ -278,11 +320,14 @@ export async function main() {
}
console.error(`${prefix} ${message}`);
});
// Prevent replication result to be processed automatically.
serviceHubInstance.replication.processSynchroniseResult.addHandler(async () => {
console.error(`[Info] Replication result received, but not processed automatically in CLI mode.`);
return await Promise.resolve(true);
}, -100);
// Prevent replication result from being processed automatically in non-daemon commands.
// In daemon mode the default handler must run so changes are applied to the filesystem.
if (options.command !== "daemon") {
serviceHubInstance.replication.processSynchroniseResult.addHandler(async () => {
console.error(`[Info] Replication result received, but not processed automatically in CLI mode.`);
return await Promise.resolve(true);
}, -100);
}
// Setup settings handlers
const settingService = serviceHubInstance.setting;
@@ -324,11 +369,7 @@ export async function main() {
const core = new LiveSyncBaseCore(
serviceHubInstance,
(core: LiveSyncBaseCore<NodeServiceContext, any>, serviceHub: InjectableServiceHub<NodeServiceContext>) => {
const mirrorVaultPath =
options.command === "mirror" && options.commandArgs[0]
? path.resolve(options.commandArgs[0])
: databasePath;
return initialiseServiceModulesCLI(mirrorVaultPath, core, serviceHub);
return initialiseServiceModulesCLI(vaultPath, core, serviceHub, ignoreRules, watchEnabled);
},
(core) => [
// No modules need to be registered for P2P replication in CLI. Directly using Replicators in p2p.ts
@@ -344,8 +385,25 @@ export async function main() {
if (parts.some((part) => part.startsWith("."))) {
return await Promise.resolve(false);
}
// PouchDB LevelDB database directory lives in the vault directory.
if (parts[0]?.endsWith("-livesync-v2")) {
return await Promise.resolve(false);
}
return await Promise.resolve(true);
}, -1 /* highest priority */);
// Apply user-defined ignore rules for daemon mode (lower priority, runs after dotfile check).
if (ignoreRules) {
const rules = ignoreRules;
core.services.vault.isTargetFile.addHandler(async (target) => {
const targetPath = stripAllPrefixes(getPathFromUXFileInfo(target));
if (rules.shouldIgnore(targetPath)) {
return false;
}
// undefined = pass through to next handler in chain
return undefined;
}, 0);
}
}
);
@@ -366,6 +424,25 @@ export async function main() {
process.on("SIGINT", () => shutdown("SIGINT"));
process.on("SIGTERM", () => shutdown("SIGTERM"));
// Save the settings file before any lifecycle events can mutate and persist them.
// suspendAllSync and other lifecycle hooks clobber sync settings in memory, and
// various code paths persist the clobbered state to disk. We restore on shutdown.
const settingsBackup = await fs.readFile(settingsPath, "utf-8").catch(() => null);
// Restore settings file on any exit to undo lifecycle mutations.
// Write to a temp path first so a crash mid-write doesn't leave a truncated file.
process.on("exit", () => {
if (settingsBackup) {
const tmpPath = settingsPath + ".tmp";
try {
require("fs").writeFileSync(tmpPath, settingsBackup, "utf-8");
require("fs").renameSync(tmpPath, settingsPath);
} catch (err) {
console.error("[Settings] Failed to restore settings on exit:", err);
}
}
});
// Start the core
try {
infoLog(`[Starting] Initializing LiveSync...`);
@@ -375,6 +452,18 @@ export async function main() {
console.error(`[Error] Failed to initialize LiveSync`);
process.exit(1);
}
// Capture sync settings before suspendAllSync() clobbers them.
// Used by daemon mode to restore the correct sync behaviour after the mirror scan.
const settingsBeforeSuspend = core.services.setting.currentSettings();
const originalSyncSettings = {
liveSync: settingsBeforeSuspend.liveSync,
syncOnStart: settingsBeforeSuspend.syncOnStart,
periodicReplication: settingsBeforeSuspend.periodicReplication,
syncOnSave: settingsBeforeSuspend.syncOnSave,
syncOnEditorSave: settingsBeforeSuspend.syncOnEditorSave,
syncOnFileOpen: settingsBeforeSuspend.syncOnFileOpen,
syncAfterMerge: settingsBeforeSuspend.syncAfterMerge,
};
await core.services.setting.suspendAllSync();
await core.services.control.onReady();
@@ -400,7 +489,7 @@ export async function main() {
infoLog("");
}
const result = await runCommand(options, { databasePath, core, settingsPath });
const result = await runCommand(options, { databasePath, core, settingsPath, originalSyncSettings });
if (!result) {
console.error(`[Error] Command '${options.command}' failed`);
process.exitCode = 1;
@@ -408,7 +497,7 @@ export async function main() {
infoLog(`[Done] Command '${options.command}' completed`);
}
if (options.command === "daemon") {
if (options.command === "daemon" && result) {
// Keep the process running
await new Promise(() => {});
} else {

View File

@@ -85,4 +85,67 @@ describe("CLI parseArgs", () => {
expect(parsed.command).toBe("p2p-host");
expect(parsed.commandArgs).toEqual([]);
});
it("parses --interval flag with valid integer", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "30"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.interval).toBe(30);
});
it("parses -i shorthand for --interval", () => {
process.argv = ["node", "livesync-cli", "./vault", "-i", "10"];
const parsed = parseArgs();
expect(parsed.interval).toBe(10);
});
it("exits 1 when --interval has no value", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
expect(exitMock).toHaveBeenCalledWith(1);
});
it("exits 1 when --interval is not a positive integer", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "0"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
expect(exitMock).toHaveBeenCalledWith(1);
});
it("exits 1 when --interval is negative", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "-5"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
});
it("exits 1 when --interval is not numeric", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "abc"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
});
it("parses explicit daemon command", () => {
process.argv = ["node", "livesync-cli", "./vault", "daemon"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.databasePath).toBe("./vault");
});
it("defaults to daemon when no command specified", () => {
process.argv = ["node", "livesync-cli", "./vault"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
});
it("parses explicit daemon command with --interval", () => {
process.argv = ["node", "livesync-cli", "./vault", "daemon", "--interval", "30"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.interval).toBe(30);
});
});

View File

@@ -11,8 +11,11 @@ import type {
} from "@lib/managers/adapters";
import type { FileEventItemSentinel } from "@lib/managers/StorageEventManager";
import type { NodeFile, NodeFolder } from "../adapters/NodeTypes";
import type { Stats } from "fs";
import * as fs from "fs/promises";
import * as path from "path";
import { watch as chokidarWatch, type FSWatcher } from "chokidar";
import type { IgnoreRules } from "../serviceModules/IgnoreRules";
/**
* CLI-specific type guard adapter
@@ -56,22 +59,11 @@ class CLIPersistenceAdapter implements IStorageEventPersistenceAdapter {
}
/**
* CLI-specific status adapter (console logging)
* CLI-specific status adapter (no-op — daemon uses journald for status)
*/
class CLIStatusAdapter implements IStorageEventStatusAdapter {
private lastUpdate = 0;
private updateInterval = 5000; // Update every 5 seconds
updateStatus(status: { batched: number; processing: number; totalQueued: number }): void {
const now = Date.now();
if (now - this.lastUpdate > this.updateInterval) {
if (status.totalQueued > 0 || status.processing > 0) {
// console.log(
// `[StorageEventManager] Batched: ${status.batched}, Processing: ${status.processing}, Total Queued: ${status.totalQueued}`
// );
}
this.lastUpdate = now;
}
updateStatus(_status: { batched: number; processing: number; totalQueued: number }): void {
// intentional no-op
}
}
@@ -100,15 +92,97 @@ class CLIConverterAdapter implements IStorageEventConverterAdapter<NodeFile> {
}
/**
* CLI-specific watch adapter (optional file watching with chokidar)
* CLI-specific watch adapter using chokidar for real-time filesystem monitoring.
*/
class CLIWatchAdapter implements IStorageEventWatchAdapter {
constructor(private basePath: string) {}
private _watcher: FSWatcher | undefined;
constructor(private basePath: string, private ignoreRules?: IgnoreRules, private watchEnabled: boolean = false) {}
private _toNodeFile(filePath: string, stats: Stats | undefined): NodeFile {
return {
path: path.relative(this.basePath, filePath) as FilePath,
stat: {
ctime: stats?.ctimeMs ?? Date.now(),
mtime: stats?.mtimeMs ?? Date.now(),
size: stats?.size ?? 0,
type: "file",
},
};
}
private _toNodeFolder(dirPath: string): NodeFolder {
return {
path: path.relative(this.basePath, dirPath) as FilePath,
isFolder: true,
};
}
async beginWatch(handlers: IStorageEventWatchHandlers): Promise<void> {
// File watching is not activated in the CLI.
// Because the CLI is designed for push/pull operations, not real-time sync.
// console.error("[CLIWatchAdapter] File watching is not enabled in CLI version");
if (!this.watchEnabled) return;
const baseIgnored: Array<RegExp | string | ((p: string) => boolean)> = [
/(^|[/\\])\./,
/(^|[/\\])[^/\\]*-livesync-v2([/\\]|$)/,
];
// Bind rules to a local const before the closure — chokidar v4 requires a
// MatchFunction, not glob strings, for custom patterns.
const rules = this.ignoreRules;
const ignored = rules
? [...baseIgnored, (p: string) => rules.shouldIgnore(path.relative(this.basePath, p))]
: baseIgnored;
const watcher = chokidarWatch(this.basePath, {
ignored,
ignoreInitial: true,
persistent: true,
awaitWriteFinish: {
stabilityThreshold: 500,
pollInterval: 100,
},
});
watcher.on("add", (filePath, stats) => {
const nodeFile = this._toNodeFile(filePath, stats);
handlers.onCreate(nodeFile);
});
watcher.on("change", (filePath, stats) => {
const nodeFile = this._toNodeFile(filePath, stats);
handlers.onChange(nodeFile);
});
watcher.on("unlink", (filePath) => {
const nodeFile = this._toNodeFile(filePath, undefined);
handlers.onDelete(nodeFile);
});
watcher.on("addDir", (dirPath) => {
const nodeFolder = this._toNodeFolder(dirPath);
handlers.onCreate(nodeFolder);
});
watcher.on("unlinkDir", (dirPath) => {
const nodeFolder = this._toNodeFolder(dirPath);
handlers.onDelete(nodeFolder);
});
watcher.on("error", (err) => {
console.error("[CLIWatchAdapter] Fatal watcher error — file watching stopped:", err);
console.error("[CLIWatchAdapter] Exiting for systemd restart.");
void watcher.close();
this._watcher = undefined;
// Use exit(1) rather than SIGTERM so systemd Restart=on-failure engages.
process.exit(1);
});
await new Promise<void>((resolve) => watcher.once("ready", resolve));
this._watcher = watcher;
}
close(): Promise<void> {
if (this._watcher) {
return this._watcher.close();
}
return Promise.resolve();
}
}
@@ -123,11 +197,15 @@ export class CLIStorageEventManagerAdapter implements IStorageEventManagerAdapte
readonly status: CLIStatusAdapter;
readonly converter: CLIConverterAdapter;
constructor(basePath: string) {
constructor(basePath: string, ignoreRules?: IgnoreRules, watchEnabled: boolean = false) {
this.typeGuard = new CLITypeGuardAdapter();
this.persistence = new CLIPersistenceAdapter(basePath);
this.watch = new CLIWatchAdapter(basePath);
this.watch = new CLIWatchAdapter(basePath, ignoreRules, watchEnabled);
this.status = new CLIStatusAdapter();
this.converter = new CLIConverterAdapter();
}
close(): Promise<void> {
return this.watch.close();
}
}

View File

@@ -0,0 +1,126 @@
import { describe, expect, it, vi, beforeEach } from "vitest";
import type { IStorageEventWatchHandlers } from "@lib/managers/adapters";
import type { NodeFile } from "../adapters/NodeTypes";
// ── chokidar mock ──────────────────────────────────────────────────────────────
// Must be hoisted before imports that pull in chokidar.
const mockWatcher = {
on: vi.fn().mockReturnThis(),
once: vi.fn((event: string, cb: () => void) => {
if (event === "ready") cb();
return mockWatcher;
}),
close: vi.fn(() => Promise.resolve()),
};
vi.mock("chokidar", () => ({
watch: vi.fn(() => mockWatcher),
}));
import * as chokidar from "chokidar";
import { CLIStorageEventManagerAdapter } from "./CLIStorageEventManagerAdapter";
// ── helpers ───────────────────────────────────────────────────────────────────
function makeHandlers(): IStorageEventWatchHandlers {
return {
onCreate: vi.fn(),
onChange: vi.fn(),
onDelete: vi.fn(),
onRename: vi.fn(),
} as any;
}
// ── tests ─────────────────────────────────────────────────────────────────────
describe("CLIStorageEventManagerAdapter", () => {
beforeEach(() => {
vi.clearAllMocks();
// Restore the default once() behaviour (ready fires synchronously).
mockWatcher.once.mockImplementation((event: string, cb: () => void) => {
if (event === "ready") cb();
return mockWatcher;
});
});
it("beginWatch is no-op when watchEnabled=false", async () => {
const adapter = new CLIStorageEventManagerAdapter("/base", undefined, false);
const handlers = makeHandlers();
await adapter.watch.beginWatch(handlers);
expect(chokidar.watch).not.toHaveBeenCalled();
});
it("beginWatch calls chokidar.watch when watchEnabled=true", async () => {
const adapter = new CLIStorageEventManagerAdapter("/base", undefined, true);
const handlers = makeHandlers();
await adapter.watch.beginWatch(handlers);
expect(chokidar.watch).toHaveBeenCalledTimes(1);
expect(chokidar.watch).toHaveBeenCalledWith(
"/base",
expect.objectContaining({ ignoreInitial: true })
);
});
it("add event produces NodeFile with correct relative path via onCreate", async () => {
const basePath = "/vault/base";
const adapter = new CLIStorageEventManagerAdapter(basePath, undefined, true);
const handlers = makeHandlers();
await adapter.watch.beginWatch(handlers);
// Find the callback registered for the "add" event.
const addCall = mockWatcher.on.mock.calls.find(([event]) => event === "add");
expect(addCall).toBeDefined();
const addCallback = addCall![1] as (filePath: string, stats: any) => void;
const fakeStats = { ctimeMs: 1000, mtimeMs: 2000, size: 42 };
addCallback(`${basePath}/subdir/note.md`, fakeStats);
expect(handlers.onCreate).toHaveBeenCalledTimes(1);
const created = (handlers.onCreate as ReturnType<typeof vi.fn>).mock.calls[0][0] as NodeFile;
expect(created.path).toBe("subdir/note.md");
expect(created.stat?.size).toBe(42);
});
it("close() calls watcher.close()", async () => {
const adapter = new CLIStorageEventManagerAdapter("/base", undefined, true);
const handlers = makeHandlers();
await adapter.watch.beginWatch(handlers);
await adapter.close();
expect(mockWatcher.close).toHaveBeenCalledTimes(1);
});
it("close() is safe when no watcher was started", async () => {
const adapter = new CLIStorageEventManagerAdapter("/base", undefined, false);
// Should not throw.
await expect(adapter.close()).resolves.toBeUndefined();
expect(mockWatcher.close).not.toHaveBeenCalled();
});
it("error event triggers process.exit(1)", async () => {
const adapter = new CLIStorageEventManagerAdapter("/base", undefined, true);
const handlers = makeHandlers();
await adapter.watch.beginWatch(handlers);
const processExitSpy = vi.spyOn(process, "exit").mockImplementation((() => {}) as any);
const errorCall = mockWatcher.on.mock.calls.find(([event]) => event === "error");
expect(errorCall).toBeDefined();
const errorCallback = errorCall![1] as (err: Error) => void;
errorCallback(new Error("disk failure"));
expect(processExitSpy).toHaveBeenCalledWith(1);
processExitSpy.mockRestore();
});
});

View File

@@ -2,6 +2,7 @@ import { StorageEventManagerBase, type StorageEventManagerBaseDependencies } fro
import { CLIStorageEventManagerAdapter } from "./CLIStorageEventManagerAdapter";
import type { IMinimumLiveSyncCommands, LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
import type { IgnoreRules } from "../serviceModules/IgnoreRules";
// import type { IMinimumLiveSyncCommands } from "@lib/services/base/IService";
export class StorageEventManagerCLI extends StorageEventManagerBase<CLIStorageEventManagerAdapter> {
@@ -10,9 +11,11 @@ export class StorageEventManagerCLI extends StorageEventManagerBase<CLIStorageEv
constructor(
basePath: string,
core: LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>,
dependencies: StorageEventManagerBaseDependencies
dependencies: StorageEventManagerBaseDependencies,
ignoreRules?: IgnoreRules,
watchEnabled?: boolean
) {
const adapter = new CLIStorageEventManagerAdapter(basePath);
const adapter = new CLIStorageEventManagerAdapter(basePath, ignoreRules, watchEnabled);
super(adapter, dependencies);
this.core = core;
}
@@ -25,4 +28,11 @@ export class StorageEventManagerCLI extends StorageEventManagerBase<CLIStorageEv
// No-op in CLI version
// Internal file handling is not needed
}
/**
* Close the file watcher. Call this during graceful shutdown.
*/
close(): Promise<void> {
return this.adapter.close();
}
}
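// A minimal shutdown-wiring sketch (the entrypoint variables here are
// hypothetical, not part of this change): a daemon should call close() on
// termination signals so chokidar releases its handles and the process exits.
//
//   const manager = new StorageEventManagerCLI(basePath, core, deps, rules, true);
//   process.once("SIGTERM", () => {
//       void manager.close().then(() => process.exit(0));
//   });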

View File

@@ -4,6 +4,7 @@
"version": "0.0.0",
"description": "Runtime dependencies for Self-hosted LiveSync CLI Docker image",
"dependencies": {
"chokidar": "^4.0.0",
"commander": "^14.0.3",
"werift": "^0.22.9",
"pouchdb-adapter-http": "^9.0.0",

View File

@@ -9,6 +9,7 @@ import { ServiceFileAccessCLI } from "./ServiceFileAccessImpl";
import { ServiceDatabaseFileAccessCLI } from "./DatabaseFileAccess";
import { StorageEventManagerCLI } from "../managers/StorageEventManagerCLI";
import type { ServiceModules } from "@lib/interfaces/ServiceModule";
import type { IgnoreRules } from "./IgnoreRules";
/**
* Initialize service modules for CLI version
@@ -22,7 +23,9 @@ import type { ServiceModules } from "@lib/interfaces/ServiceModule";
export function initialiseServiceModulesCLI(
basePath: string,
core: LiveSyncBaseCore<ServiceContext, any>,
services: InjectableServiceHub<ServiceContext>
services: InjectableServiceHub<ServiceContext>,
ignoreRules?: IgnoreRules,
watchEnabled: boolean = false,
): ServiceModules {
const storageAccessManager = new StorageAccessManager();
@@ -42,6 +45,12 @@ export function initialiseServiceModulesCLI(
vaultService: services.vault,
storageAccessManager: storageAccessManager,
APIService: services.API,
}, ignoreRules, watchEnabled);
// Close the file watcher during graceful shutdown so the process can exit cleanly.
services.appLifecycle.onUnload.addHandler(async () => {
await storageEventManager.close();
return true;
});
// Storage access using CLI file system adapter

View File

@@ -0,0 +1,129 @@
import * as fs from "fs/promises";
import * as path from "path";
import { minimatch } from "minimatch";
/**
* Loads and evaluates ignore rules from `.livesync/ignore` inside the vault.
*
* File format:
* - Lines starting with `#` are comments.
* - Blank lines are ignored.
* - `import: .gitignore` (exactly) — merges patterns from the vault's `.gitignore`.
* - All other lines are minimatch glob patterns relative to the vault root.
*
* Negation patterns (lines starting with `!`) are not supported. Loading a
* ruleset containing them throws an error — use separate include/exclude files
* instead.
*
* Missing files (`.livesync/ignore` or `.gitignore`) are silently skipped.
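 *
 * Example `.livesync/ignore` (illustrative):
 *
 *     # editor artefacts
 *     *.tmp
 *     build/
 *     import: .gitignore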
*/
export class IgnoreRules {
private patterns: string[] = [];
constructor(private vaultPath: string) {}
/**
* Reads `.livesync/ignore` (and optionally `.gitignore`) and populates the
* pattern list. Safe to call multiple times — each call replaces the
* previous state. Does not throw if files are absent.
*
* @throws if any pattern line begins with `!` (negation is unsupported).
*/
async load(): Promise<void> {
this.patterns = [];
const ignorePath = path.join(this.vaultPath, ".livesync", "ignore");
let rawLines: string[];
try {
const content = await fs.readFile(ignorePath, "utf-8");
rawLines = content.split(/\r?\n/);
} catch {
// File absent or unreadable — treat as empty ruleset.
return;
}
for (const line of rawLines) {
const trimmed = line.trim();
if (!trimmed || trimmed.startsWith("#")) {
continue;
}
// NOTE: Only the exact string "import: .gitignore" is recognised.
// Any future generalisation of this directive must validate that
// the resolved path stays within the vault directory.
if (trimmed === "import: .gitignore") {
await this._importGitignore();
continue;
}
if (trimmed.startsWith("import:")) {
console.error(`[IgnoreRules] Warning: unrecognised directive '${trimmed}' — only 'import: .gitignore' is supported`);
continue;
}
this._addPattern(trimmed);
}
if (this.patterns.length > 0) {
console.error(`[IgnoreRules] Loaded ${this.patterns.length} ignore patterns`);
}
}
// Normalises a single gitignore-style pattern:
// - Patterns ending with `/` (directory patterns like `build/`) become
//   `**/build/**`, matching the directory at any depth and everything
//   inside it.
// - Patterns without a `/` are prefixed with `**/` (e.g. `*.tmp` becomes
//   `**/*.tmp`), matching the basename in any subdirectory as gitignore does.
// - Patterns that already contain a `/` (but do not end with one) are
//   path-specific and used as-is.
private _normalisePattern(pattern: string): string {
if (pattern.endsWith("/")) {
return "**/" + pattern + "**";
} else if (!pattern.includes("/")) {
return "**/" + pattern;
}
return pattern;
}
private async _importGitignore(): Promise<void> {
const gitignorePath = path.join(this.vaultPath, ".gitignore");
let content: string;
try {
content = await fs.readFile(gitignorePath, "utf-8");
} catch {
return;
}
this._parseLines(content);
}
private _parseLines(content: string): void {
for (const line of content.split(/\r?\n/)) {
const trimmed = line.trim();
if (!trimmed || trimmed.startsWith("#")) continue;
this._addPattern(trimmed);
}
}
private _addPattern(raw: string): void {
if (raw.startsWith("!")) {
throw new Error(
`[IgnoreRules] Negation pattern '${raw}' is not supported. ` +
`Remove it from .livesync/ignore or use a separate include/exclude file.`
);
}
this.patterns.push(this._normalisePattern(raw));
}
/**
* Returns `true` if the given vault-relative path matches any loaded
* ignore pattern.
*
* @param relativePath - Path relative to the vault root, using forward
* slashes or the OS separator.
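 *
 * @example
 * // with `*.tmp` loaded (normalised to match any subdirectory):
 * rules.shouldIgnore("notes/scratch.tmp"); // true
 * rules.shouldIgnore("notes/readme.md"); // false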
*/
shouldIgnore(relativePath: string): boolean {
if (this.patterns.length === 0) {
return false;
}
// Normalise to forward slashes for minimatch.
const normalised = relativePath.replace(/\\/g, "/");
return this.patterns.some((p) => minimatch(normalised, p, { dot: true }));
}
}
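// Usage sketch (the vault path is illustrative): load the rules once at
// startup, then consult them per vault-relative path.
//
//   const rules = new IgnoreRules("/path/to/vault");
//   await rules.load();
//   rules.shouldIgnore("notes/scratch.tmp"); // true when `*.tmp` is listed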

View File

@@ -0,0 +1,172 @@
import * as fs from "node:fs/promises";
import * as os from "node:os";
import * as path from "node:path";
import { afterEach, beforeEach, describe, expect, it } from "vitest";
import { IgnoreRules } from "./IgnoreRules";
describe("IgnoreRules", () => {
const tempDirs: string[] = [];
async function createVault(): Promise<string> {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "livesync-ignorerules-"));
tempDirs.push(tempDir);
return tempDir;
}
async function writeIgnoreFile(vaultPath: string, content: string): Promise<void> {
const ignoreDir = path.join(vaultPath, ".livesync");
await fs.mkdir(ignoreDir, { recursive: true });
await fs.writeFile(path.join(ignoreDir, "ignore"), content, "utf-8");
}
afterEach(async () => {
await Promise.all(tempDirs.splice(0).map((dir) => fs.rm(dir, { recursive: true, force: true })));
});
describe("pattern normalisation", () => {
it("adds **/ prefix to basename patterns (no slash)", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("notes/scratch.tmp")).toBe(true);
expect(rules.shouldIgnore("scratch.tmp")).toBe(true);
expect(rules.shouldIgnore("deep/nested/file.tmp")).toBe(true);
});
it("appends ** to directory patterns ending with / and prepends **/", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "build/\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("build/output.js")).toBe(true);
expect(rules.shouldIgnore("build/nested/file.js")).toBe(true);
expect(rules.shouldIgnore("subproject/build/output.js")).toBe(true);
});
it("leaves patterns containing / as-is", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "docs/private.md\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("docs/private.md")).toBe(true);
expect(rules.shouldIgnore("other/docs/private.md")).toBe(false);
});
});
describe("shouldIgnore", () => {
it("matches **/*.tmp against notes/scratch.tmp", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("notes/scratch.tmp")).toBe(true);
});
it("does not match notes/readme.md against **/*.tmp", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("notes/readme.md")).toBe(false);
});
it("returns false when no patterns are loaded", async () => {
const vaultPath = await createVault();
const rules = new IgnoreRules(vaultPath);
// No load() call — patterns are empty
expect(rules.shouldIgnore("anything.md")).toBe(false);
});
});
describe("negation patterns", () => {
it("throws when a negation pattern is encountered", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\n!important.tmp\n");
const rules = new IgnoreRules(vaultPath);
await expect(rules.load()).rejects.toThrow(/Negation pattern/);
});
it("throws when a .gitignore imported via directive contains negation", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "import: .gitignore\n");
await fs.writeFile(path.join(vaultPath, ".gitignore"), "*.log\n!keep.log\n", "utf-8");
const rules = new IgnoreRules(vaultPath);
await expect(rules.load()).rejects.toThrow(/Negation pattern/);
});
});
describe("unrecognised import: directives", () => {
it("warns and skips unrecognised import: forms (does not add as literal pattern)", async () => {
const vaultPath = await createVault();
// Typo: "import:.gitignore" instead of "import: .gitignore"
await writeIgnoreFile(vaultPath, "*.tmp\nimport:.gitignore\n");
const rules = new IgnoreRules(vaultPath);
await rules.load();
// *.tmp still loaded; import:.gitignore is skipped (not treated as a literal pattern)
expect(rules.shouldIgnore("scratch.tmp")).toBe(true);
expect(rules.shouldIgnore("import:.gitignore")).toBe(false);
});
});
describe("load() with missing file", () => {
it("returns without error when .livesync/ignore is absent", async () => {
const vaultPath = await createVault();
// No ignore file created
const rules = new IgnoreRules(vaultPath);
await expect(rules.load()).resolves.toBeUndefined();
expect(rules.shouldIgnore("anything.md")).toBe(false);
});
});
describe("load() with comments and blank lines", () => {
it("skips # comment lines and blank lines", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(
vaultPath,
"# This is a comment\n\n \n*.tmp\n# another comment\nbuild/\n"
);
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("scratch.tmp")).toBe(true);
expect(rules.shouldIgnore("build/output.js")).toBe(true);
expect(rules.shouldIgnore("readme.md")).toBe(false);
});
});
describe("import: .gitignore directive", () => {
it("reads and normalises patterns from .gitignore", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "import: .gitignore\n");
await fs.writeFile(path.join(vaultPath, ".gitignore"), "*.log\nnode_modules/\n", "utf-8");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("app.log")).toBe(true);
expect(rules.shouldIgnore("node_modules/package.json")).toBe(true);
expect(rules.shouldIgnore("src/node_modules/package.json")).toBe(true);
expect(rules.shouldIgnore("src/index.ts")).toBe(false);
});
it("merges .gitignore patterns with other patterns", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\nimport: .gitignore\n");
await fs.writeFile(path.join(vaultPath, ".gitignore"), "*.log\n", "utf-8");
const rules = new IgnoreRules(vaultPath);
await rules.load();
expect(rules.shouldIgnore("scratch.tmp")).toBe(true);
expect(rules.shouldIgnore("error.log")).toBe(true);
});
});
describe("import: .gitignore with missing .gitignore", () => {
it("does not throw when .gitignore is absent", async () => {
const vaultPath = await createVault();
await writeIgnoreFile(vaultPath, "*.tmp\nimport: .gitignore\n");
// No .gitignore created
const rules = new IgnoreRules(vaultPath);
await expect(rules.load()).resolves.toBeUndefined();
// The *.tmp pattern from the ignore file still works
expect(rules.shouldIgnore("scratch.tmp")).toBe(true);
});
});
});

View File

@@ -0,0 +1,166 @@
#!/usr/bin/env bash
# Test: daemon-related ignore rules behaviour
#
# Tests that are runnable without a long-running daemon process are exercised
# here using the `mirror` command, which calls the same `isTargetFile` handler
# stack that the daemon uses.
#
# Covered cases:
# 1. .livesync/ignore with *.tmp pattern → ignored file is NOT synced to DB
# 2. .livesync/ignore missing → no error, normal sync continues
# 3. import: .gitignore directive → patterns from .gitignore are merged
#
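# Example invocation (RUN_BUILD=0 skips the npm build and reuses the existing
# bundle; substitute the actual checked-in path of this script):
#   RUN_BUILD=0 ./<this-script>.sh
#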
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
RUN_BUILD="${RUN_BUILD:-1}"
cli_test_init_cli_cmd
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-daemon-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
SETTINGS_FILE="$WORK_DIR/data.json"
VAULT_DIR="$WORK_DIR/vault"
mkdir -p "$VAULT_DIR/notes"
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
echo "[INFO] generating settings -> $SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
cli_test_mark_settings_configured "$SETTINGS_FILE"
PASS=0
FAIL=0
assert_pass() { echo "[PASS] $1"; PASS=$((PASS + 1)); }
assert_fail() { echo "[FAIL] $1" >&2; FAIL=$((FAIL + 1)); }
# ─────────────────────────────────────────────────────────────────────────────
# Case 1: .livesync/ignore with *.tmp → matched file should NOT appear in DB
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 1: .livesync/ignore *.tmp → ignored file not synced to DB ==="
mkdir -p "$VAULT_DIR/.livesync"
printf '*.tmp\n' > "$VAULT_DIR/.livesync/ignore"
# Also write a normal file so we can confirm mirror ran at all.
printf 'normal content\n' > "$VAULT_DIR/notes/normal.md"
# Write the file that should be ignored.
printf 'tmp content\n' > "$VAULT_DIR/notes/scratch.tmp"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
# The normal file should be in the DB.
RESULT_NORMAL="$WORK_DIR/case1-normal.txt"
if run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull notes/normal.md "$RESULT_NORMAL" 2>/dev/null; then
if cmp -s "$VAULT_DIR/notes/normal.md" "$RESULT_NORMAL"; then
assert_pass "normal.md was synced to DB"
else
assert_fail "normal.md content mismatch after mirror"
fi
else
assert_fail "normal.md was not found in DB after mirror"
fi
# The .tmp file should NOT be in the DB.
DB_LIST="$WORK_DIR/case1-ls.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls > "$DB_LIST"
if grep -q "scratch.tmp" "$DB_LIST"; then
assert_fail "scratch.tmp (ignored) was unexpectedly synced to DB"
echo "--- DB listing ---" >&2; cat "$DB_LIST" >&2
else
assert_pass "scratch.tmp (*.tmp pattern) was NOT synced to DB"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 2: .livesync/ignore absent → no error, normal sync continues
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 2: .livesync/ignore absent → no error, sync continues ==="
VAULT_DIR2="$WORK_DIR/vault2"
mkdir -p "$VAULT_DIR2/notes"
SETTINGS_FILE2="$WORK_DIR/data2.json"
cli_test_init_settings_file "$SETTINGS_FILE2"
cli_test_mark_settings_configured "$SETTINGS_FILE2"
# No .livesync directory at all.
printf 'hello\n' > "$VAULT_DIR2/notes/hello.md"
# mirror should succeed without error.
set +e
MIRROR_OUTPUT="$WORK_DIR/case2-mirror.txt"
run_cli "$VAULT_DIR2" --settings "$SETTINGS_FILE2" mirror >"$MIRROR_OUTPUT" 2>&1
MIRROR_EXIT=$?
set -e
if [[ "$MIRROR_EXIT" -ne 0 ]]; then
assert_fail "mirror exited non-zero ($MIRROR_EXIT) when .livesync/ignore is absent"
cat "$MIRROR_OUTPUT" >&2
else
assert_pass "mirror succeeded when .livesync/ignore is absent"
fi
# The normal file should have been synced.
RESULT_HELLO="$WORK_DIR/case2-hello.txt"
if run_cli "$VAULT_DIR2" --settings "$SETTINGS_FILE2" pull notes/hello.md "$RESULT_HELLO" 2>/dev/null; then
assert_pass "file synced normally when .livesync/ignore is absent"
else
assert_fail "file was not synced when .livesync/ignore is absent"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 3: import: .gitignore merges patterns
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 3: import: .gitignore directive merges patterns ==="
VAULT_DIR3="$WORK_DIR/vault3"
mkdir -p "$VAULT_DIR3/notes"
SETTINGS_FILE3="$WORK_DIR/data3.json"
cli_test_init_settings_file "$SETTINGS_FILE3"
cli_test_mark_settings_configured "$SETTINGS_FILE3"
mkdir -p "$VAULT_DIR3/.livesync"
printf 'import: .gitignore\n' > "$VAULT_DIR3/.livesync/ignore"
printf '# gitignore comment\n*.log\nbuild/\n' > "$VAULT_DIR3/.gitignore"
printf 'regular note\n' > "$VAULT_DIR3/notes/regular.md"
printf 'log content\n' > "$VAULT_DIR3/notes/debug.log"
run_cli "$VAULT_DIR3" --settings "$SETTINGS_FILE3" mirror
DB_LIST3="$WORK_DIR/case3-ls.txt"
run_cli "$VAULT_DIR3" --settings "$SETTINGS_FILE3" ls > "$DB_LIST3"
if grep -q "debug.log" "$DB_LIST3"; then
assert_fail "debug.log (ignored via .gitignore import) was unexpectedly synced to DB"
echo "--- DB listing ---" >&2; cat "$DB_LIST3" >&2
else
assert_pass "debug.log (*.log from imported .gitignore) was NOT synced to DB"
fi
# regular.md should still be present.
if grep -q "regular.md" "$DB_LIST3"; then
assert_pass "regular.md was synced normally alongside .gitignore import rules"
else
assert_fail "regular.md was NOT synced — .gitignore import may have been too broad"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Summary
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "Results: PASS=$PASS FAIL=$FAIL"
if [[ "$FAIL" -gt 0 ]]; then
exit 1
fi

View File

@@ -11,11 +11,54 @@ const defaultExternal = [
"crypto",
"pouchdb-adapter-leveldb",
"commander",
"chokidar",
"punycode",
"werift",
];
// Polyfill FileReader at the very top of the CJS bundle. octagonal-wheels uses
// FileReader for base64 conversion when Uint8Array.toBase64 (TC39 proposal) is
// unavailable. Node.js has neither, so we inject a minimal FileReader shim before
// any module-scope code evaluates.
const fileReaderPolyfillBanner = `
if (typeof globalThis.FileReader === "undefined") {
globalThis.FileReader = class FileReader {
constructor() { this.result = null; this.onload = null; this.onerror = null; }
readAsDataURL(blob) {
blob.arrayBuffer().then((buf) => {
var b64 = require("buffer").Buffer.from(buf).toString("base64");
this.result = "data:" + (blob.type || "application/octet-stream") + ";base64," + b64;
if (this.onload) this.onload({ target: this });
}).catch((err) => { if (this.onerror) this.onerror({ target: this, error: err }); });
}
readAsArrayBuffer() { throw new Error("FileReader.readAsArrayBuffer is not implemented in this polyfill"); }
readAsBinaryString() { throw new Error("FileReader.readAsBinaryString is not implemented in this polyfill"); }
readAsText() { throw new Error("FileReader.readAsText is not implemented in this polyfill"); }
abort() { throw new Error("FileReader.abort is not implemented in this polyfill"); }
};
}
`;
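// Spliced in via the plugin below rather than a plain output banner so the
// polyfill can land after the shebang line instead of above it, keeping the
// bundle directly executable.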
function injectBanner(): import("vite").Plugin {
return {
name: "inject-banner",
generateBundle(_options, bundle) {
for (const chunk of Object.values(bundle)) {
if (chunk.type === "chunk" && chunk.fileName.startsWith("entrypoint")) {
// Insert after the shebang line if present, otherwise at the top.
if (chunk.code.startsWith("#!")) {
const newline = chunk.code.indexOf("\n");
chunk.code = chunk.code.slice(0, newline + 1) + fileReaderPolyfillBanner + chunk.code.slice(newline + 1);
} else {
chunk.code = fileReaderPolyfillBanner + chunk.code;
}
}
}
},
};
}
export default defineConfig({
plugins: [svelte()],
plugins: [svelte(), injectBanner()],
resolve: {
alias: {
"@lib/worker/bgWorker.ts": "../../lib/src/worker/bgWorker.mock.ts",