Compare commits

...

22 Commits

Author SHA1 Message Date
vorotamoroz
d8281390c4 bump 2026-03-17 10:34:29 +01:00
vorotamoroz
08b1712f39 bump 2026-03-16 00:49:54 +09:00
vorotamoroz
6c69547cef ### Fixed
- Fixed flaky timing issues in P2P synchronisation.
- Fixed more binary file handling issues in CLI.

### Tests

- Rewrite P2P end-to-end tests to use the CLI as host.
2026-03-16 00:48:22 +09:00
vorotamoroz
89bf0488c3 Refactor: More refactor P2P Replicator 2026-03-15 04:07:47 +09:00
vorotamoroz
653cf8dfbe Refactor: Refactor P2P Replicator 2026-03-15 03:33:03 +09:00
vorotamoroz
33338506cf bump 2026-03-14 17:02:16 +09:00
vorotamoroz
9dd479e597 Fix for an issue where conflicts cannot be resolved in Journal Sync
Remove unnecessary test calling in CLI
2026-03-14 16:51:30 +09:00
vorotamoroz
8cad4cdf80 Add workaround for my mac 2026-03-14 16:50:43 +09:00
vorotamoroz
beced219c7 Fix: exit code 2026-03-14 16:13:14 +09:00
vorotamoroz
dfe13b1abd Fixed:
- No longer unexpected `Unhandled Rejections` during P2P operations (waiting acceptance).
CLI new features
- P2P sync has been implemented.
2026-03-14 15:08:31 +09:00
vorotamoroz
bf93bddbdd Fix: prevent transfer twice. 2026-03-13 23:34:38 +09:00
vorotamoroz
44890a34e8 remove conflicting option. 2026-03-13 23:08:05 +09:00
vorotamoroz
a14aa201a8 Merge branch 'beta' of https://github.com/vrtmrz/obsidian-livesync into beta 2026-03-13 18:13:12 +09:00
vorotamoroz
338a9ba9fa Add: mirror command
Tidy: test
2026-03-13 18:01:38 +09:00
vorotamoroz
0c65b5add9 Add: mirror command 2026-03-13 12:55:46 +09:00
vorotamoroz
29ce9a5df4 remove todo 2026-03-12 12:45:39 +01:00
vorotamoroz
10f5cb8b42 add paths 2026-03-12 12:31:53 +01:00
vorotamoroz
8aad3716d4 fix grammatical errors 2026-03-12 12:29:43 +01:00
vorotamoroz
d45f41500a Fix: no longer duplicated addLog setHandler 2026-03-12 12:27:47 +01:00
vorotamoroz
4cc0a11d86 Add ci cli-e2e 2026-03-12 12:23:13 +01:00
vorotamoroz
ad0a6b458f bump 2026-03-12 12:16:00 +01:00
vorotamoroz
6ae1d5d6a5 update readme 2026-03-12 12:07:05 +01:00
50 changed files with 4514 additions and 1747 deletions

84
.github/workflows/cli-e2e.yml vendored Normal file
View File

@@ -0,0 +1,84 @@
# Run CLI E2E tests
name: cli-e2e
on:
  # Manual runs may pick a specific suite; defaults to the matrix suite.
  workflow_dispatch:
    inputs:
      suite:
        description: 'CLI E2E suite to run'
        type: choice
        options:
          - two-vaults-matrix
          - two-vaults-couchdb
          - two-vaults-minio
        default: two-vaults-matrix
  push:
    branches:
      - main
      - beta
    # Only run when files that can affect the CLI E2E outcome change.
    paths:
      - '.github/workflows/cli-e2e.yml'
      - 'src/apps/cli/**'
      - 'src/lib/src/API/processSetting.ts'
      - 'package.json'
      - 'package-lock.json'
  pull_request:
    paths:
      - '.github/workflows/cli-e2e.yml'
      - 'src/apps/cli/**'
      - 'src/lib/src/API/processSetting.ts'
      - 'package.json'
      - 'package-lock.json'
permissions:
  contents: read
jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24.x'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Run CLI E2E suite
        working-directory: src/apps/cli
        env:
          CI: true
          # workflow_dispatch uses the chosen suite; push/PR fall back to the matrix suite.
          TEST_SUITE: ${{ github.event_name == 'workflow_dispatch' && inputs.suite || 'two-vaults-matrix' }}
        run: |
          set -euo pipefail
          echo "[INFO] Running CLI E2E suite: $TEST_SUITE"
          case "$TEST_SUITE" in
            two-vaults-matrix)
              npm run test:e2e:two-vaults:matrix
              ;;
            two-vaults-couchdb)
              REMOTE_TYPE=COUCHDB ENCRYPT=0 npm run test:e2e:two-vaults
              ;;
            two-vaults-minio)
              REMOTE_TYPE=MINIO ENCRYPT=0 npm run test:e2e:two-vaults
              ;;
            *)
              echo "[ERROR] Unknown suite: $TEST_SUITE" >&2
              exit 1
              ;;
          esac
      # Always tear down containers, even when the suite fails.
      - name: Stop test containers
        if: always()
        working-directory: src/apps/cli
        run: |
          bash ./util/couchdb-stop.sh >/dev/null 2>&1 || true
          bash ./util/minio-stop.sh >/dev/null 2>&1 || true

View File

@@ -56,7 +56,7 @@ jobs:
if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suitep2p/' }}
env:
CI: true
run: npm run test suitep2p/
run: npm run test:p2p
- name: Stop test services (CouchDB)
run: npm run test:docker-couchdb:stop
if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}

View File

@@ -7,6 +7,18 @@ on:
branches:
- main
- beta
paths:
- 'src/**'
- 'test/**'
- 'lib/**'
- 'package.json'
- 'package-lock.json'
- 'tsconfig.json'
- 'vite.config.ts'
- 'vitest.config*.ts'
- 'esbuild.config.mjs'
- 'eslint.config.mjs'
- '.github/workflows/unit-ci.yml'
permissions:
contents: read

View File

@@ -1,7 +1,7 @@
{
"id": "obsidian-livesync",
"name": "Self-hosted LiveSync",
"version": "0.25.52",
"version": "0.25.53",
"minAppVersion": "0.9.12",
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"author": "vorotamoroz",

2039
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
{
"name": "obsidian-livesync",
"version": "0.25.52",
"version": "0.25.53",
"description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"main": "main.js",
"type": "module",
@@ -53,7 +53,8 @@
"test:docker-all:down": "npm run test:docker-couchdb:down ; npm run test:docker-s3:down ; npm run test:docker-p2p:down",
"test:docker-all:start": "npm run test:docker-all:up && sleep 5 && npm run test:docker-all:init",
"test:docker-all:stop": "npm run test:docker-all:down",
"test:full": "npm run test:docker-all:start && vitest run --coverage && npm run test:docker-all:stop"
"test:full": "npm run test:docker-all:start && vitest run --coverage && npm run test:docker-all:stop",
"test:p2p": "bash test/suitep2p/run-p2p-tests.sh"
},
"keywords": [],
"author": "vorotamoroz",
@@ -134,6 +135,7 @@
"fflate": "^0.8.2",
"idb": "^8.0.3",
"minimatch": "^10.2.2",
"node-datachannel": "^0.32.1",
"octagonal-wheels": "^0.1.45",
"pouchdb-adapter-leveldb": "^9.0.0",
"qrcode-generator": "^1.4.4",

View File

@@ -63,43 +63,43 @@ As you know, the CLI is designed to be used in a headless environment. Hence all
```bash
# Sync local database with CouchDB (no files will be changed).
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json sync
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json sync
# Push files to local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json push /your/storage/file.md /vault/path/file.md
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json push /your/storage/file.md /vault/path/file.md
# Pull files from local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json pull /vault/path/file.md /your/storage/file.md
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json pull /vault/path/file.md /your/storage/file.md
# Verbose logging
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json --verbose
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json --verbose
# Apply setup URI to settings file (settings only; does not run synchronisation)
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json setup "obsidian://setuplivesync?settings=..."
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json setup "obsidian://setuplivesync?settings=..."
# Put text from stdin into local database
echo "Hello from stdin" | npm run cli -- /path/to/your-local-database --settings /path/to/settings.json put /vault/path/file.md
echo "Hello from stdin" | npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json put /vault/path/file.md
# Output a file from local database to stdout
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json cat /vault/path/file.md
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json cat /vault/path/file.md
# Output a specific revision of a file from local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json cat-rev /vault/path/file.md 3-abcdef
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json cat-rev /vault/path/file.md 3-abcdef
# Pull a specific revision of a file from local database to local storage
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json pull-rev /vault/path/file.md /your/storage/file.old.md 3-abcdef
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json pull-rev /vault/path/file.md /your/storage/file.old.md 3-abcdef
# List files in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json ls /vault/path/
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json ls /vault/path/
# Show metadata for a file in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json info /vault/path/file.md
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json info /vault/path/file.md
# Mark a file as deleted in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json rm /vault/path/file.md
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json rm /vault/path/file.md
# Resolve conflict by keeping a specific revision
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json resolve /vault/path/file.md 3-abcdef
npm run --silent cli -- /path/to/your-local-database --settings /path/to/settings.json resolve /vault/path/file.md 3-abcdef
```
### Configuration
@@ -148,6 +148,9 @@ Options:
Commands:
init-settings [path] Create settings JSON from DEFAULT_SETTINGS
sync Run one replication cycle and exit
p2p-peers <timeout> Show discovered peers as [peer]<TAB><peer-id><TAB><peer-name>
p2p-sync <peer> <timeout> Synchronise with specified peer-id or peer-name
p2p-host Start P2P host mode and wait until interrupted (Ctrl+C)
push <src> <dst> Push local file <src> into local database path <dst>
pull <src> <dst> Pull file <src> from local database into local file <dst>
pull-rev <src> <dst> <revision> Pull specific revision into local file <dst>
@@ -159,14 +162,52 @@ Commands:
info <vaultPath> Show file metadata including current and past revisions, conflicts, and chunk list
rm <vaultPath> Mark file as deleted in local database
resolve <vaultPath> <revision> Resolve conflict by keeping the specified revision
mirror <storagePath> <vaultPath> Mirror local file into local database.
```
Run via npm script:
```bash
npm run cli -- [database-path] [options] [command] [command-args]
npm run --silent cli -- [database-path] [options] [command] [command-args]
```
#### Detailed Command Descriptions
##### ls
`ls` lists files in the local database with optional prefix filtering. Output format is:
```text
vault/path/file.md<TAB>size<TAB>mtime<TAB>revision[*]
```
Note: `*` indicates if the file has conflicts.
##### p2p-peers
`p2p-peers <timeout>` waits for the specified number of seconds, then prints each discovered peer on a separate line:
```text
[peer]<TAB><peer-id><TAB><peer-name>
```
Use this command to select a target for `p2p-sync`.
##### p2p-sync
`p2p-sync <peer> <timeout>` discovers peers up to the specified timeout and synchronises with the selected peer.
- `<peer>` accepts either `peer-id` or `peer-name` from `p2p-peers` output.
- On success, the command prints a completion message to standard error and exits with status code `0`.
- On failure, the command prints an error message and exits non-zero.
##### p2p-host
`p2p-host` starts the local P2P host and keeps running until interrupted.
- Other peers can discover and synchronise with this host while it is running.
- Stop the host with `Ctrl+C`.
- In CLI mode, behaviour is non-interactive and acceptance follows settings.
##### info
`info` output fields:
- `id`: Document ID
@@ -179,9 +220,39 @@ npm run cli -- [database-path] [options] [command] [command-args]
- `chunks`: Number of chunk IDs
- `children`: Chunk ID list
### Planned options:
##### mirror
TODO: Conflict and resolution checks for real local databases.
`mirror` is a command that synchronises your local storage with your local database. It is essentially the same process that runs upon startup in Obsidian.
In other words, it performs the following actions:
1. **Precondition checks** — Aborts early if any of the following conditions are not met:
- Settings must be configured (`isConfigured: true`).
- File watching must not be suspended (`suspendFileWatching: false`).
- Remediation mode must be inactive (`maxMTimeForReflectEvents: 0`).
2. **State restoration** — On subsequent runs (after the first successful scan), restores the previous storage state before proceeding.
3. **Expired deletion cleanup** — If `automaticallyDeleteMetadataOfDeletedFiles` is set to a positive number of days, any document that is marked deleted and whose `mtime` is older than the retention period is permanently removed from the local database.
4. **File collection** — Enumerates files from two sources:
- **Storage**: all files under the vault path that pass `isTargetFile`.
- **Local database**: all normal documents (fetched with conflict information) whose paths are valid and pass `isTargetFile`.
- Both collections build case-insensitive ↔ case-sensitive path maps, controlled by `handleFilenameCaseSensitive`.
5. **Categorisation and synchronisation** — The union of both file sets is split into three groups and processed concurrently (up to 10 files at a time):
| Group | Condition | Action |
|---|---|---|
| **UPDATE DATABASE** | File exists in storage only | Store the file into the local database. |
| **UPDATE STORAGE** | File exists in database only | If the entry is active (not deleted) and not conflicted, restore the file from the database to storage. Deleted entries and conflicted entries are skipped. |
| **SYNC DATABASE AND STORAGE** | File exists in both | Compare `mtime` freshness. If storage is newer → write to database (`STORAGE → DB`). If database is newer → restore to storage (`STORAGE ← DB`). If equal → do nothing. Conflicted documents and files exceeding the size limit are always skipped. |
6. **Initialisation flag** — On the very first successful run, writes `initialized = true` to the key-value database so that subsequent runs can restore state in step 2.
Note: `mirror` does not respect file deletions. If a file is deleted in storage, it will be restored on the next `mirror` run. To delete a file, use the `rm` command instead. This is a little inconvenient, but it is intentional behaviour (if we handled this automatically in `mirror`, we would have to contend with a ton of edge cases).
### Planned options:
- `--immediate`: Perform sync after the command (e.g. `push`, `pull`, `put`, `rm`).
- `serve`: Start CLI in server mode, exposing REST APIs for remote, and batch operations.
@@ -194,9 +265,9 @@ TODO: Conflict and resolution checks for real local databases.
Create default settings, apply a setup URI, then run one sync cycle.
```bash
npm run cli -- init-settings /data/livesync-settings.json
printf '%s\n' "$SETUP_PASSPHRASE" | npm run cli -- /data/vault --settings /data/livesync-settings.json setup "$SETUP_URI"
npm run cli -- /data/vault --settings /data/livesync-settings.json sync
npm run --silent cli -- init-settings /data/livesync-settings.json
printf '%s\n' "$SETUP_PASSPHRASE" | npm run --silent cli -- /data/vault --settings /data/livesync-settings.json setup "$SETUP_URI"
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json sync
```
### 2. Scripted import and export
@@ -204,8 +275,8 @@ npm run cli -- /data/vault --settings /data/livesync-settings.json sync
Push local files into the database from automation, and pull them back for export or backup.
```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json push ./note.md notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json pull notes/note.md ./exports/note.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json push ./note.md notes/note.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json pull notes/note.md ./exports/note.md
```
### 3. Revision inspection and restore
@@ -213,9 +284,9 @@ npm run cli -- /data/vault --settings /data/livesync-settings.json pull notes/no
List metadata, find an older revision, then restore it by content (`cat-rev`) or file output (`pull-rev`).
```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json cat-rev notes/note.md 3-abcdef
npm run cli -- /data/vault --settings /data/livesync-settings.json pull-rev notes/note.md ./restore/note.old.md 3-abcdef
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json cat-rev notes/note.md 3-abcdef
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json pull-rev notes/note.md ./restore/note.old.md 3-abcdef
```
### 4. Conflict and cleanup workflow
@@ -223,9 +294,9 @@ npm run cli -- /data/vault --settings /data/livesync-settings.json pull-rev note
Inspect conflicted revisions, resolve by keeping one revision, then delete obsolete files.
```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json resolve notes/note.md 3-abcdef
npm run cli -- /data/vault --settings /data/livesync-settings.json rm notes/obsolete.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json resolve notes/note.md 3-abcdef
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json rm notes/obsolete.md
```
### 5. CI smoke test for content round-trip
@@ -233,8 +304,8 @@ npm run cli -- /data/vault --settings /data/livesync-settings.json rm notes/obso
Validate that `put`/`cat` is behaving as expected in a pipeline.
```bash
echo "hello-ci" | npm run cli -- /data/vault --settings /data/livesync-settings.json put ci/test.md
npm run cli -- /data/vault --settings /data/livesync-settings.json cat ci/test.md
echo "hello-ci" | npm run --silent cli -- /data/vault --settings /data/livesync-settings.json put ci/test.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json cat ci/test.md
```
## Development

View File

@@ -0,0 +1,122 @@
import type { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import { P2P_DEFAULT_SETTINGS } from "@lib/common/types";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
import { LiveSyncTrysteroReplicator } from "@lib/replication/trystero/LiveSyncTrysteroReplicator";
import { addP2PEventHandlers } from "@lib/replication/trystero/P2PReplicatorCore";
// A discovered peer as presented to the CLI user: stable id plus display name.
type CLIP2PPeer = {
    peerId: string;
    name: string;
};
/** Resolve after the given number of milliseconds have elapsed. */
function delay(ms: number): Promise<void> {
    return new Promise((done) => {
        setTimeout(done, ms);
    });
}
/**
 * Parse a timeout argument (in seconds) supplied on the command line.
 * @param value Raw string taken from argv.
 * @param commandName Command name, embedded in the error message.
 * @returns The timeout in seconds (may be fractional; zero is allowed).
 * @throws Error when the value is not a finite, non-negative number.
 */
export function parseTimeoutSeconds(value: string, commandName: string): number {
    const seconds = Number(value);
    const isValid = Number.isFinite(seconds) && seconds >= 0;
    if (!isValid) {
        throw new Error(`${commandName} requires a non-negative timeout in seconds`);
    }
    return seconds;
}
/**
 * Ensure the current settings permit P2P operation and normalise them for CLI use.
 * Mutates the live settings object: fills in the default App ID when missing and
 * forces headless mode, since the CLI cannot prompt the user interactively.
 * @throws Error when P2P is disabled in the settings.
 */
function validateP2PSettings(core: LiveSyncBaseCore<ServiceContext, any>) {
    const settings = core.services.setting.currentSettings();
    if (!settings.P2P_Enabled) {
        throw new Error("P2P is disabled in settings (P2P_Enabled=false)");
    }
    settings.P2P_AppID ||= P2P_DEFAULT_SETTINGS.P2P_AppID;
    // CLI mode is non-interactive.
    settings.P2P_IsHeadless = true;
}
/**
 * Build a Trystero replicator wired to the core services.
 * Validates the P2P settings first, then attaches the shared P2P event handlers.
 */
function createReplicator(core: LiveSyncBaseCore<ServiceContext, any>): LiveSyncTrysteroReplicator {
    validateP2PSettings(core);
    const instance = new LiveSyncTrysteroReplicator({ services: core.services });
    addP2PEventHandlers(instance);
    return instance;
}
/** Snapshot the currently-known peer advertisements, sorted by peer id. */
function getSortedPeers(replicator: LiveSyncTrysteroReplicator): CLIP2PPeer[] {
    const snapshot = [...replicator.knownAdvertisements].map(({ peerId, name }) => ({ peerId, name }));
    snapshot.sort((left, right) => left.peerId.localeCompare(right.peerId));
    return snapshot;
}
/**
 * Open a replicator, wait the requested number of seconds for peer discovery,
 * and return the peers seen so far. The replicator is always closed before
 * returning, even on error.
 */
export async function collectPeers(
    core: LiveSyncBaseCore<ServiceContext, any>,
    timeoutSec: number
): Promise<CLIP2PPeer[]> {
    const discovery = createReplicator(core);
    await discovery.open();
    try {
        // Give peers time to announce themselves before taking a snapshot.
        await delay(timeoutSec * 1000);
        return getSortedPeers(discovery);
    } finally {
        await discovery.close();
    }
}
/**
 * Select a peer by id first, then by (unique) display name.
 * @returns The matching peer, or undefined when nothing matches.
 * @throws Error when the token matches more than one peer by name.
 */
function resolvePeer(peers: CLIP2PPeer[], peerToken: string): CLIP2PPeer | undefined {
    const idMatch = peers.find((candidate) => candidate.peerId === peerToken);
    if (idMatch) {
        return idMatch;
    }
    const nameMatches = peers.filter((candidate) => candidate.name === peerToken);
    if (nameMatches.length > 1) {
        throw new Error(`Multiple peers matched by name '${peerToken}'. Use peer-id instead.`);
    }
    return nameMatches.length === 1 ? nameMatches[0] : undefined;
}
/**
 * Discover the requested peer (by id or name) within the timeout, then run a
 * two-phase sync with it: pull from the peer, then ask the peer to push back.
 * The replicator is always closed before returning, even on error.
 * @param core Core instance providing services for the replicator.
 * @param peerToken Peer id or peer name, as shown by `p2p-peers`.
 * @param timeoutSec Maximum time to wait for the peer to be discovered.
 * @returns The peer that was synchronised with.
 * @throws Error when the peer is not found in time, when the name is ambiguous,
 *         or when either sync phase reports a failure.
 */
export async function syncWithPeer(
    core: LiveSyncBaseCore<ServiceContext, any>,
    peerToken: string,
    timeoutSec: number
): Promise<CLIP2PPeer> {
    const replicator = createReplicator(core);
    await replicator.open();
    try {
        const timeoutMs = timeoutSec * 1000;
        const start = Date.now();
        let targetPeer: CLIP2PPeer | undefined;
        // Poll every 200ms until the peer shows up or the deadline passes.
        // `<=` means a timeout of 0 still performs exactly one lookup.
        while (Date.now() - start <= timeoutMs) {
            const peers = getSortedPeers(replicator);
            targetPeer = resolvePeer(peers, peerToken);
            if (targetPeer) {
                break;
            }
            await delay(200);
        }
        if (!targetPeer) {
            throw new Error(`Peer '${peerToken}' was not found within ${timeoutSec} seconds`);
        }
        // Phase 1: pull changes from the peer into the local database.
        const pullResult = await replicator.replicateFrom(targetPeer.peerId, false);
        if (pullResult && "error" in pullResult && pullResult.error) {
            throw pullResult.error;
        }
        // Phase 2: ask the peer to synchronise back towards us.
        // NOTE(review): result shape is untyped here (`as any`); presumably
        // `{ ok: boolean, error?: Error }` — confirm against the replicator API.
        const pushResult = (await replicator.requestSynchroniseToPeer(targetPeer.peerId)) as any;
        if (!pushResult || pushResult.ok !== true) {
            throw pushResult?.error ?? new Error("P2P sync failed while requesting remote sync");
        }
        return targetPeer;
    } finally {
        await replicator.close();
    }
}
/**
 * Start hosting: open a replicator and return it to the caller, who is
 * responsible for keeping the process alive and closing it on shutdown.
 */
export async function openP2PHost(core: LiveSyncBaseCore<ServiceContext, any>): Promise<LiveSyncTrysteroReplicator> {
    const replicator = createReplicator(core);
    await replicator.open();
    return replicator;
}

View File

@@ -0,0 +1,18 @@
import { describe, expect, it } from "vitest";
import { parseTimeoutSeconds } from "./p2p";
// Unit tests for the CLI P2P timeout-argument parser.
describe("p2p command helpers", () => {
    it("accepts non-negative timeout", () => {
        // Zero and fractional seconds are both valid.
        expect(parseTimeoutSeconds("0", "p2p-peers")).toBe(0);
        expect(parseTimeoutSeconds("2.5", "p2p-sync")).toBe(2.5);
    });
    it("rejects invalid timeout values", () => {
        // The error message embeds the command name that was passed in.
        expect(() => parseTimeoutSeconds("-1", "p2p-peers")).toThrow(
            "p2p-peers requires a non-negative timeout in seconds"
        );
        expect(() => parseTimeoutSeconds("abc", "p2p-sync")).toThrow(
            "p2p-sync requires a non-negative timeout in seconds"
        );
    });
});

View File

@@ -6,6 +6,9 @@ import { DEFAULT_SETTINGS, type FilePathWithPrefix, type ObsidianLiveSyncSetting
import { stripAllPrefixes } from "@lib/string_and_binary/path";
import type { CLICommandContext, CLIOptions } from "./types";
import { promptForPassphrase, readStdinAsUtf8, toArrayBuffer, toVaultRelativePath } from "./utils";
import { collectPeers, openP2PHost, parseTimeoutSeconds, syncWithPeer } from "./p2p";
import { performFullScan } from "@lib/serviceFeatures/offlineScanner";
import { UnresolvedErrorManager } from "@lib/services/base/UnresolvedErrorManager";
export async function runCommand(options: CLIOptions, context: CLICommandContext): Promise<boolean> {
const { vaultPath, core, settingsPath } = context;
@@ -21,6 +24,42 @@ export async function runCommand(options: CLIOptions, context: CLICommandContext
return !!result;
}
if (options.command === "p2p-peers") {
if (options.commandArgs.length < 1) {
throw new Error("p2p-peers requires one argument: <timeout>");
}
const timeoutSec = parseTimeoutSeconds(options.commandArgs[0], "p2p-peers");
console.error(`[Command] p2p-peers timeout=${timeoutSec}s`);
const peers = await collectPeers(core as any, timeoutSec);
if (peers.length > 0) {
process.stdout.write(peers.map((peer) => `[peer]\t${peer.peerId}\t${peer.name}`).join("\n") + "\n");
}
return true;
}
if (options.command === "p2p-sync") {
if (options.commandArgs.length < 2) {
throw new Error("p2p-sync requires two arguments: <peer> <timeout>");
}
const peerToken = options.commandArgs[0].trim();
if (!peerToken) {
throw new Error("p2p-sync requires a non-empty <peer>");
}
const timeoutSec = parseTimeoutSeconds(options.commandArgs[1], "p2p-sync");
console.error(`[Command] p2p-sync peer=${peerToken} timeout=${timeoutSec}s`);
const peer = await syncWithPeer(core as any, peerToken, timeoutSec);
console.error(`[Done] P2P sync completed with ${peer.name} (${peer.peerId})`);
return true;
}
if (options.command === "p2p-host") {
console.error("[Command] p2p-host");
await openP2PHost(core as any);
console.error("[Ready] P2P host is running. Press Ctrl+C to stop.");
await new Promise(() => {});
return true;
}
if (options.command === "push") {
if (options.commandArgs.length < 2) {
throw new Error("push requires two arguments: <src> <dst>");
@@ -309,5 +348,12 @@ export async function runCommand(options: CLIOptions, context: CLICommandContext
return true;
}
if (options.command === "mirror") {
console.error("[Command] mirror");
const log = (msg: unknown) => console.error(`[Mirror] ${msg}`);
const errorManager = new UnresolvedErrorManager(core.services.appLifecycle);
return await performFullScan(core as any, log, errorManager, false, true);
}
throw new Error(`Unsupported command: ${options.command}`);
}

View File

@@ -4,6 +4,9 @@ import { ServiceContext } from "@lib/services/base/ServiceBase";
export type CLICommand =
| "daemon"
| "sync"
| "p2p-peers"
| "p2p-sync"
| "p2p-host"
| "push"
| "pull"
| "pull-rev"
@@ -15,12 +18,14 @@ export type CLICommand =
| "info"
| "rm"
| "resolve"
| "mirror"
| "init-settings";
export interface CLIOptions {
databasePath?: string;
settingsPath?: string;
verbose?: boolean;
debug?: boolean;
force?: boolean;
command: CLICommand;
commandArgs: string[];
@@ -34,6 +39,9 @@ export interface CLICommandContext {
export const VALID_COMMANDS = new Set([
"sync",
"p2p-peers",
"p2p-sync",
"p2p-host",
"push",
"pull",
"pull-rev",
@@ -45,5 +53,6 @@ export const VALID_COMMANDS = new Set([
"info",
"rm",
"resolve",
"mirror",
"init-settings",
] as const);

View File

@@ -1,6 +1,12 @@
#!/usr/bin/env node
import polyfill from "node-datachannel/polyfill";
import { main } from "./main";
// Copy every export of the node-datachannel polyfill onto globalThis so that
// browser-oriented WebRTC code can run under Node.
for (const prop in polyfill) {
    // @ts-ignore Applying polyfill to globalThis
    globalThis[prop] = (polyfill as any)[prop];
}
main().catch((error) => {
console.error(`[Fatal Error]`, error);
process.exit(1);

View File

@@ -23,17 +23,28 @@ import * as fs from "fs/promises";
import * as path from "path";
import { NodeServiceContext, NodeServiceHub } from "./services/NodeServiceHub";
import { LiveSyncBaseCore } from "../../LiveSyncBaseCore";
import { ModuleReplicatorP2P } from "../../modules/core/ModuleReplicatorP2P";
import { initialiseServiceModulesCLI } from "./serviceModules/CLIServiceModules";
import { DEFAULT_SETTINGS, LOG_LEVEL_VERBOSE, type LOG_LEVEL, type ObsidianLiveSyncSettings } from "@lib/common/types";
import type { InjectableServiceHub } from "@lib/services/implements/injectable/InjectableServiceHub";
import type { InjectableSettingService } from "@/lib/src/services/implements/injectable/InjectableSettingService";
import { LOG_LEVEL_DEBUG, setGlobalLogFunction, defaultLoggerEnv } from "octagonal-wheels/common/logger";
import {
LOG_LEVEL_DEBUG,
setGlobalLogFunction,
defaultLoggerEnv,
LOG_LEVEL_INFO,
LOG_LEVEL_URGENT,
LOG_LEVEL_NOTICE,
} from "octagonal-wheels/common/logger";
import { runCommand } from "./commands/runCommand";
import { VALID_COMMANDS } from "./commands/types";
import type { CLICommand, CLIOptions } from "./commands/types";
import { getPathFromUXFileInfo } from "@lib/common/typeUtils";
import { stripAllPrefixes } from "@lib/string_and_binary/path";
const SETTINGS_FILE = ".livesync/settings.json";
defaultLoggerEnv.minLogLevel = LOG_LEVEL_DEBUG;
// DI the log again.
// const recentLogEntries = reactiveSource<LogEntry[]>([]);
// const globalLogFunction = (message: any, level?: number, key?: string) => {
@@ -45,12 +56,12 @@ defaultLoggerEnv.minLogLevel = LOG_LEVEL_DEBUG;
// recentLogEntries.value = [...recentLogEntries.value, entry];
// };
setGlobalLogFunction((msg, level) => {
console.error(`[${level}] ${typeof msg === "string" ? msg : JSON.stringify(msg)}`);
if (msg instanceof Error) {
console.error(msg);
}
});
// setGlobalLogFunction((msg, level) => {
// console.error(`[${level}] ${typeof msg === "string" ? msg : JSON.stringify(msg)}`);
// if (msg instanceof Error) {
// console.error(msg);
// }
// });
function printHelp(): void {
console.log(`
Self-hosted LiveSync CLI
@@ -63,6 +74,10 @@ Arguments:
Commands:
sync Run one replication cycle and exit
p2p-peers <timeout> Show discovered peers as [peer]<TAB><peer-id><TAB><peer-name>
p2p-sync <peer> <timeout>
Sync with the specified peer-id or peer-name
p2p-host Start P2P host mode and wait until interrupted
push <src> <dst> Push local file <src> into local database path <dst>
pull <src> <dst> Pull file <src> from local database into local file <dst>
pull-rev <src> <dst> <rev> Pull file <src> at specific revision <rev> into local file <dst>
@@ -76,6 +91,9 @@ Commands:
resolve <path> <rev> Resolve conflicts by keeping <rev> and deleting others
Examples:
livesync-cli ./my-database sync
livesync-cli ./my-database p2p-peers 5
livesync-cli ./my-database p2p-sync my-peer-name 15
livesync-cli ./my-database p2p-host
livesync-cli ./my-database --settings ./custom-settings.json push ./note.md folder/note.md
livesync-cli ./my-database pull folder/note.md ./exports/note.md
livesync-cli ./my-database pull-rev folder/note.md ./exports/note.old.md 3-abcdef
@@ -103,6 +121,7 @@ export function parseArgs(): CLIOptions {
let databasePath: string | undefined;
let settingsPath: string | undefined;
let verbose = false;
let debug = false;
let force = false;
let command: CLICommand = "daemon";
const commandArgs: string[] = [];
@@ -120,6 +139,10 @@ export function parseArgs(): CLIOptions {
settingsPath = args[i];
break;
}
case "--debug":
case "-d":
// debugging automatically enables verbose logging, as it is intended for debugging issues.
debug = true;
case "--verbose":
case "-v":
verbose = true;
@@ -165,6 +188,7 @@ export function parseArgs(): CLIOptions {
databasePath,
settingsPath,
verbose,
debug,
force,
command,
commandArgs,
@@ -205,11 +229,23 @@ export async function main() {
options.command === "cat" ||
options.command === "cat-rev" ||
options.command === "ls" ||
options.command === "p2p-peers" ||
options.command === "info" ||
options.command === "rm" ||
options.command === "resolve";
const infoLog = avoidStdoutNoise ? console.error : console.log;
if (options.debug) {
setGlobalLogFunction((msg, level) => {
console.error(`[${level}] ${typeof msg === "string" ? msg : JSON.stringify(msg)}`);
if (msg instanceof Error) {
console.error(msg);
}
});
} else {
setGlobalLogFunction((msg, level) => {
// NO OP, leave it to logFunction
});
}
if (options.command === "init-settings") {
await createDefaultSettingsFile(options);
return;
@@ -243,8 +279,28 @@ export async function main() {
const context = new NodeServiceContext(vaultPath);
const serviceHubInstance = new NodeServiceHub<NodeServiceContext>(vaultPath, context);
serviceHubInstance.API.addLog.setHandler((message: string, level: LOG_LEVEL) => {
const prefix = `[${level}]`;
if (level <= LOG_LEVEL_VERBOSE) {
let levelStr = "";
switch (level) {
case LOG_LEVEL_DEBUG:
levelStr = "debug";
break;
case LOG_LEVEL_VERBOSE:
levelStr = "Verbose";
break;
case LOG_LEVEL_INFO:
levelStr = "Info";
break;
case LOG_LEVEL_NOTICE:
levelStr = "Notice";
break;
case LOG_LEVEL_URGENT:
levelStr = "Urgent";
break;
default:
levelStr = `${level}`;
}
const prefix = `(${levelStr})`;
if (level <= LOG_LEVEL_INFO) {
if (!options.verbose) return;
}
console.error(`${prefix} ${message}`);
@@ -254,6 +310,7 @@ export async function main() {
console.error(`[Info] Replication result received, but not processed automatically in CLI mode.`);
return await Promise.resolve(true);
}, -100);
// Setup settings handlers
const settingService = serviceHubInstance.setting;
@@ -296,9 +353,20 @@ export async function main() {
(core: LiveSyncBaseCore<NodeServiceContext, any>, serviceHub: InjectableServiceHub<NodeServiceContext>) => {
return initialiseServiceModulesCLI(vaultPath, core, serviceHub);
},
() => [], // No extra modules
(core) => [new ModuleReplicatorP2P(core)], // Register P2P replicator for CLI (useP2PReplicator is not used here)
() => [], // No add-ons
() => [] // No serviceFeatures
(core) => {
// Add a target filter to prevent internal files from being handled
core.services.vault.isTargetFile.addHandler(async (target) => {
const vaultPath = stripAllPrefixes(getPathFromUXFileInfo(target));
const parts = vaultPath.split(path.sep);
// if some part of the path starts with dot, treat it as internal file and ignore.
if (parts.some((part) => part.startsWith("."))) {
return await Promise.resolve(false);
}
return await Promise.resolve(true);
}, -1 /* highest priority */);
}
);
// Setup signal handlers for graceful shutdown
@@ -370,4 +438,6 @@ export async function main() {
console.error(`[Error] Failed to start:`, error);
process.exit(1);
}
// To prevent unexpected hanging in webRTC connections.
process.exit(process.exitCode ?? 0);
}

View File

@@ -58,4 +58,31 @@ describe("CLI parseArgs", () => {
expect(combined).toContain("Usage:");
expect(combined).toContain("livesync-cli [database-path]");
});
it("parses p2p-peers command and timeout", () => {
    // The timeout argument ("5") must be forwarded verbatim as a command arg.
    process.argv = ["node", "livesync-cli", "./vault", "p2p-peers", "5"];
    const { databasePath, command, commandArgs } = parseArgs();
    expect(databasePath).toBe("./vault");
    expect(command).toBe("p2p-peers");
    expect(commandArgs).toEqual(["5"]);
});
it("parses p2p-sync command with peer and timeout", () => {
    // Both positional extras (peer name and timeout) must land in commandArgs.
    process.argv = ["node", "livesync-cli", "./vault", "p2p-sync", "peer-1", "12"];
    const { databasePath, command, commandArgs } = parseArgs();
    expect(databasePath).toBe("./vault");
    expect(command).toBe("p2p-sync");
    expect(commandArgs).toEqual(["peer-1", "12"]);
});
it("parses p2p-host command", () => {
    // No extra arguments: commandArgs must come back empty.
    process.argv = ["node", "livesync-cli", "./vault", "p2p-host"];
    const { databasePath, command, commandArgs } = parseArgs();
    expect(databasePath).toBe("./vault");
    expect(command).toBe("p2p-host");
    expect(commandArgs).toEqual([]);
});
});

View File

@@ -66,9 +66,9 @@ class CLIStatusAdapter implements IStorageEventStatusAdapter {
const now = Date.now();
if (now - this.lastUpdate > this.updateInterval) {
if (status.totalQueued > 0 || status.processing > 0) {
console.log(
`[StorageEventManager] Batched: ${status.batched}, Processing: ${status.processing}, Total Queued: ${status.totalQueued}`
);
// console.log(
// `[StorageEventManager] Batched: ${status.batched}, Processing: ${status.processing}, Total Queued: ${status.totalQueued}`
// );
}
this.lastUpdate = now;
}
@@ -108,7 +108,7 @@ class CLIWatchAdapter implements IStorageEventWatchAdapter {
async beginWatch(handlers: IStorageEventWatchHandlers): Promise<void> {
// File watching is not activated in the CLI.
// Because the CLI is designed for push/pull operations, not real-time sync.
console.error("[CLIWatchAdapter] File watching is not enabled in CLI version");
// console.error("[CLIWatchAdapter] File watching is not enabled in CLI version");
return Promise.resolve();
}
}

View File

@@ -11,14 +11,21 @@
"cli": "node dist/index.cjs",
"buildRun": "npm run build && npm run cli --",
"check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json",
"test:unit": "cd ../../.. && npx vitest run --config vitest.config.unit.ts src/apps/cli/main.unit.spec.ts src/apps/cli/commands/utils.unit.spec.ts src/apps/cli/commands/runCommand.unit.spec.ts",
"test:unit": "cd ../../.. && npx vitest run --config vitest.config.unit.ts src/apps/cli/main.unit.spec.ts src/apps/cli/commands/utils.unit.spec.ts src/apps/cli/commands/runCommand.unit.spec.ts src/apps/cli/commands/p2p.unit.spec.ts",
"test:e2e:two-vaults": "bash test/test-e2e-two-vaults-with-docker-linux.sh",
"test:e2e:two-vaults:common": "bash test/test-e2e-two-vaults-common.sh",
"test:e2e:two-vaults:matrix": "bash test/test-e2e-two-vaults-matrix.sh",
"test:e2e:push-pull": "bash test/test-push-pull-linux.sh",
"test:e2e:setup-put-cat": "bash test/test-setup-put-cat-linux.sh",
"test:e2e:sync-two-local": "bash test/test-sync-two-local-databases-linux.sh",
"test:e2e:all": "npm run test:e2e:two-vaults && npm run test:e2e:push-pull && npm run test:e2e:setup-put-cat && npm run test:e2e:sync-two-local"
"test:e2e:p2p": "bash test/test-p2p-three-nodes-conflict-linux.sh",
"test:e2e:p2p-upload-download-repro": "bash test/test-p2p-upload-download-repro-linux.sh",
"test:e2e:p2p-host": "bash test/test-p2p-host-linux.sh",
"test:e2e:p2p-sync": "bash test/test-p2p-sync-linux.sh",
"test:e2e:p2p-peers:local-relay": "bash test/test-p2p-peers-local-relay.sh",
"test:e2e:mirror": "bash test/test-mirror-linux.sh",
"pretest:e2e:all": "npm run build",
"test:e2e:all": "export RUN_BUILD=0 && npm run test:e2e:setup-put-cat && npm run test:e2e:push-pull && npm run test:e2e:sync-two-local && npm run test:e2e:p2p && npm run test:e2e:mirror && npm run test:e2e:two-vaults"
},
"dependencies": {},
"devDependencies": {}

View File

@@ -10,6 +10,78 @@ import { createInstanceLogFunction } from "@lib/services/lib/logUtils";
import * as nodeFs from "node:fs";
import * as nodePath from "node:path";
// Sentinel key marking an object as a tagged container produced by
// serializeForNodeKV (chosen to be unlikely to collide with real data keys).
const NODE_KV_TYPED_KEY = "__nodeKvType";
// Key under which a tagged container stores its serialised payload.
const NODE_KV_VALUES_KEY = "values";
// JSON-safe wrappers for values JSON.stringify cannot round-trip natively:
// Sets keep recursively-serialised entries; Uint8Array/ArrayBuffer keep
// their bytes as plain number arrays.
type SerializableContainer =
    | {
          [NODE_KV_TYPED_KEY]: "Set";
          [NODE_KV_VALUES_KEY]: unknown[];
      }
    | {
          [NODE_KV_TYPED_KEY]: "Uint8Array";
          [NODE_KV_VALUES_KEY]: number[];
      }
    | {
          [NODE_KV_TYPED_KEY]: "ArrayBuffer";
          [NODE_KV_VALUES_KEY]: number[];
      };
/** Narrow `value` to a plain object map (note: arrays also pass this check). */
function isRecord(value: unknown): value is Record<string, unknown> {
    return value !== null && typeof value === "object";
}
/**
 * Convert `value` into a JSON-serialisable shape.
 * Sets, Uint8Arrays and ArrayBuffers become tagged containers
 * (see SerializableContainer); arrays and plain objects are walked
 * recursively; every other value passes through unchanged.
 */
function serializeForNodeKV(value: unknown): unknown {
    if (value instanceof Set) {
        const serialisedEntries: unknown[] = [];
        for (const entry of value) {
            serialisedEntries.push(serializeForNodeKV(entry));
        }
        return {
            [NODE_KV_TYPED_KEY]: "Set",
            [NODE_KV_VALUES_KEY]: serialisedEntries,
        } satisfies SerializableContainer;
    }
    if (value instanceof Uint8Array) {
        return {
            [NODE_KV_TYPED_KEY]: "Uint8Array",
            [NODE_KV_VALUES_KEY]: [...value],
        } satisfies SerializableContainer;
    }
    if (value instanceof ArrayBuffer) {
        return {
            [NODE_KV_TYPED_KEY]: "ArrayBuffer",
            [NODE_KV_VALUES_KEY]: [...new Uint8Array(value)],
        } satisfies SerializableContainer;
    }
    if (Array.isArray(value)) {
        return value.map((entry) => serializeForNodeKV(entry));
    }
    if (isRecord(value)) {
        const result: Record<string, unknown> = {};
        for (const [key, entry] of Object.entries(value)) {
            result[key] = serializeForNodeKV(entry);
        }
        return result;
    }
    return value;
}
/**
 * Inverse of `serializeForNodeKV`: rebuild Sets, Uint8Arrays and
 * ArrayBuffers from their tagged containers, recursing into arrays and
 * plain objects; non-container primitives pass through unchanged.
 */
function deserializeFromNodeKV(value: unknown): unknown {
    if (Array.isArray(value)) {
        return value.map((entry) => deserializeFromNodeKV(entry));
    }
    if (!isRecord(value)) {
        return value;
    }
    const tag = value[NODE_KV_TYPED_KEY];
    const payload = value[NODE_KV_VALUES_KEY];
    if (Array.isArray(payload)) {
        switch (tag) {
            case "Set":
                return new Set(payload.map((entry) => deserializeFromNodeKV(entry)));
            case "Uint8Array":
                return Uint8Array.from(payload);
            case "ArrayBuffer":
                return Uint8Array.from(payload).buffer;
        }
    }
    // Untagged plain object: deserialise each property in place.
    const result: Record<string, unknown> = {};
    for (const [key, entry] of Object.entries(value)) {
        result[key] = deserializeFromNodeKV(entry);
    }
    return result;
}
class NodeFileKeyValueDatabase implements KeyValueDatabase {
private filePath: string;
private data = new Map<string, unknown>();
@@ -29,7 +101,7 @@ class NodeFileKeyValueDatabase implements KeyValueDatabase {
private load() {
try {
const loaded = JSON.parse(nodeFs.readFileSync(this.filePath, "utf-8")) as Record<string, unknown>;
this.data = new Map(Object.entries(loaded));
this.data = new Map(Object.entries(loaded).map(([key, value]) => [key, deserializeFromNodeKV(value)]));
} catch {
this.data = new Map();
}
@@ -37,7 +109,10 @@ class NodeFileKeyValueDatabase implements KeyValueDatabase {
private flush() {
nodeFs.mkdirSync(nodePath.dirname(this.filePath), { recursive: true });
nodeFs.writeFileSync(this.filePath, JSON.stringify(Object.fromEntries(this.data), null, 2), "utf-8");
const serializable = Object.fromEntries(
[...this.data.entries()].map(([key, value]) => [key, serializeForNodeKV(value)])
);
nodeFs.writeFileSync(this.filePath, JSON.stringify(serializable, null, 2), "utf-8");
}
async get<T>(key: IDBValidKey): Promise<T> {

View File

@@ -4,8 +4,9 @@ set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
CLI_CMD=(npm --silent run cli -- -v)
source "$SCRIPT_DIR/test-helpers.sh"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
cli_test_init_cli_cmd
RUN_BUILD="${RUN_BUILD:-1}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
TEST_ENV_FILE="${TEST_ENV_FILE:-$CLI_DIR/.test.env}"
@@ -36,27 +37,24 @@ COUCHDB_URI=""
COUCHDB_DBNAME=""
MINIO_BUCKET=""
require_env() {
local var_name="$1"
if [[ -z "${!var_name:-}" ]]; then
echo "[ERROR] required variable '$var_name' is missing in $TEST_ENV_FILE" >&2
exit 1
fi
}
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
require_env hostname
require_env dbname
require_env username
require_env password
cli_test_require_env hostname "$TEST_ENV_FILE"
cli_test_require_env dbname "$TEST_ENV_FILE"
cli_test_require_env username "$TEST_ENV_FILE"
cli_test_require_env password "$TEST_ENV_FILE"
COUCHDB_URI="${hostname%/}"
COUCHDB_DBNAME="${dbname}-${DB_SUFFIX}"
COUCHDB_USER="${username:-}"
COUCHDB_PASSWORD="${password:-}"
elif [[ "$REMOTE_TYPE" == "MINIO" ]]; then
require_env accessKey
require_env secretKey
require_env minioEndpoint
require_env bucketName
cli_test_require_env accessKey "$TEST_ENV_FILE"
cli_test_require_env secretKey "$TEST_ENV_FILE"
cli_test_require_env minioEndpoint "$TEST_ENV_FILE"
cli_test_require_env bucketName "$TEST_ENV_FILE"
MINIO_BUCKET="${bucketName}-${DB_SUFFIX}"
MINIO_ENDPOINT="${minioEndpoint:-}"
MINIO_ACCESS_KEY="${accessKey:-}"
MINIO_SECRET_KEY="${secretKey:-}"
else
echo "[ERROR] unsupported REMOTE_TYPE: $REMOTE_TYPE (use COUCHDB or MINIO)" >&2
exit 1
@@ -65,9 +63,9 @@ fi
cleanup() {
local exit_code=$?
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
cli_test_stop_couchdb
else
bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
cli_test_stop_minio
fi
if [[ "$KEEP_TEST_DATA" != "1" ]]; then
@@ -83,10 +81,6 @@ cleanup() {
}
trap cleanup EXIT
run_cli() {
"${CLI_CMD[@]}" "$@"
}
run_cli_a() {
run_cli "$VAULT_A" --settings "$SETTINGS_A" "$@"
}
@@ -95,191 +89,28 @@ run_cli_b() {
run_cli "$VAULT_B" --settings "$SETTINGS_B" "$@"
}
assert_contains() {
local haystack="$1"
local needle="$2"
local message="$3"
if ! grep -Fq "$needle" <<< "$haystack"; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected to find: $needle" >&2
echo "[FAIL] actual output:" >&2
echo "$haystack" >&2
exit 1
fi
}
assert_equal() {
local expected="$1"
local actual="$2"
local message="$3"
if [[ "$expected" != "$actual" ]]; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected: $expected" >&2
echo "[FAIL] actual: $actual" >&2
exit 1
fi
}
assert_command_fails() {
local message="$1"
shift
set +e
"$@" >"$WORK_DIR/failed-command.log" 2>&1
local exit_code=$?
set -e
if [[ "$exit_code" -eq 0 ]]; then
echo "[FAIL] $message" >&2
cat "$WORK_DIR/failed-command.log" >&2
exit 1
fi
}
assert_files_equal() {
local expected_file="$1"
local actual_file="$2"
local message="$3"
if ! cmp -s "$expected_file" "$actual_file"; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected sha256: $(sha256sum "$expected_file" | awk '{print $1}')" >&2
echo "[FAIL] actual sha256: $(sha256sum "$actual_file" | awk '{print $1}')" >&2
exit 1
fi
}
sanitise_cat_stdout() {
sed '/^\[CLIWatchAdapter\] File watching is not enabled in CLI version$/d'
}
extract_json_string_field() {
local field_name="$1"
node -e '
const fs = require("node:fs");
const fieldName = process.argv[1];
const data = JSON.parse(fs.readFileSync(0, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
process.stdout.write(value);
}
' "$field_name"
}
sync_both() {
run_cli_a sync >/dev/null
run_cli_b sync >/dev/null
}
curl_json() {
curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}
configure_remote_settings() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" \
REMOTE_TYPE="$REMOTE_TYPE" \
COUCHDB_URI="$COUCHDB_URI" \
COUCHDB_USER="${username:-}" \
COUCHDB_PASSWORD="${password:-}" \
COUCHDB_DBNAME="$COUCHDB_DBNAME" \
MINIO_ENDPOINT="${minioEndpoint:-}" \
MINIO_BUCKET="$MINIO_BUCKET" \
MINIO_ACCESS_KEY="${accessKey:-}" \
MINIO_SECRET_KEY="${secretKey:-}" \
ENCRYPT="$ENCRYPT" \
E2E_PASSPHRASE="$E2E_PASSPHRASE" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
const remoteType = process.env.REMOTE_TYPE;
if (remoteType === "COUCHDB") {
data.remoteType = "";
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
} else if (remoteType === "MINIO") {
data.remoteType = "MINIO";
data.bucket = process.env.MINIO_BUCKET;
data.endpoint = process.env.MINIO_ENDPOINT;
data.accessKey = process.env.MINIO_ACCESS_KEY;
data.secretKey = process.env.MINIO_SECRET_KEY;
data.region = "auto";
data.forcePathStyle = true;
}
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.encrypt = process.env.ENCRYPT === "1";
data.passphrase = data.encrypt ? process.env.E2E_PASSPHRASE : "";
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_remote_sync_settings "$settings_file"
}
init_settings() {
local settings_file="$1"
run_cli init-settings --force "$settings_file" >/dev/null
cli_test_init_settings_file "$settings_file"
configure_remote_settings "$settings_file"
cat "$settings_file"
}
wait_for_minio_bucket() {
local retries=30
local delay_sec=2
local i
for ((i = 1; i <= retries; i++)); do
if docker run --rm --network host --entrypoint=/bin/sh minio/mc -c "mc alias set myminio $minioEndpoint $accessKey $secretKey >/dev/null 2>&1 && mc ls myminio/$MINIO_BUCKET >/dev/null 2>&1"; then
return 0
fi
bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-init.sh" >/dev/null 2>&1 || true
sleep "$delay_sec"
done
return 1
}
start_remote() {
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
echo "[INFO] stopping leftover CouchDB container if present"
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
echo "[INFO] starting CouchDB test container"
bash "$CLI_DIR/util/couchdb-start.sh"
echo "[INFO] initialising CouchDB test container"
bash "$CLI_DIR/util/couchdb-init.sh"
echo "[INFO] CouchDB create test database: $COUCHDB_DBNAME"
until (curl_json -X PUT --user "${username}:${password}" "${hostname}/${COUCHDB_DBNAME}"); do sleep 5; done
cli_test_start_couchdb "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
else
echo "[INFO] stopping leftover MinIO container if present"
bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
echo "[INFO] starting MinIO test container"
bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-start.sh"
echo "[INFO] initialising MinIO test bucket: $MINIO_BUCKET"
local minio_init_ok=0
for _ in 1 2 3 4 5; do
if bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-init.sh"; then
minio_init_ok=1
break
fi
sleep 2
done
if [[ "$minio_init_ok" != "1" ]]; then
echo "[FAIL] could not initialise MinIO bucket after retries: $MINIO_BUCKET" >&2
exit 1
fi
if ! wait_for_minio_bucket; then
echo "[FAIL] MinIO bucket not ready: $MINIO_BUCKET" >&2
exit 1
fi
cli_test_start_minio "$MINIO_ENDPOINT" "$MINIO_ACCESS_KEY" "$MINIO_SECRET_KEY" "$MINIO_BUCKET"
fi
}
@@ -313,14 +144,14 @@ TARGET_CONFLICT="e2e/conflict.md"
echo "[CASE] A puts and A can get info"
printf 'alpha-from-a\n' | run_cli_a put "$TARGET_A_ONLY" >/dev/null
INFO_A_ONLY="$(run_cli_a info "$TARGET_A_ONLY")"
assert_contains "$INFO_A_ONLY" "\"path\": \"$TARGET_A_ONLY\"" "A info should include path after put"
cli_test_assert_contains "$INFO_A_ONLY" "\"path\": \"$TARGET_A_ONLY\"" "A info should include path after put"
echo "[PASS] A put/info"
echo "[CASE] A puts, both sync, and B can get info"
printf 'visible-after-sync\n' | run_cli_a put "$TARGET_SYNC" >/dev/null
sync_both
INFO_B_SYNC="$(run_cli_b info "$TARGET_SYNC")"
assert_contains "$INFO_B_SYNC" "\"path\": \"$TARGET_SYNC\"" "B info should include path after sync"
cli_test_assert_contains "$INFO_B_SYNC" "\"path\": \"$TARGET_SYNC\"" "B info should include path after sync"
echo "[PASS] sync A->B and B info"
echo "[CASE] A pushes and puts, both sync, and B can pull and cat"
@@ -331,9 +162,9 @@ run_cli_a push "$PUSH_SRC" "$TARGET_PUSH" >/dev/null
printf 'put-content-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_PUT" >/dev/null
sync_both
run_cli_b pull "$TARGET_PUSH" "$PULL_DST" >/dev/null
assert_files_equal "$PUSH_SRC" "$PULL_DST" "B pull result does not match pushed source"
CAT_B_PUT="$(run_cli_b cat "$TARGET_PUT" | sanitise_cat_stdout)"
assert_equal "put-content-$DB_SUFFIX" "$CAT_B_PUT" "B cat should return A put content"
cli_test_assert_files_equal "$PUSH_SRC" "$PULL_DST" "B pull result does not match pushed source"
CAT_B_PUT="$(run_cli_b cat "$TARGET_PUT" | cli_test_sanitise_cat_stdout)"
cli_test_assert_equal "put-content-$DB_SUFFIX" "$CAT_B_PUT" "B cat should return A put content"
echo "[PASS] push/pull and put/cat across vaults"
echo "[CASE] A pushes binary, both sync, and B can pull identical bytes"
@@ -343,31 +174,44 @@ head -c 4096 /dev/urandom > "$PUSH_BINARY_SRC"
run_cli_a push "$PUSH_BINARY_SRC" "$TARGET_PUSH_BINARY" >/dev/null
sync_both
run_cli_b pull "$TARGET_PUSH_BINARY" "$PULL_BINARY_DST" >/dev/null
assert_files_equal "$PUSH_BINARY_SRC" "$PULL_BINARY_DST" "B pull result does not match pushed binary source"
cli_test_assert_files_equal "$PUSH_BINARY_SRC" "$PULL_BINARY_DST" "B pull result does not match pushed binary source"
echo "[PASS] binary push/pull across vaults"
echo "[CASE] A removes, both sync, and B can no longer cat"
run_cli_a rm "$TARGET_PUT" >/dev/null
sync_both
assert_command_fails "B cat should fail after A removed the file and synced" run_cli_b cat "$TARGET_PUT"
cli_test_assert_command_fails "B cat should fail after A removed the file and synced" "$WORK_DIR/failed-command.log" run_cli_b cat "$TARGET_PUT"
echo "[PASS] rm is replicated"
echo "[CASE] verify conflict detection"
printf 'conflict-base\n' | run_cli_a put "$TARGET_CONFLICT" >/dev/null
sync_both
INFO_B_BASE="$(run_cli_b info "$TARGET_CONFLICT")"
assert_contains "$INFO_B_BASE" "\"path\": \"$TARGET_CONFLICT\"" "B should be able to info before creating conflict"
cli_test_assert_contains "$INFO_B_BASE" "\"path\": \"$TARGET_CONFLICT\"" "B should be able to info before creating conflict"
printf 'conflict-from-a-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_CONFLICT" >/dev/null
printf 'conflict-from-b-%s\n' "$DB_SUFFIX" | run_cli_b put "$TARGET_CONFLICT" >/dev/null
run_cli_a sync >/dev/null
run_cli_b sync >/dev/null
run_cli_a sync >/dev/null
INFO_A_CONFLICT=""
INFO_B_CONFLICT=""
CONFLICT_DETECTED=0
INFO_A_CONFLICT="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_CONFLICT="$(run_cli_b info "$TARGET_CONFLICT")"
if grep -qF '"conflicts": "N/A"' <<< "$INFO_A_CONFLICT" && grep -qF '"conflicts": "N/A"' <<< "$INFO_B_CONFLICT"; then
for side in a b a; do
if [[ "$side" == "a" ]]; then
run_cli_a sync >/dev/null
else
run_cli_b sync >/dev/null
fi
INFO_A_CONFLICT="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_CONFLICT="$(run_cli_b info "$TARGET_CONFLICT")"
if ! grep -qF '"conflicts": "N/A"' <<< "$INFO_A_CONFLICT" || ! grep -qF '"conflicts": "N/A"' <<< "$INFO_B_CONFLICT"; then
CONFLICT_DETECTED=1
break
fi
done
if [[ "$CONFLICT_DETECTED" != "1" ]]; then
echo "[FAIL] conflict was expected but both A and B show Conflicts: N/A" >&2
echo "--- A info ---" >&2
echo "$INFO_A_CONFLICT" >&2
@@ -399,7 +243,7 @@ fi
echo "[PASS] ls marks conflicts"
echo "[CASE] resolve conflict on A and verify both vaults are clean"
KEEP_REVISION="$(printf '%s' "$INFO_A_CONFLICT" | extract_json_string_field revision)"
KEEP_REVISION="$(printf '%s' "$INFO_A_CONFLICT" | cli_test_json_string_field_from_stdin revision)"
if [[ -z "$KEEP_REVISION" ]]; then
echo "[FAIL] could not extract current revision from A info output" >&2
echo "$INFO_A_CONFLICT" >&2
@@ -411,7 +255,7 @@ run_cli_a resolve "$TARGET_CONFLICT" "$KEEP_REVISION" >/dev/null
INFO_A_RESOLVED=""
INFO_B_RESOLVED=""
RESOLVE_PROPAGATED=0
for _ in 1 2 3 4 5; do
for _ in 1 2 3 4 5 6; do
sync_both
INFO_A_RESOLVED="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_RESOLVED="$(run_cli_b info "$TARGET_CONFLICT")"
@@ -419,19 +263,15 @@ for _ in 1 2 3 4 5; do
RESOLVE_PROPAGATED=1
break
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
KEEP_REVISION_B="$(printf '%s' "$INFO_B_RESOLVED" | extract_json_string_field revision)"
if [[ -n "$KEEP_REVISION_B" ]]; then
run_cli_b resolve "$TARGET_CONFLICT" "$KEEP_REVISION_B" >/dev/null
sync_both
INFO_A_RESOLVED="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_RESOLVED="$(run_cli_b info "$TARGET_CONFLICT")"
if grep -qF '"conflicts": "N/A"' <<< "$INFO_A_RESOLVED" && grep -qF '"conflicts": "N/A"' <<< "$INFO_B_RESOLVED"; then
RESOLVE_PROPAGATED=1
# Retry from A only when conflict remains due to eventual consistency.
if ! grep -qF '"conflicts": "N/A"' <<< "$INFO_A_RESOLVED"; then
KEEP_REVISION_A="$(printf '%s' "$INFO_A_RESOLVED" | cli_test_json_string_field_from_stdin revision)"
if [[ -n "$KEEP_REVISION_A" ]]; then
run_cli_a resolve "$TARGET_CONFLICT" "$KEEP_REVISION_A" >/dev/null || true
fi
fi
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
echo "[FAIL] conflicts should be resolved on both vaults" >&2
@@ -453,9 +293,9 @@ if [[ "$LS_A_RESOLVED_REV" == *"*" || "$LS_B_RESOLVED_REV" == *"*" ]]; then
exit 1
fi
CAT_A_RESOLVED="$(run_cli_a cat "$TARGET_CONFLICT" | sanitise_cat_stdout)"
CAT_B_RESOLVED="$(run_cli_b cat "$TARGET_CONFLICT" | sanitise_cat_stdout)"
assert_equal "$CAT_A_RESOLVED" "$CAT_B_RESOLVED" "resolved content should match across both vaults"
CAT_A_RESOLVED="$(run_cli_a cat "$TARGET_CONFLICT" | cli_test_sanitise_cat_stdout)"
CAT_B_RESOLVED="$(run_cli_b cat "$TARGET_CONFLICT" | cli_test_sanitise_cat_stdout)"
cli_test_assert_equal "$CAT_A_RESOLVED" "$CAT_B_RESOLVED" "resolved content should match across both vaults"
echo "[PASS] resolve is replicated and ls reflects resolved state"
echo "[PASS] all requested E2E scenarios completed (${TEST_LABEL})"

View File

@@ -0,0 +1,346 @@
#!/usr/bin/env bash
# Prepare the CLI_CMD array used by run_cli. Appends -v when
# VERBOSE_TEST_LOGGING=1 so test logs include verbose CLI output.
cli_test_init_cli_cmd() {
    CLI_CMD=(npm --silent run cli --)
    if [[ "${VERBOSE_TEST_LOGGING:-0}" == "1" ]]; then
        CLI_CMD+=(-v)
    fi
}
# Invoke the CLI with the command prepared by cli_test_init_cli_cmd.
run_cli() {
    "${CLI_CMD[@]}" "$@"
}
# Abort the test when the named environment variable is unset or empty.
# $1: variable name; $2 (optional): env file named in the error message.
cli_test_require_env() {
    local var_name="$1"
    local env_file="${2:-${TEST_ENV_FILE:-environment}}"
    # ${!var_name} is bash indirect expansion: the value of the named variable.
    if [[ -n "${!var_name:-}" ]]; then
        return 0
    fi
    echo "[ERROR] required variable '$var_name' is missing in $env_file" >&2
    exit 1
}
# Fail the test unless $1 contains the literal substring $2 ($3 = message).
cli_test_assert_contains() {
    local haystack="$1" needle="$2" message="$3"
    if grep -Fq "$needle" <<< "$haystack"; then
        return 0
    fi
    echo "[FAIL] $message" >&2
    echo "[FAIL] expected to find: $needle" >&2
    echo "[FAIL] actual output:" >&2
    echo "$haystack" >&2
    exit 1
}
# Fail the test unless $1 and $2 are exactly equal ($3 = message).
cli_test_assert_equal() {
    local expected="$1" actual="$2" message="$3"
    if [[ "$expected" == "$actual" ]]; then
        return 0
    fi
    echo "[FAIL] $message" >&2
    echo "[FAIL] expected: $expected" >&2
    echo "[FAIL] actual: $actual" >&2
    exit 1
}
# Run the command given after $2, expecting a non-zero exit status.
# $1 = message, $2 = log file capturing the command's combined output.
cli_test_assert_command_fails() {
    local message="$1" log_file="$2"
    shift 2
    # Temporarily disable -e so a (desired) failure does not abort the script.
    set +e
    "$@" >"$log_file" 2>&1
    local exit_code=$?
    set -e
    if [[ "$exit_code" -ne 0 ]]; then
        return 0
    fi
    echo "[FAIL] $message" >&2
    cat "$log_file" >&2
    exit 1
}
# Fail the test unless the two files are byte-identical ($3 = message).
cli_test_assert_files_equal() {
    local expected_file="$1" actual_file="$2" message="$3"
    if cmp -s "$expected_file" "$actual_file"; then
        return 0
    fi
    echo "[FAIL] $message" >&2
    echo "[FAIL] expected sha256: $(sha256sum "$expected_file" | awk '{print $1}')" >&2
    echo "[FAIL] actual sha256: $(sha256sum "$actual_file" | awk '{print $1}')" >&2
    exit 1
}
# Filter: drop the CLI watch-adapter notice from `cat` output so content
# comparisons see the file body only. sed (not grep -v) is used on purpose:
# sed exits 0 even when every line is removed, which keeps pipelines safe
# under `set -o pipefail`.
cli_test_sanitise_cat_stdout() {
    sed -e '/^\[CLIWatchAdapter\] File watching is not enabled in CLI version$/d'
}
# Read JSON from stdin and print the named field's value, but only when the
# field is a string; prints nothing otherwise.
cli_test_json_string_field_from_stdin() {
    local field_name="$1"
    node -e '
const fs = require("node:fs");
const fieldName = process.argv[1];
const data = JSON.parse(fs.readFileSync(0, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
    process.stdout.write(value);
}
' "$field_name"
}
# Same as above, but reads the JSON document from a file instead of stdin.
cli_test_json_string_field_from_file() {
    local json_file="$1"
    local field_name="$2"
    node -e '
const fs = require("node:fs");
const jsonFile = process.argv[1];
const fieldName = process.argv[2];
const data = JSON.parse(fs.readFileSync(jsonFile, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
    process.stdout.write(value);
}
' "$json_file" "$field_name"
}
# Predicate: true when the given JSON field holds the literal string "N/A"
# (the CLI reports "conflicts": "N/A" for conflict-free files).
cli_test_json_field_is_na() {
    local json_file="$1"
    local field_name="$2"
    [[ "$(cli_test_json_string_field_from_file "$json_file" "$field_name")" == "N/A" ]]
}
# curl wrapper for test traffic: IPv4 only, fail on HTTP errors, short
# connect/overall timeouts so a dead service fails fast.
cli_test_curl_json() {
    curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}
# Create a fresh default settings file at $1 (overwrites via --force).
cli_test_init_settings_file() {
    local settings_file="$1"
    run_cli init-settings --force "$settings_file" >/dev/null
}
# Set isConfigured=true in the settings file $1. Several CLI commands
# (e.g. mirror) refuse to run against an unconfigured settings file.
cli_test_mark_settings_configured() {
    local settings_file="$1"
    SETTINGS_FILE="$settings_file" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Write CouchDB connection details into the settings file.
# $1 file, $2 URI, $3 user, $4 password, $5 db name, $6 liveSync flag (0/1).
cli_test_apply_couchdb_settings() {
    local settings_file="$1"
    local couchdb_uri="$2"
    local couchdb_user="$3"
    local couchdb_password="$4"
    local couchdb_dbname="$5"
    local live_sync="${6:-0}"
    # Values are handed to the node heredoc via the environment to avoid
    # any quoting/escaping issues with embedded special characters.
    SETTINGS_FILE="$settings_file" \
    COUCHDB_URI="$couchdb_uri" \
    COUCHDB_USER="$couchdb_user" \
    COUCHDB_PASSWORD="$couchdb_password" \
    COUCHDB_DBNAME="$couchdb_dbname" \
    LIVE_SYNC="$live_sync" \
    node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
if (process.env.LIVE_SYNC === "1") {
    data.liveSync = true;
    data.syncOnStart = false;
    data.syncOnSave = false;
    data.usePluginSync = false;
}
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Write remote-sync settings (CouchDB or MinIO, selected by $REMOTE_TYPE)
# into the settings file $1. Reads connection details from the caller's
# COUCHDB_*/MINIO_*/ENCRYPT/E2E_PASSPHRASE variables.
cli_test_apply_remote_sync_settings() {
    local settings_file="$1"
    SETTINGS_FILE="$settings_file" \
    REMOTE_TYPE="$REMOTE_TYPE" \
    COUCHDB_URI="$COUCHDB_URI" \
    COUCHDB_USER="${COUCHDB_USER:-}" \
    COUCHDB_PASSWORD="${COUCHDB_PASSWORD:-}" \
    COUCHDB_DBNAME="$COUCHDB_DBNAME" \
    MINIO_ENDPOINT="${MINIO_ENDPOINT:-}" \
    MINIO_BUCKET="$MINIO_BUCKET" \
    MINIO_ACCESS_KEY="${MINIO_ACCESS_KEY:-}" \
    MINIO_SECRET_KEY="${MINIO_SECRET_KEY:-}" \
    ENCRYPT="${ENCRYPT:-0}" \
    E2E_PASSPHRASE="${E2E_PASSPHRASE:-}" \
    node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
const remoteType = process.env.REMOTE_TYPE;
if (remoteType === "COUCHDB") {
    data.remoteType = "";
    data.couchDB_URI = process.env.COUCHDB_URI;
    data.couchDB_USER = process.env.COUCHDB_USER;
    data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
    data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
} else if (remoteType === "MINIO") {
    data.remoteType = "MINIO";
    data.bucket = process.env.MINIO_BUCKET;
    data.endpoint = process.env.MINIO_ENDPOINT;
    data.accessKey = process.env.MINIO_ACCESS_KEY;
    data.secretKey = process.env.MINIO_SECRET_KEY;
    data.region = "auto";
    data.forcePathStyle = true;
}
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.encrypt = process.env.ENCRYPT === "1";
data.passphrase = data.encrypt ? process.env.E2E_PASSPHRASE : "";
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Write P2P sync settings into the settings file.
# $1 file, $2 room id, $3 passphrase, $4 app id (optional),
# $5 relay URL list (optional), $6 auto-accept peer pattern (optional).
cli_test_apply_p2p_settings() {
    local settings_file="$1"
    local room_id="$2"
    local passphrase="$3"
    local app_id="${4:-self-hosted-livesync-cli-tests}"
    local relays="${5:-ws://localhost:4000/}"
    local auto_accept="${6:-~.*}"
    SETTINGS_FILE="$settings_file" \
    P2P_ROOM_ID="$room_id" \
    P2P_PASSPHRASE="$passphrase" \
    P2P_APP_ID="$app_id" \
    P2P_RELAYS="$relays" \
    P2P_AUTO_ACCEPT="$auto_accept" \
    node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.P2P_Enabled = true;
data.P2P_AutoStart = false;
data.P2P_AutoBroadcast = false;
data.P2P_AppID = process.env.P2P_APP_ID;
data.P2P_roomID = process.env.P2P_ROOM_ID;
data.P2P_passphrase = process.env.P2P_PASSPHRASE;
data.P2P_relays = process.env.P2P_RELAYS;
data.P2P_AutoAcceptingPeers = process.env.P2P_AUTO_ACCEPT;
data.P2P_AutoDenyingPeers = "";
data.P2P_IsHeadless = true;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Predicate: true when the relay URL points at the local test relay
# (with or without a trailing slash).
cli_test_is_local_p2p_relay() {
    local relay_url="$1"
    [[ "$relay_url" == "ws://localhost:4000" || "$relay_url" == "ws://localhost:4000/" ]]
}
# Stop the local P2P relay container; ignore errors if it is not running.
cli_test_stop_p2p_relay() {
    bash "$CLI_DIR/util/p2p-stop.sh" >/dev/null 2>&1 || true
}
# (Re)start the local P2P relay container, stopping any leftover first.
cli_test_start_p2p_relay() {
    echo "[INFO] stopping leftover P2P relay container if present"
    cli_test_stop_p2p_relay
    echo "[INFO] starting local P2P relay container"
    bash "$CLI_DIR/util/p2p-start.sh"
}
# Stop the CouchDB test container; ignore errors if it is not running.
cli_test_stop_couchdb() {
    bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
}
# (Re)start CouchDB, initialise it, and create the test database.
# $1 base URI, $2 user, $3 password, $4 database name.
cli_test_start_couchdb() {
    local couchdb_uri="$1"
    local couchdb_user="$2"
    local couchdb_password="$3"
    local couchdb_dbname="$4"
    echo "[INFO] stopping leftover CouchDB container if present"
    cli_test_stop_couchdb
    echo "[INFO] starting CouchDB test container"
    bash "$CLI_DIR/util/couchdb-start.sh"
    echo "[INFO] initialising CouchDB test container"
    bash "$CLI_DIR/util/couchdb-init.sh"
    echo "[INFO] CouchDB create test database: $couchdb_dbname"
    # Retry until the container accepts the PUT (it may still be booting).
    until (cli_test_curl_json -X PUT --user "${couchdb_user}:${couchdb_password}" "${couchdb_uri}/${couchdb_dbname}"); do sleep 5; done
}
# Stop the MinIO test container; ignore errors if it is not running.
cli_test_stop_minio() {
    bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
}
# Poll (up to 30 attempts, 2s apart) until the MinIO bucket is listable via
# the mc client; re-runs minio-init.sh between attempts in case bucket
# creation raced with container startup. Returns non-zero on timeout.
# $1 endpoint, $2 access key, $3 secret key, $4 bucket name.
cli_test_wait_for_minio_bucket() {
    local minio_endpoint="$1"
    local minio_access_key="$2"
    local minio_secret_key="$3"
    local minio_bucket="$4"
    local retries=30
    local delay_sec=2
    local i
    for ((i = 1; i <= retries; i++)); do
        # --network host so mc can reach the endpoint published by the
        # MinIO container on localhost.
        if docker run --rm --network host --entrypoint=/bin/sh minio/mc -c "mc alias set myminio $minio_endpoint $minio_access_key $minio_secret_key >/dev/null 2>&1 && mc ls myminio/$minio_bucket >/dev/null 2>&1"; then
            return 0
        fi
        bucketName="$minio_bucket" bash "$CLI_DIR/util/minio-init.sh" >/dev/null 2>&1 || true
        sleep "$delay_sec"
    done
    return 1
}
# (Re)start the MinIO test container and ensure the bucket exists and is
# reachable, failing the test script if initialisation never succeeds.
# $1 endpoint, $2 access key, $3 secret key, $4 bucket name.
cli_test_start_minio() {
    local minio_endpoint="$1"
    local minio_access_key="$2"
    local minio_secret_key="$3"
    local minio_bucket="$4"
    local minio_init_ok=0
    echo "[INFO] stopping leftover MinIO container if present"
    cli_test_stop_minio
    echo "[INFO] starting MinIO test container"
    bucketName="$minio_bucket" bash "$CLI_DIR/util/minio-start.sh"
    echo "[INFO] initialising MinIO test bucket: $minio_bucket"
    # Bucket creation can fail while the server is still booting; retry.
    for _ in 1 2 3 4 5; do
        if bucketName="$minio_bucket" bash "$CLI_DIR/util/minio-init.sh"; then
            minio_init_ok=1
            break
        fi
        sleep 2
    done
    if [[ "$minio_init_ok" != "1" ]]; then
        echo "[FAIL] could not initialise MinIO bucket after retries: $minio_bucket" >&2
        exit 1
    fi
    # Creation succeeded; now wait until the bucket is actually listable.
    if ! cli_test_wait_for_minio_bucket "$minio_endpoint" "$minio_access_key" "$minio_secret_key" "$minio_bucket"; then
        echo "[FAIL] MinIO bucket not ready: $minio_bucket" >&2
        exit 1
    fi
}
# Print a banner identifying the calling test script, the UTC time, and the
# current git commit (BASH_SOURCE[1] is the sourcing script, not this file).
display_test_info(){
    echo "======================"
    echo "Script: ${BASH_SOURCE[1]:-$0}"
    echo "Date: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
    echo "Git commit: $(git -C "$SCRIPT_DIR/.." rev-parse --short HEAD 2>/dev/null || echo "N/A")"
    echo "======================"
}

View File

@@ -0,0 +1,196 @@
#!/usr/bin/env bash
# Test: mirror command — storage <-> local database synchronisation
#
# Covered cases:
# 1. Storage-only file → synced into DB (UPDATE DATABASE)
# 2. DB-only file → restored to storage (UPDATE STORAGE)
# 3. DB-deleted file → NOT restored to storage (UPDATE STORAGE skip)
# 4. Both, storage newer → DB updated (SYNC: STORAGE → DB)
# 5. Both, DB newer → storage updated (SYNC: DB → STORAGE)
#
# Not covered (require precise mtime control or artificial conflict injection):
# - Both, equal mtime → no-op (EVEN)
# - Conflicted entry → skipped
#
set -euo pipefail
# Resolve the script and CLI directories so the test can be launched from anywhere.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
# RUN_BUILD=0 skips the (slow) CLI build when iterating locally.
RUN_BUILD="${RUN_BUILD:-1}"
cli_test_init_cli_cmd
# All artefacts live in a throw-away directory removed on exit.
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
SETTINGS_FILE="$WORK_DIR/data.json"
VAULT_DIR="$WORK_DIR/vault"
mkdir -p "$VAULT_DIR/test"
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI..."
    npm run build
fi
echo "[INFO] generating settings -> $SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
# isConfigured=true is required for mirror (canProceedScan checks this)
cli_test_mark_settings_configured "$SETTINGS_FILE"
# Simple pass/fail counters; the script exits non-zero when FAIL > 0 (see summary).
PASS=0
FAIL=0
assert_pass() { echo "[PASS] $1"; PASS=$((PASS + 1)); }
assert_fail() { echo "[FAIL] $1" >&2; FAIL=$((FAIL + 1)); }
# Return timestamp for touch -t in YYYYMMDDHHMM format.
# Accepts offsets such as "+1 hour" or "-1 hour".
#
# Resolution order:
#   1. gdate (GNU coreutils, e.g. installed on macOS) — understands "-d <offset>".
#   2. Native GNU date ("date -d" succeeds) — typical Linux.
#   3. BSD date fallback — translates "±N hour|minute|day" into a "-v" flag.
#      This generalises the previous hard-coded handling of exactly
#      "+1 hour" and "-1 hour" while keeping those inputs working unchanged.
# Exits the whole script with status 1 on an offset no branch can handle.
portable_touch_timestamp() {
    local offset="$1"
    if command -v gdate >/dev/null 2>&1; then
        gdate -d "$offset" +%Y%m%d%H%M
        return
    fi
    if date -d "$offset" +%Y%m%d%H%M >/dev/null 2>&1; then
        date -d "$offset" +%Y%m%d%H%M
        return
    fi
    # BSD date has no "-d", but "-v[+-]<n><unit>" adjusts the clock. Parse the
    # GNU-style offset and map the unit word onto the single-letter flag.
    local sign count unit
    if [[ "$offset" =~ ^([+-])([0-9]+)[[:space:]]+(hour|minute|day)s?$ ]]; then
        sign="${BASH_REMATCH[1]}"
        count="${BASH_REMATCH[2]}"
        case "${BASH_REMATCH[3]}" in
            hour) unit="H" ;;
            minute) unit="M" ;;
            day) unit="d" ;;
        esac
        date -v"${sign}${count}${unit}" +%Y%m%d%H%M
        return
    fi
    echo "[FAIL] Unsupported date offset on this platform: $offset" >&2
    exit 1
}
# ─────────────────────────────────────────────────────────────────────────────
# Case 1: File exists only in storage → should be synced into DB after mirror
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 1: storage-only → DB ==="
printf 'storage-only content\n' > "$VAULT_DIR/test/storage-only.md"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
# Pull the entry back out of the DB and compare byte-for-byte with storage.
RESULT_FILE="$WORK_DIR/case1-cat.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull test/storage-only.md "$RESULT_FILE"
if cmp -s "$VAULT_DIR/test/storage-only.md" "$RESULT_FILE"; then
    assert_pass "storage-only file was synced into DB"
else
    assert_fail "storage-only file NOT synced into DB"
    echo "--- storage ---" >&2; cat "$VAULT_DIR/test/storage-only.md" >&2
    echo "--- cat ---" >&2; cat "$RESULT_FILE" >&2
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 2: File exists only in DB → should be restored to storage after mirror
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 2: DB-only → storage ==="
# Seed the DB directly via `put`; the file must not yet exist in storage.
printf 'db-only content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/db-only.md
if [[ -f "$VAULT_DIR/test/db-only.md" ]]; then
    assert_fail "db-only.md unexpectedly exists in storage before mirror"
else
    echo "[INFO] confirmed: test/db-only.md not in storage before mirror"
fi
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
if [[ -f "$VAULT_DIR/test/db-only.md" ]]; then
    STORAGE_CONTENT="$(cat "$VAULT_DIR/test/db-only.md")"
    if [[ "$STORAGE_CONTENT" == "db-only content" ]]; then
        assert_pass "DB-only file was restored to storage"
    else
        assert_fail "DB-only file restored but content mismatch (got: '${STORAGE_CONTENT}')"
    fi
else
    assert_fail "DB-only file was NOT restored to storage"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 3: File deleted in DB → should NOT be created in storage
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 3: DB-deleted → storage untouched ==="
# Create then delete the entry in the DB; mirror must honour the deletion.
printf 'to-be-deleted\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/deleted.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" rm test/deleted.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
if [[ ! -f "$VAULT_DIR/test/deleted.md" ]]; then
    assert_pass "deleted DB entry was not restored to storage"
else
    assert_fail "deleted DB entry was incorrectly restored to storage"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 4: Both exist, storage is newer → DB should be updated
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 4: storage newer → DB updated ==="
# Seed DB with old content (mtime ≈ now)
printf 'old content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/sync-storage-newer.md
# Write new content to storage with a timestamp 1 hour in the future
printf 'new content\n' > "$VAULT_DIR/test/sync-storage-newer.md"
touch -t "$(portable_touch_timestamp '+1 hour')" "$VAULT_DIR/test/sync-storage-newer.md"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
# The DB copy must now match the (newer) storage copy byte-for-byte.
DB_RESULT_FILE="$WORK_DIR/case4-pull.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull test/sync-storage-newer.md "$DB_RESULT_FILE"
if cmp -s "$VAULT_DIR/test/sync-storage-newer.md" "$DB_RESULT_FILE"; then
    assert_pass "DB updated to match newer storage file"
else
    assert_fail "DB NOT updated to match newer storage file"
    echo "--- expected(storage) ---" >&2; cat "$VAULT_DIR/test/sync-storage-newer.md" >&2
    echo "--- pulled(from db) ---" >&2; cat "$DB_RESULT_FILE" >&2
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 5: Both exist, DB is newer → storage should be updated
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 5: DB newer → storage updated ==="
# Write old content to storage with a timestamp 1 hour in the past
printf 'old storage content\n' > "$VAULT_DIR/test/sync-db-newer.md"
touch -t "$(portable_touch_timestamp '-1 hour')" "$VAULT_DIR/test/sync-db-newer.md"
# Write new content to DB only (mtime ≈ now, newer than the storage file)
printf 'new db content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/sync-db-newer.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
STORAGE_CONTENT="$(cat "$VAULT_DIR/test/sync-db-newer.md")"
if [[ "$STORAGE_CONTENT" == "new db content" ]]; then
    assert_pass "storage updated to match newer DB entry"
else
    assert_fail "storage NOT updated to match newer DB entry (got: '${STORAGE_CONTENT}')"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Summary
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "Results: PASS=$PASS FAIL=$FAIL"
# Any failed assertion makes the whole test exit non-zero.
if [[ "$FAIL" -gt 0 ]]; then
    exit 1
fi

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
set -euo pipefail
# This test should be run together with a P2P client; refer to the
# test-p2p-three-nodes-conflict-linux.sh test for more details.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
# Tunables (all overridable from the environment):
RUN_BUILD="${RUN_BUILD:-1}"                          # 0 skips the CLI build
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"                # 1 preserves WORK_DIR after exit
RELAY="${RELAY:-ws://localhost:4000/}"
USE_INTERNAL_RELAY="${USE_INTERNAL_RELAY:-1}"
ROOM_ID="${ROOM_ID:-1}"
PASSPHRASE="${PASSPHRASE:-test}"
APP_ID="${APP_ID:-self-hosted-livesync-cli-tests}"
cli_test_init_cli_cmd
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi
# Scratch vault and settings for the host node.
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-p2p-host.XXXXXX")"
VAULT="$WORK_DIR/vault-host"
SETTINGS="$WORK_DIR/settings-host.json"
mkdir -p "$VAULT"
# EXIT trap: stop the internal relay when this script started it, remove the
# scratch directory unless KEEP_TEST_DATA=1, and re-raise the original status.
cleanup() {
    local status=$?
    case "${P2P_RELAY_STARTED:-0}" in
        1) cli_test_stop_p2p_relay ;;
    esac
    if [[ "$KEEP_TEST_DATA" == "1" ]]; then
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    else
        rm -rf "$WORK_DIR"
    fi
    exit "$status"
}
trap cleanup EXIT
# Start a local relay only when requested AND the configured RELAY actually
# points at localhost; otherwise an external relay is assumed to be running.
if [[ "$USE_INTERNAL_RELAY" == "1" ]]; then
    if cli_test_is_local_p2p_relay "$RELAY"; then
        cli_test_start_p2p_relay
        P2P_RELAY_STARTED=1
    else
        echo "[INFO] USE_INTERNAL_RELAY=1 but RELAY is not local ($RELAY), skipping local relay startup"
    fi
fi
echo "[INFO] preparing settings"
echo "[INFO] relay=$RELAY room=$ROOM_ID app=$APP_ID"
cli_test_init_settings_file "$SETTINGS"
cli_test_apply_p2p_settings "$SETTINGS" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY"
echo "[CASE] start p2p-host"
echo "[INFO] press Ctrl+C to stop"
# Runs in the foreground until interrupted; cleanup() handles teardown.
run_cli "$VAULT" --settings "$SETTINGS" p2p-host

View File

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
# Tunables (overridable from the environment). NOTE(review): RUN_BUILD
# defaults to 0 here, unlike most sibling tests — presumably intended as a
# quick check against an already-built CLI; confirm.
RUN_BUILD="${RUN_BUILD:-0}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
RELAY="${RELAY:-ws://localhost:7777}"
ROOM_ID="${ROOM_ID:-1}"
PASSPHRASE="${PASSPHRASE:-test}"
TIMEOUT_SECONDS="${TIMEOUT_SECONDS:-8}"
DEBUG_FLAG="${DEBUG_FLAG:--d}"   # passed as the first CLI argument below
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-p2p-peers-local-relay.XXXXXX")"
VAULT="$WORK_DIR/vault"
SETTINGS="$WORK_DIR/settings.json"
mkdir -p "$VAULT"
# EXIT trap: remove the scratch directory unless the caller asked to keep it,
# then propagate the script's original exit status.
cleanup() {
    local status=$?
    if [[ "$KEEP_TEST_DATA" == "1" ]]; then
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    else
        rm -rf "$WORK_DIR"
    fi
    exit "$status"
}
trap cleanup EXIT
cli_test_init_cli_cmd
echo "[INFO] creating settings at $SETTINGS"
run_cli init-settings --force "$SETTINGS" >/dev/null
# Patch the generated settings in place via an inline Node script: enable P2P
# with the test relay/room/passphrase, auto-accept all peers, run headless,
# and mark the file as configured.
SETTINGS_FILE="$SETTINGS" \
P2P_ROOM_ID="$ROOM_ID" \
P2P_PASSPHRASE="$PASSPHRASE" \
P2P_RELAYS="$RELAY" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.P2P_Enabled = true;
data.P2P_AutoStart = false;
data.P2P_AutoBroadcast = false;
data.P2P_roomID = process.env.P2P_ROOM_ID;
data.P2P_passphrase = process.env.P2P_PASSPHRASE;
data.P2P_relays = process.env.P2P_RELAYS;
data.P2P_AutoAcceptingPeers = "~.*";
data.P2P_AutoDenyingPeers = "";
data.P2P_IsHeadless = true;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
echo "[INFO] relay=$RELAY room=$ROOM_ID timeout=${TIMEOUT_SECONDS}s"
echo "[INFO] running p2p-peers"
# Capture output and exit code without tripping set -e.
set +e
OUTPUT="$(run_cli "$DEBUG_FLAG" "$VAULT" --settings "$SETTINGS" p2p-peers "$TIMEOUT_SECONDS" 2>&1)"
EXIT_CODE=$?
set -e
echo "$OUTPUT"
if [[ "$EXIT_CODE" -ne 0 ]]; then
    echo "[FAIL] p2p-peers exited with code $EXIT_CODE" >&2
    exit "$EXIT_CODE"
fi
# Empty output is only a warning: no peers may be connected to the relay.
if [[ -z "$OUTPUT" ]]; then
    echo "[WARN] command completed but output was empty"
fi
echo "[PASS] p2p-peers finished"

View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# This test should be run together with a P2P client; refer to the
# test-p2p-three-nodes-conflict-linux.sh test for more details.
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
# Tunables (all overridable from the environment):
RUN_BUILD="${RUN_BUILD:-1}"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
RELAY="${RELAY:-ws://localhost:4000/}"
USE_INTERNAL_RELAY="${USE_INTERNAL_RELAY:-1}"
ROOM_ID="${ROOM_ID:-1}"
PASSPHRASE="${PASSPHRASE:-test}"
APP_ID="${APP_ID:-self-hosted-livesync-cli-tests}"
PEERS_TIMEOUT="${PEERS_TIMEOUT:-12}"   # seconds to wait for peer discovery
SYNC_TIMEOUT="${SYNC_TIMEOUT:-15}"     # seconds allowed for one sync run
TARGET_PEER="${TARGET_PEER:-}"         # optional: pin a peer by id or name
cli_test_init_cli_cmd
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-p2p-sync.XXXXXX")"
VAULT="$WORK_DIR/vault-sync"
SETTINGS="$WORK_DIR/settings-sync.json"
mkdir -p "$VAULT"
# EXIT trap: stop the internal relay when this script started it, remove the
# scratch directory unless KEEP_TEST_DATA=1, and re-raise the original status.
cleanup() {
    local status=$?
    case "${P2P_RELAY_STARTED:-0}" in
        1) cli_test_stop_p2p_relay ;;
    esac
    if [[ "$KEEP_TEST_DATA" == "1" ]]; then
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    else
        rm -rf "$WORK_DIR"
    fi
    exit "$status"
}
trap cleanup EXIT
# Start a local relay only when requested AND the RELAY URL points at localhost.
if [[ "$USE_INTERNAL_RELAY" == "1" ]]; then
    if cli_test_is_local_p2p_relay "$RELAY"; then
        cli_test_start_p2p_relay
        P2P_RELAY_STARTED=1
    else
        echo "[INFO] USE_INTERNAL_RELAY=1 but RELAY is not local ($RELAY), skipping local relay startup"
    fi
fi
echo "[INFO] preparing settings"
echo "[INFO] relay=$RELAY room=$ROOM_ID app=$APP_ID"
cli_test_init_settings_file "$SETTINGS"
cli_test_apply_p2p_settings "$SETTINGS" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY"
echo "[CASE] discover peers"
PEER_LINES="$(run_cli "$VAULT" --settings "$SETTINGS" p2p-peers "$PEERS_TIMEOUT")"
if [[ -z "$PEER_LINES" ]]; then
    echo "[FAIL] p2p-peers returned empty output" >&2
    exit 1
fi
# Peer lines are tab-separated: "[peer]<TAB><peer-id><TAB><peer-name>".
if ! awk -F $'\t' 'NF>=3 && $1=="[peer]" { found=1 } END { exit(found ? 0 : 1) }' <<< "$PEER_LINES"; then
    echo "[FAIL] p2p-peers output must include [peer]<TAB><peer-id><TAB><peer-name>" >&2
    echo "$PEER_LINES" >&2
    exit 1
fi
SELECTED_PEER_ID=""
SELECTED_PEER_NAME=""
if [[ -n "$TARGET_PEER" ]]; then
    # A specific peer was requested: match it against either its id or its name.
    while IFS=$'\t' read -r marker peer_id peer_name _; do
        if [[ "$marker" != "[peer]" ]]; then
            continue
        fi
        if [[ "$peer_id" == "$TARGET_PEER" || "$peer_name" == "$TARGET_PEER" ]]; then
            SELECTED_PEER_ID="$peer_id"
            SELECTED_PEER_NAME="$peer_name"
            break
        fi
    done <<< "$PEER_LINES"
    if [[ -z "$SELECTED_PEER_ID" ]]; then
        echo "[FAIL] TARGET_PEER=$TARGET_PEER was not found" >&2
        echo "$PEER_LINES" >&2
        exit 1
    fi
else
    # No target given: take the first advertised peer.
    SELECTED_PEER_ID="$(awk -F $'\t' 'NF>=3 && $1=="[peer]" {print $2; exit}' <<< "$PEER_LINES")"
    SELECTED_PEER_NAME="$(awk -F $'\t' 'NF>=3 && $1=="[peer]" {print $3; exit}' <<< "$PEER_LINES")"
fi
if [[ -z "$SELECTED_PEER_ID" ]]; then
    echo "[FAIL] could not extract peer-id from p2p-peers output" >&2
    echo "$PEER_LINES" >&2
    exit 1
fi
echo "[PASS] selected peer: ${SELECTED_PEER_ID} (${SELECTED_PEER_NAME:-unknown})"
echo "[CASE] run p2p-sync"
run_cli "$VAULT" --settings "$SETTINGS" p2p-sync "$SELECTED_PEER_ID" "$SYNC_TIMEOUT" >/dev/null
echo "[PASS] p2p-sync completed"

View File

@@ -0,0 +1,242 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
# Tunables (all overridable from the environment):
RUN_BUILD="${RUN_BUILD:-1}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
RELAY="${RELAY:-ws://localhost:4000/}"
USE_INTERNAL_RELAY="${USE_INTERNAL_RELAY:-1}"
ROOM_ID_PREFIX="${ROOM_ID_PREFIX:-p2p-room}"
PASSPHRASE_PREFIX="${PASSPHRASE_PREFIX:-p2p-pass}"
APP_ID="${APP_ID:-self-hosted-livesync-cli-tests}"
PEERS_TIMEOUT="${PEERS_TIMEOUT:-10}"
SYNC_TIMEOUT="${SYNC_TIMEOUT:-15}"
# Unique room/passphrase per run so concurrent or repeated runs cannot
# accidentally join each other's P2P room.
ROOM_ID="${ROOM_ID_PREFIX}-$(date +%s)-$RANDOM-$RANDOM"
PASSPHRASE="${PASSPHRASE_PREFIX}-$(date +%s)-$RANDOM-$RANDOM"
cli_test_init_cli_cmd
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi
# Three independent nodes: A hosts, B and C act as clients.
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-p2p-3nodes.XXXXXX")"
VAULT_A="$WORK_DIR/vault-a"
VAULT_B="$WORK_DIR/vault-b"
VAULT_C="$WORK_DIR/vault-c"
SETTINGS_A="$WORK_DIR/settings-a.json"
SETTINGS_B="$WORK_DIR/settings-b.json"
SETTINGS_C="$WORK_DIR/settings-c.json"
HOST_LOG="$WORK_DIR/p2p-host.log"
mkdir -p "$VAULT_A" "$VAULT_B" "$VAULT_C"
# EXIT trap: terminate the background p2p-host if it is still alive, stop the
# internal relay we may have started, dispose of the work directory unless
# KEEP_TEST_DATA=1, and re-raise the script's original exit status.
cleanup() {
    local status=$?
    if [[ -n "${HOST_PID:-}" ]] && kill -0 "$HOST_PID" >/dev/null 2>&1; then
        kill -TERM "$HOST_PID" >/dev/null 2>&1 || true
        wait "$HOST_PID" >/dev/null 2>&1 || true
    fi
    case "${P2P_RELAY_STARTED:-0}" in
        1) cli_test_stop_p2p_relay ;;
    esac
    if [[ "$KEEP_TEST_DATA" == "1" ]]; then
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    else
        rm -rf "$WORK_DIR"
    fi
    exit "$status"
}
trap cleanup EXIT
# Start a local relay only when requested AND the RELAY URL points at localhost.
if [[ "$USE_INTERNAL_RELAY" == "1" ]]; then
    if cli_test_is_local_p2p_relay "$RELAY"; then
        cli_test_start_p2p_relay
        P2P_RELAY_STARTED=1
    else
        echo "[INFO] USE_INTERNAL_RELAY=1 but RELAY is not local ($RELAY), skipping local relay startup"
    fi
fi
# Per-node wrappers binding each CLI invocation to its own vault + settings.
run_cli_a() {
    run_cli "$VAULT_A" --settings "$SETTINGS_A" "$@"
}
run_cli_b() {
    run_cli "$VAULT_B" --settings "$SETTINGS_B" "$@"
}
run_cli_c() {
    run_cli "$VAULT_C" --settings "$SETTINGS_C" "$@"
}
echo "[INFO] preparing settings"
echo "[INFO] relay=$RELAY room=$ROOM_ID app=$APP_ID"
cli_test_init_settings_file "$SETTINGS_A"
cli_test_init_settings_file "$SETTINGS_B"
cli_test_init_settings_file "$SETTINGS_C"
cli_test_apply_p2p_settings "$SETTINGS_A" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY"
cli_test_apply_p2p_settings "$SETTINGS_B" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY"
cli_test_apply_p2p_settings "$SETTINGS_C" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY"
echo "[CASE] start p2p-host on A"
# Host runs in the background; cleanup() terminates it on exit.
run_cli_a p2p-host >"$HOST_LOG" 2>&1 &
HOST_PID=$!
# Poll the host log (up to ~10 s) for the readiness banner.
for _ in 1 2 3 4 5 6 7 8 9 10; do
    echo "[INFO] waiting for p2p-host to start..."
    if grep -Fq "P2P host is running" "$HOST_LOG"; then
        break
    fi
    sleep 1
done
if ! grep -Fq "P2P host is running" "$HOST_LOG"; then
    echo "[FAIL] p2p-host did not become ready" >&2
    cat "$HOST_LOG" >&2
    exit 1
fi
echo "[PASS] p2p-host started"
echo "[CASE] discover host peer from B"
# Peer lines are tab-separated: "[peer]<TAB><peer-id><TAB><peer-name>".
PEERS_FROM_B="$(run_cli_b p2p-peers "$PEERS_TIMEOUT")"
HOST_PEER_ID="$(awk -F $'\t' 'NF>=3 && $1=="[peer]" {print $2; exit}' <<< "$PEERS_FROM_B")"
if [[ -z "$HOST_PEER_ID" ]]; then
    echo "[FAIL] B could not find host peer" >&2
    echo "$PEERS_FROM_B" >&2
    exit 1
fi
echo "[PASS] B discovered host peer: $HOST_PEER_ID"
echo "[CASE] discover host peer from C"
PEERS_FROM_C="$(run_cli_c p2p-peers "$PEERS_TIMEOUT")"
HOST_PEER_ID_FROM_C="$(awk -F $'\t' 'NF>=3 && $1=="[peer]" {print $2; exit}' <<< "$PEERS_FROM_C")"
if [[ -z "$HOST_PEER_ID_FROM_C" ]]; then
    echo "[FAIL] C could not find host peer" >&2
    echo "$PEERS_FROM_C" >&2
    exit 1
fi
echo "[PASS] C discovered host peer: $HOST_PEER_ID_FROM_C"
TARGET_PATH="p2p/conflicted-from-two-clients.txt"
echo "[CASE] B creates file and syncs"
printf 'from-client-b-v1\n' | run_cli_b put "$TARGET_PATH" >/dev/null
run_cli_b p2p-sync "$HOST_PEER_ID" "$SYNC_TIMEOUT" >/dev/null
echo "[CASE] C syncs and can see B file"
run_cli_c p2p-sync "$HOST_PEER_ID_FROM_C" "$SYNC_TIMEOUT" >/dev/null
# Propagation can lag; retry the read (re-syncing between attempts) up to 5 times.
VISIBLE_ON_C=""
for _ in 1 2 3 4 5; do
    if VISIBLE_ON_C="$(run_cli_c cat "$TARGET_PATH" 2>/dev/null | cli_test_sanitise_cat_stdout)"; then
        if [[ "$VISIBLE_ON_C" == "from-client-b-v1" ]]; then
            break
        fi
    fi
    run_cli_c p2p-sync "$HOST_PEER_ID_FROM_C" "$SYNC_TIMEOUT" >/dev/null
    sleep 1
done
cli_test_assert_equal "from-client-b-v1" "$VISIBLE_ON_C" "C should see file created by B"
echo "[CASE] B and C modify file independently"
# Divergent writes on B and C create the conflict this test wants to observe.
printf 'from-client-b-v2\n' | run_cli_b put "$TARGET_PATH" >/dev/null
printf 'from-client-c-v2\n' | run_cli_c put "$TARGET_PATH" >/dev/null
echo "[CASE] B and C sync to host concurrently"
# Run both syncs in parallel; collect exit codes without tripping set -e.
set +e
run_cli_b p2p-sync "$HOST_PEER_ID" "$SYNC_TIMEOUT" >/dev/null &
SYNC_B_PID=$!
run_cli_c p2p-sync "$HOST_PEER_ID_FROM_C" "$SYNC_TIMEOUT" >/dev/null &
SYNC_C_PID=$!
wait "$SYNC_B_PID"
SYNC_B_EXIT=$?
wait "$SYNC_C_PID"
SYNC_C_EXIT=$?
set -e
if [[ "$SYNC_B_EXIT" -ne 0 || "$SYNC_C_EXIT" -ne 0 ]]; then
    echo "[FAIL] concurrent sync failed: B=$SYNC_B_EXIT C=$SYNC_C_EXIT" >&2
    exit 1
fi
echo "[CASE] sync back to clients"
run_cli_b p2p-sync "$HOST_PEER_ID" "$SYNC_TIMEOUT" >/dev/null
run_cli_c p2p-sync "$HOST_PEER_ID_FROM_C" "$SYNC_TIMEOUT" >/dev/null
echo "[CASE] B info shows conflict"
# The checks below treat "N/A" as "no conflicts" in the info output.
INFO_JSON_B_BEFORE="$(run_cli_b info "$TARGET_PATH")"
CONFLICTS_B_BEFORE="$(printf '%s' "$INFO_JSON_B_BEFORE" | cli_test_json_string_field_from_stdin conflicts)"
KEEP_REV_B="$(printf '%s' "$INFO_JSON_B_BEFORE" | cli_test_json_string_field_from_stdin revision)"
if [[ "$CONFLICTS_B_BEFORE" == "N/A" || -z "$CONFLICTS_B_BEFORE" ]]; then
    echo "[FAIL] expected conflicts on B after two-client sync" >&2
    echo "$INFO_JSON_B_BEFORE" >&2
    exit 1
fi
if [[ -z "$KEEP_REV_B" ]]; then
    echo "[FAIL] could not read current revision on B for resolve" >&2
    echo "$INFO_JSON_B_BEFORE" >&2
    exit 1
fi
echo "[PASS] conflict detected on B"
echo "[CASE] C info shows conflict"
INFO_JSON_C_BEFORE="$(run_cli_c info "$TARGET_PATH")"
CONFLICTS_C_BEFORE="$(printf '%s' "$INFO_JSON_C_BEFORE" | cli_test_json_string_field_from_stdin conflicts)"
KEEP_REV_C="$(printf '%s' "$INFO_JSON_C_BEFORE" | cli_test_json_string_field_from_stdin revision)"
if [[ "$CONFLICTS_C_BEFORE" == "N/A" || -z "$CONFLICTS_C_BEFORE" ]]; then
    echo "[FAIL] expected conflicts on C after two-client sync" >&2
    echo "$INFO_JSON_C_BEFORE" >&2
    exit 1
fi
if [[ -z "$KEEP_REV_C" ]]; then
    echo "[FAIL] could not read current revision on C for resolve" >&2
    echo "$INFO_JSON_C_BEFORE" >&2
    exit 1
fi
echo "[PASS] conflict detected on C"
echo "[CASE] resolve conflict on B and C"
# Each client keeps its own current revision; either surviving content is valid.
run_cli_b resolve "$TARGET_PATH" "$KEEP_REV_B" >/dev/null
run_cli_c resolve "$TARGET_PATH" "$KEEP_REV_C" >/dev/null
INFO_JSON_B_AFTER="$(run_cli_b info "$TARGET_PATH")"
CONFLICTS_B_AFTER="$(printf '%s' "$INFO_JSON_B_AFTER" | cli_test_json_string_field_from_stdin conflicts)"
if [[ "$CONFLICTS_B_AFTER" != "N/A" ]]; then
    echo "[FAIL] conflict still remains on B after resolve" >&2
    echo "$INFO_JSON_B_AFTER" >&2
    exit 1
fi
INFO_JSON_C_AFTER="$(run_cli_c info "$TARGET_PATH")"
CONFLICTS_C_AFTER="$(printf '%s' "$INFO_JSON_C_AFTER" | cli_test_json_string_field_from_stdin conflicts)"
if [[ "$CONFLICTS_C_AFTER" != "N/A" ]]; then
    echo "[FAIL] conflict still remains on C after resolve" >&2
    echo "$INFO_JSON_C_AFTER" >&2
    exit 1
fi
# The surviving content must be one of the two candidate versions on each node.
FINAL_CONTENT_B="$(run_cli_b cat "$TARGET_PATH" | cli_test_sanitise_cat_stdout)"
FINAL_CONTENT_C="$(run_cli_c cat "$TARGET_PATH" | cli_test_sanitise_cat_stdout)"
if [[ "$FINAL_CONTENT_B" != "from-client-b-v2" && "$FINAL_CONTENT_B" != "from-client-c-v2" ]]; then
    echo "[FAIL] unexpected final content on B after resolve" >&2
    echo "[FAIL] final content on B: $FINAL_CONTENT_B" >&2
    exit 1
fi
if [[ "$FINAL_CONTENT_C" != "from-client-b-v2" && "$FINAL_CONTENT_C" != "from-client-c-v2" ]]; then
    echo "[FAIL] unexpected final content on C after resolve" >&2
    echo "[FAIL] final content on C: $FINAL_CONTENT_C" >&2
    exit 1
fi
echo "[PASS] conflicts resolved on B and C"
echo "[PASS] all 3-node P2P conflict scenarios passed"

View File

@@ -0,0 +1,228 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
# Tunables (all overridable from the environment):
RUN_BUILD="${RUN_BUILD:-1}"
# NOTE(review): KEEP_TEST_DATA defaults to 1 here, while every sibling test
# defaults to 0 — presumably left on for debugging this reproduction
# scenario; confirm whether it should default to 0.
KEEP_TEST_DATA="${KEEP_TEST_DATA:-1}"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
RELAY="${RELAY:-ws://localhost:4000/}"
USE_INTERNAL_RELAY="${USE_INTERNAL_RELAY:-1}"
APP_ID="${APP_ID:-self-hosted-livesync-cli-tests}"
PEERS_TIMEOUT="${PEERS_TIMEOUT:-20}"
SYNC_TIMEOUT="${SYNC_TIMEOUT:-240}"
# Unique room/passphrase/peer names per run to avoid cross-talk between runs.
ROOM_ID="p2p-room-$(date +%s)-$RANDOM-$RANDOM"
PASSPHRASE="p2p-pass-$(date +%s)-$RANDOM-$RANDOM"
HOST_PEER_NAME="p2p-cli-host"
UPLOAD_PEER_NAME="p2p-cli-upload-$(date +%s)-$RANDOM"
DOWNLOAD_PEER_NAME="p2p-cli-download-$(date +%s)-$RANDOM"
cli_test_init_cli_cmd
if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi
# Three nodes: a host plus dedicated upload and download clients.
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-p2p-upload-download.XXXXXX")"
VAULT_HOST="$WORK_DIR/vault-host"
VAULT_UP="$WORK_DIR/vault-up"
VAULT_DOWN="$WORK_DIR/vault-down"
SETTINGS_HOST="$WORK_DIR/settings-host.json"
SETTINGS_UP="$WORK_DIR/settings-up.json"
SETTINGS_DOWN="$WORK_DIR/settings-down.json"
HOST_LOG="$WORK_DIR/p2p-host.log"
mkdir -p "$VAULT_HOST" "$VAULT_UP" "$VAULT_DOWN"
# EXIT trap: terminate the background p2p-host if it is still alive, stop the
# internal relay we may have started, dispose of the work directory unless
# KEEP_TEST_DATA=1, and re-raise the script's original exit status.
cleanup() {
    local status=$?
    if [[ -n "${HOST_PID:-}" ]] && kill -0 "$HOST_PID" >/dev/null 2>&1; then
        kill -TERM "$HOST_PID" >/dev/null 2>&1 || true
        wait "$HOST_PID" >/dev/null 2>&1 || true
    fi
    case "${P2P_RELAY_STARTED:-0}" in
        1) cli_test_stop_p2p_relay ;;
    esac
    if [[ "$KEEP_TEST_DATA" == "1" ]]; then
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    else
        rm -rf "$WORK_DIR"
    fi
    exit "$status"
}
trap cleanup EXIT
# Start a local relay only when requested AND the RELAY URL points at localhost.
if [[ "$USE_INTERNAL_RELAY" == "1" ]]; then
    if cli_test_is_local_p2p_relay "$RELAY"; then
        cli_test_start_p2p_relay
        P2P_RELAY_STARTED=1
    else
        echo "[INFO] USE_INTERNAL_RELAY=1 but RELAY is not local ($RELAY), skipping local relay startup"
    fi
fi
# Per-node wrappers binding each CLI invocation to its own vault + settings.
run_cli_host() {
    run_cli "$VAULT_HOST" --settings "$SETTINGS_HOST" "$@"
}
run_cli_up() {
    run_cli "$VAULT_UP" --settings "$SETTINGS_UP" "$@"
}
run_cli_down() {
    run_cli "$VAULT_DOWN" --settings "$SETTINGS_DOWN" "$@"
}
# Tighten the generated settings for this scenario (P2P-only remote, E2E
# encryption, path obfuscation, small custom chunk size) and assign the
# device's P2P peer name.
# $1: settings file path; $2: P2P device peer name.
apply_p2p_test_tweaks() {
    local settings_file="$1"
    local device_name="$2"
    # An inline Node script edits the JSON settings file in place.
    SETTINGS_FILE="$settings_file" DEVICE_NAME="$device_name" PASSPHRASE_VAL="$PASSPHRASE" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.remoteType = "ONLY_P2P";
data.encrypt = true;
data.passphrase = process.env.PASSPHRASE_VAL;
data.usePathObfuscation = true;
data.handleFilenameCaseSensitive = false;
data.customChunkSize = 50;
data.usePluginSyncV2 = true;
data.doNotUseFixedRevisionForChunks = false;
data.P2P_DevicePeerName = process.env.DEVICE_NAME;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Discover the first advertised peer id as seen from the "up" or "down"
# client ($1). Prints the peer id on stdout; returns 1 (with diagnostics on
# stderr) when no "[peer]" line is present in the p2p-peers output.
discover_peer_id() {
    local side="$1"
    local listing
    case "$side" in
        up) listing="$(run_cli_up p2p-peers "$PEERS_TIMEOUT")" ;;
        *) listing="$(run_cli_down p2p-peers "$PEERS_TIMEOUT")" ;;
    esac
    local found
    # Peer lines are tab-separated: "[peer]<TAB><peer-id><TAB><peer-name>".
    found="$(awk -F $'\t' 'NF>=3 && $1=="[peer]" {print $2; exit}' <<< "$listing")"
    if [[ -z "$found" ]]; then
        echo "[FAIL] ${side} could not discover any peer" >&2
        echo "[FAIL] peers output:" >&2
        echo "$listing" >&2
        return 1
    fi
    echo "$found"
}
echo "[INFO] preparing settings"
echo "[INFO] relay=$RELAY room=$ROOM_ID app=$APP_ID"
cli_test_init_settings_file "$SETTINGS_HOST"
cli_test_init_settings_file "$SETTINGS_UP"
cli_test_init_settings_file "$SETTINGS_DOWN"
# Extra "~.*" argument — presumably the auto-accepting peers pattern handled
# by cli_test_apply_p2p_settings; verify against the helper.
cli_test_apply_p2p_settings "$SETTINGS_HOST" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY" "~.*"
cli_test_apply_p2p_settings "$SETTINGS_UP" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY" "~.*"
cli_test_apply_p2p_settings "$SETTINGS_DOWN" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY" "~.*"
apply_p2p_test_tweaks "$SETTINGS_HOST" "$HOST_PEER_NAME"
apply_p2p_test_tweaks "$SETTINGS_UP" "$UPLOAD_PEER_NAME"
apply_p2p_test_tweaks "$SETTINGS_DOWN" "$DOWNLOAD_PEER_NAME"
echo "[CASE] start p2p-host"
# Host runs in the background; cleanup() terminates it on exit.
run_cli_host p2p-host >"$HOST_LOG" 2>&1 &
HOST_PID=$!
# Poll the host log (up to ~12 s) for the readiness banner.
for _ in 1 2 3 4 5 6 7 8 9 10 11 12; do
    if grep -Fq "P2P host is running" "$HOST_LOG"; then
        break
    fi
    sleep 1
done
if ! grep -Fq "P2P host is running" "$HOST_LOG"; then
    echo "[FAIL] p2p-host did not become ready" >&2
    cat "$HOST_LOG" >&2
    exit 1
fi
echo "[PASS] p2p-host started"
echo "[CASE] upload peer discovers host"
HOST_PEER_ID_FOR_UP="$(discover_peer_id up)"
echo "[PASS] upload peer discovered host: $HOST_PEER_ID_FOR_UP"
echo "[CASE] upload phase writes source files"
# Small text fixtures.
STORE_TEXT="$WORK_DIR/store-file.md"
DIFF_A_TEXT="$WORK_DIR/test-diff-1.md"
DIFF_B_TEXT="$WORK_DIR/test-diff-2.md"
DIFF_C_TEXT="$WORK_DIR/test-diff-3.md"
printf 'Hello, World!\n' > "$STORE_TEXT"
printf 'Content A\n' > "$DIFF_A_TEXT"
printf 'Content B\n' > "$DIFF_B_TEXT"
printf 'Content C\n' > "$DIFF_C_TEXT"
run_cli_up push "$STORE_TEXT" p2p/store-file.md >/dev/null
run_cli_up push "$DIFF_A_TEXT" p2p/test-diff-1.md >/dev/null
run_cli_up push "$DIFF_B_TEXT" p2p/test-diff-2.md >/dev/null
run_cli_up push "$DIFF_C_TEXT" p2p/test-diff-3.md >/dev/null
# Large text fixtures: 100 kB and 1 MB of repeated characters.
LARGE_TXT_100K="$WORK_DIR/large-100k.txt"
LARGE_TXT_1M="$WORK_DIR/large-1m.txt"
head -c 100000 /dev/zero | tr '\0' 'a' > "$LARGE_TXT_100K"
head -c 1000000 /dev/zero | tr '\0' 'b' > "$LARGE_TXT_1M"
run_cli_up push "$LARGE_TXT_100K" p2p/large-100000.md >/dev/null
run_cli_up push "$LARGE_TXT_1M" p2p/large-1000000.md >/dev/null
# Binary fixtures: 100 kB and 5 MB of random bytes.
BINARY_100K="$WORK_DIR/binary-100k.bin"
BINARY_5M="$WORK_DIR/binary-5m.bin"
head -c 100000 /dev/urandom > "$BINARY_100K"
head -c 5000000 /dev/urandom > "$BINARY_5M"
run_cli_up push "$BINARY_100K" p2p/binary-100000.bin >/dev/null
run_cli_up push "$BINARY_5M" p2p/binary-5000000.bin >/dev/null
echo "[PASS] upload source files prepared"
echo "[CASE] upload phase syncs to host"
# Sync runs twice — presumably to flush everything to the host; confirm
# whether a single pass suffices.
run_cli_up p2p-sync "$HOST_PEER_ID_FOR_UP" "$SYNC_TIMEOUT" >/dev/null
run_cli_up p2p-sync "$HOST_PEER_ID_FOR_UP" "$SYNC_TIMEOUT" >/dev/null
echo "[PASS] upload phase synced"
echo "[CASE] download peer discovers host"
HOST_PEER_ID_FOR_DOWN="$(discover_peer_id down)"
echo "[PASS] download peer discovered host: $HOST_PEER_ID_FOR_DOWN"
echo "[CASE] download phase syncs from host"
# Two passes, mirroring the upload side.
run_cli_down p2p-sync "$HOST_PEER_ID_FOR_DOWN" "$SYNC_TIMEOUT" >/dev/null
run_cli_down p2p-sync "$HOST_PEER_ID_FOR_DOWN" "$SYNC_TIMEOUT" >/dev/null
echo "[PASS] download phase synced"
echo "[CASE] verify text files on download peer"
# Pull each synced entry and compare byte-for-byte with the original fixture.
DOWN_STORE_TEXT="$WORK_DIR/down-store-file.md"
DOWN_DIFF_A_TEXT="$WORK_DIR/down-test-diff-1.md"
DOWN_DIFF_B_TEXT="$WORK_DIR/down-test-diff-2.md"
DOWN_DIFF_C_TEXT="$WORK_DIR/down-test-diff-3.md"
run_cli_down pull p2p/store-file.md "$DOWN_STORE_TEXT" >/dev/null
run_cli_down pull p2p/test-diff-1.md "$DOWN_DIFF_A_TEXT" >/dev/null
run_cli_down pull p2p/test-diff-2.md "$DOWN_DIFF_B_TEXT" >/dev/null
run_cli_down pull p2p/test-diff-3.md "$DOWN_DIFF_C_TEXT" >/dev/null
cmp -s "$STORE_TEXT" "$DOWN_STORE_TEXT" || { echo "[FAIL] store-file mismatch" >&2; exit 1; }
cmp -s "$DIFF_A_TEXT" "$DOWN_DIFF_A_TEXT" || { echo "[FAIL] test-diff-1 mismatch" >&2; exit 1; }
cmp -s "$DIFF_B_TEXT" "$DOWN_DIFF_B_TEXT" || { echo "[FAIL] test-diff-2 mismatch" >&2; exit 1; }
cmp -s "$DIFF_C_TEXT" "$DOWN_DIFF_C_TEXT" || { echo "[FAIL] test-diff-3 mismatch" >&2; exit 1; }
echo "[CASE] verify pushed files on download peer"
# Same byte-for-byte verification for the large text and binary fixtures.
DOWN_LARGE_100K="$WORK_DIR/down-large-100k.txt"
DOWN_LARGE_1M="$WORK_DIR/down-large-1m.txt"
DOWN_BINARY_100K="$WORK_DIR/down-binary-100k.bin"
DOWN_BINARY_5M="$WORK_DIR/down-binary-5m.bin"
run_cli_down pull p2p/large-100000.md "$DOWN_LARGE_100K" >/dev/null
run_cli_down pull p2p/large-1000000.md "$DOWN_LARGE_1M" >/dev/null
run_cli_down pull p2p/binary-100000.bin "$DOWN_BINARY_100K" >/dev/null
run_cli_down pull p2p/binary-5000000.bin "$DOWN_BINARY_5M" >/dev/null
cmp -s "$LARGE_TXT_100K" "$DOWN_LARGE_100K" || { echo "[FAIL] large-100000 mismatch" >&2; exit 1; }
cmp -s "$LARGE_TXT_1M" "$DOWN_LARGE_1M" || { echo "[FAIL] large-1000000 mismatch" >&2; exit 1; }
cmp -s "$BINARY_100K" "$DOWN_BINARY_100K" || { echo "[FAIL] binary-100000 mismatch" >&2; exit 1; }
cmp -s "$BINARY_5M" "$DOWN_BINARY_5M" || { echo "[FAIL] binary-5000000 mismatch" >&2; exit 1; }
echo "[PASS] CLI P2P upload/download reproduction scenario completed"

View File

@@ -4,10 +4,12 @@ set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/push-pull.txt}"
cli_test_init_cli_cmd
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
@@ -19,26 +21,12 @@ if [[ "$RUN_BUILD" == "1" ]]; then
npm run build
fi
run_cli() {
"${CLI_CMD[@]}" "$@"
}
echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
if [[ -n "${COUCHDB_URI:-}" && -n "${COUCHDB_USER:-}" && -n "${COUCHDB_PASSWORD:-}" && -n "${COUCHDB_DBNAME:-}" ]]; then
echo "[INFO] applying CouchDB env vars to generated settings"
SETTINGS_FILE="$SETTINGS_FILE" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_couchdb_settings "$SETTINGS_FILE" "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
else
echo "[WARN] CouchDB env vars are not fully set. push/pull may fail unless generated settings are updated."
fi

View File

@@ -5,11 +5,13 @@ SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
REPO_ROOT="$(cd -- "$CLI_DIR/../../.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/setup-put-cat.txt}"
SETUP_PASSPHRASE="${SETUP_PASSPHRASE:-setup-passphrase}"
cli_test_init_cli_cmd
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
@@ -21,12 +23,8 @@ if [[ "$RUN_BUILD" == "1" ]]; then
npm run build
fi
run_cli() {
# Thin wrapper: forward all arguments to the CLI via the configured npm command array.
"${CLI_CMD[@]}" "$@"
}
echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
echo "[INFO] creating setup URI from settings"
SETUP_URI="$(
@@ -84,7 +82,7 @@ CAT_OUTPUT="$WORK_DIR/cat-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" cat "$REMOTE_PATH" > "$CAT_OUTPUT"
CAT_OUTPUT_CLEAN="$WORK_DIR/cat-output-clean.txt"
grep -v '^\[CLIWatchAdapter\] File watching is not enabled in CLI version$' "$CAT_OUTPUT" > "$CAT_OUTPUT_CLEAN" || true
cli_test_sanitise_cat_stdout < "$CAT_OUTPUT" > "$CAT_OUTPUT_CLEAN"
if cmp -s "$SRC_FILE" "$CAT_OUTPUT_CLEAN"; then
echo "[PASS] setup/put/cat roundtrip matched"
@@ -175,48 +173,52 @@ echo "[INFO] info $REMOTE_PATH"
INFO_OUTPUT="$WORK_DIR/info-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REMOTE_PATH" > "$INFO_OUTPUT"
# Check required label lines
for label in "ID:" "Revision:" "Conflicts:" "Filename:" "Path:" "Size:" "Chunks:"; do
if ! grep -q "^$label" "$INFO_OUTPUT"; then
echo "[FAIL] info output missing label: $label" >&2
cat "$INFO_OUTPUT" >&2
exit 1
fi
done
# Path value must match
INFO_PATH="$(grep '^Path:' "$INFO_OUTPUT" | sed 's/^Path:[[:space:]]*//')"
if [[ "$INFO_PATH" != "$REMOTE_PATH" ]]; then
echo "[FAIL] info Path mismatch: $INFO_PATH" >&2
exit 1
fi
# Filename must be the basename
INFO_FILENAME="$(grep '^Filename:' "$INFO_OUTPUT" | sed 's/^Filename:[[:space:]]*//')"
EXPECTED_FILENAME="$(basename "$REMOTE_PATH")"
if [[ "$INFO_FILENAME" != "$EXPECTED_FILENAME" ]]; then
echo "[FAIL] info Filename mismatch: $INFO_FILENAME != $EXPECTED_FILENAME" >&2
exit 1
fi
set +e
INFO_JSON_CHECK="$(
INFO_OUTPUT="$INFO_OUTPUT" REMOTE_PATH="$REMOTE_PATH" EXPECTED_FILENAME="$EXPECTED_FILENAME" node - <<'NODE'
const fs = require("node:fs");
# Size must be numeric
INFO_SIZE="$(grep '^Size:' "$INFO_OUTPUT" | sed 's/^Size:[[:space:]]*//')"
if [[ ! "$INFO_SIZE" =~ ^[0-9]+$ ]]; then
echo "[FAIL] info Size is not numeric: $INFO_SIZE" >&2
exit 1
fi
const content = fs.readFileSync(process.env.INFO_OUTPUT, "utf-8");
let data;
try {
data = JSON.parse(content);
} catch (ex) {
console.error("invalid-json");
process.exit(1);
}
# Chunks count must be numeric and ≥1
INFO_CHUNKS="$(grep '^Chunks:' "$INFO_OUTPUT" | sed 's/^Chunks:[[:space:]]*//')"
if [[ ! "$INFO_CHUNKS" =~ ^[0-9]+$ ]] || [[ "$INFO_CHUNKS" -lt 1 ]]; then
echo "[FAIL] info Chunks is not a positive integer: $INFO_CHUNKS" >&2
exit 1
fi
# Conflicts should be N/A (no live CouchDB)
INFO_CONFLICTS="$(grep '^Conflicts:' "$INFO_OUTPUT" | sed 's/^Conflicts:[[:space:]]*//')"
if [[ "$INFO_CONFLICTS" != "N/A" ]]; then
echo "[FAIL] info Conflicts expected N/A, got: $INFO_CONFLICTS" >&2
if (!data || typeof data !== "object") {
console.error("invalid-payload");
process.exit(1);
}
if (data.path !== process.env.REMOTE_PATH) {
console.error(`path-mismatch:${String(data.path)}`);
process.exit(1);
}
if (data.filename !== process.env.EXPECTED_FILENAME) {
console.error(`filename-mismatch:${String(data.filename)}`);
process.exit(1);
}
if (!Number.isInteger(data.size) || data.size < 0) {
console.error(`size-invalid:${String(data.size)}`);
process.exit(1);
}
if (!Number.isInteger(data.chunks) || data.chunks < 1) {
console.error(`chunks-invalid:${String(data.chunks)}`);
process.exit(1);
}
if (data.conflicts !== "N/A") {
console.error(`conflicts-invalid:${String(data.conflicts)}`);
process.exit(1);
}
NODE
)"
INFO_JSON_EXIT=$?
set -e
if [[ "$INFO_JSON_EXIT" -ne 0 ]]; then
echo "[FAIL] info JSON output validation failed: $INFO_JSON_CHECK" >&2
cat "$INFO_OUTPUT" >&2
exit 1
fi
@@ -292,8 +294,30 @@ echo "[INFO] info $REV_PATH (past revisions)"
REV_INFO_OUTPUT="$WORK_DIR/rev-info-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REV_PATH" > "$REV_INFO_OUTPUT"
PAST_REV="$(grep '^ rev: ' "$REV_INFO_OUTPUT" | head -n 1 | sed 's/^ rev: //')"
if [[ -z "$PAST_REV" ]]; then
set +e
PAST_REV="$(
REV_INFO_OUTPUT="$REV_INFO_OUTPUT" node - <<'NODE'
const fs = require("node:fs");
const content = fs.readFileSync(process.env.REV_INFO_OUTPUT, "utf-8");
let data;
try {
data = JSON.parse(content);
} catch {
process.exit(1);
}
const revisions = Array.isArray(data?.revisions) ? data.revisions : [];
const revision = revisions.find((rev) => typeof rev === "string" && rev !== "N/A");
if (!revision) {
process.exit(1);
}
process.stdout.write(revision);
NODE
)"
PAST_REV_EXIT=$?
set -e
if [[ "$PAST_REV_EXIT" -ne 0 ]] || [[ -z "$PAST_REV" ]]; then
echo "[FAIL] info output did not include any past revision" >&2
cat "$REV_INFO_OUTPUT" >&2
exit 1

View File

@@ -1,39 +1,66 @@
#!/usr/bin/env bash
## TODO: test this script. I would love to go to my bed today (3a.m.) However, I am so excited about the new CLI that I want to at least get this skeleton in place. Delightful days!
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
COUCHDB_URI="${COUCHDB_URI:-}"
COUCHDB_USER="${COUCHDB_USER:-}"
COUCHDB_PASSWORD="${COUCHDB_PASSWORD:-}"
COUCHDB_DBNAME_BASE="${COUCHDB_DBNAME:-livesync-cli-e2e}"
TEST_ENV_FILE="${TEST_ENV_FILE:-$CLI_DIR/.test.env}"
cli_test_init_cli_cmd
if [[ ! -f "$TEST_ENV_FILE" ]]; then
echo "[ERROR] test env file not found: $TEST_ENV_FILE" >&2
exit 1
fi
set -a
source "$TEST_ENV_FILE"
set +a
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-two-db-test.XXXXXX")"
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_URI="${hostname%/}"
COUCHDB_DBNAME="${dbname}-${DB_SUFFIX}"
COUCHDB_USER="${username:-}"
COUCHDB_PASSWORD="${password:-}"
if [[ -z "$COUCHDB_URI" || -z "$COUCHDB_USER" || -z "$COUCHDB_PASSWORD" ]]; then
echo "[ERROR] COUCHDB_URI, COUCHDB_USER, COUCHDB_PASSWORD are required" >&2
exit 1
fi
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-two-db-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
cleanup() {
local exit_code=$?
cli_test_stop_couchdb
run_cli() {
"${CLI_CMD[@]}" "$@"
rm -rf "$WORK_DIR"
# Note: we do not attempt to delete the test database, as it may cause issues if the test failed in a way that leaves the database in an inconsistent state. The test database is named with a unique suffix, so it should not interfere with other tests.
echo "[INFO] test completed with exit code $exit_code. Test database '$COUCHDB_DBNAME' is not deleted for debugging purposes."
exit "$exit_code"
}
trap cleanup EXIT
start_remote() {
# Prepare the remote CouchDB for this test run via the shared helper.
# NOTE(review): helper lives in test-helpers.sh (not visible here); presumably it
# creates/validates the database named by COUCHDB_DBNAME — confirm there.
cli_test_start_couchdb "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
}
DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_DBNAME="${COUCHDB_DBNAME_BASE}-${DB_SUFFIX}"
echo "[INFO] using CouchDB database: $COUCHDB_DBNAME"
start_remote
VAULT_A="$WORK_DIR/vault-a"
VAULT_B="$WORK_DIR/vault-b"
@@ -41,31 +68,12 @@ SETTINGS_A="$WORK_DIR/a-settings.json"
SETTINGS_B="$WORK_DIR/b-settings.json"
mkdir -p "$VAULT_A" "$VAULT_B"
run_cli init-settings --force "$SETTINGS_A" >/dev/null
run_cli init-settings --force "$SETTINGS_B" >/dev/null
cli_test_init_settings_file "$SETTINGS_A"
cli_test_init_settings_file "$SETTINGS_B"
apply_settings() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" \
COUCHDB_URI="$COUCHDB_URI" \
COUCHDB_USER="$COUCHDB_USER" \
COUCHDB_PASSWORD="$COUCHDB_PASSWORD" \
COUCHDB_DBNAME="$COUCHDB_DBNAME" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_couchdb_settings "$settings_file" "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME" 1
}
apply_settings "$SETTINGS_A"
@@ -95,24 +103,12 @@ cat_b() {
run_cli_b cat "$1"
}
assert_equal() {
    # assert_equal <expected> <actual> <message>
    # Compare two strings; on mismatch print a diagnostic to stderr and abort the run.
    local want="$1" got="$2" why="$3"
    [[ "$want" == "$got" ]] && return 0
    {
        echo "[FAIL] $why"
        echo "expected: $want"
        echo "actual: $got"
    } >&2
    exit 1
}
echo "[INFO] case1: A creates file, B can read after sync"
printf 'from-a\n' | run_cli_a put shared/from-a.txt >/dev/null
sync_a
sync_b
VALUE_FROM_B="$(cat_b shared/from-a.txt)"
assert_equal "from-a" "$VALUE_FROM_B" "B could not read file created on A"
cli_test_assert_equal "from-a" "$VALUE_FROM_B" "B could not read file created on A"
echo "[PASS] case1 passed"
echo "[INFO] case2: B creates file, A can read after sync"
@@ -120,7 +116,7 @@ printf 'from-b\n' | run_cli_b put shared/from-b.txt >/dev/null
sync_b
sync_a
VALUE_FROM_A="$(cat_a shared/from-b.txt)"
assert_equal "from-b" "$VALUE_FROM_A" "A could not read file created on B"
cli_test_assert_equal "from-b" "$VALUE_FROM_A" "A could not read file created on B"
echo "[PASS] case2 passed"
echo "[INFO] case3: concurrent edits create conflict"
@@ -131,15 +127,25 @@ sync_b
printf 'edit-from-a\n' | run_cli_a put shared/conflicted.txt >/dev/null
printf 'edit-from-b\n' | run_cli_b put shared/conflicted.txt >/dev/null
sync_a
sync_b
INFO_A="$WORK_DIR/info-a.txt"
INFO_B="$WORK_DIR/info-b.txt"
run_cli_a info shared/conflicted.txt > "$INFO_A"
run_cli_b info shared/conflicted.txt > "$INFO_B"
CONFLICT_DETECTED=0
for side in a b; do
if [[ "$side" == "a" ]]; then
sync_a
else
sync_b
fi
if grep -q '^Conflicts: N/A$' "$INFO_A" && grep -q '^Conflicts: N/A$' "$INFO_B"; then
run_cli_a info shared/conflicted.txt > "$INFO_A"
run_cli_b info shared/conflicted.txt > "$INFO_B"
if ! cli_test_json_field_is_na "$INFO_A" conflicts || ! cli_test_json_field_is_na "$INFO_B" conflicts; then
CONFLICT_DETECTED=1
break
fi
done
if [[ "$CONFLICT_DETECTED" != "1" ]]; then
echo "[FAIL] expected conflict after concurrent edits, but both sides show N/A" >&2
echo "--- A info ---" >&2
cat "$INFO_A" >&2
@@ -150,21 +156,60 @@ fi
echo "[PASS] case3 conflict detected"
echo "[INFO] case4: resolve on A, sync, and verify B has no conflict"
KEEP_REV="$(sed -n 's/^Revision:[[:space:]]*//p' "$INFO_A" | head -n 1)"
INFO_A_AFTER="$WORK_DIR/info-a-after-resolve.txt"
INFO_B_AFTER="$WORK_DIR/info-b-after-resolve.txt"
# Ensure A sees the conflict before resolving; otherwise resolve may be a no-op.
for _ in 1 2 3 4 5; do
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
if ! cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
break
fi
sync_b
sync_a
done
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
if cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
echo "[FAIL] A does not see conflict, cannot resolve from A only" >&2
cat "$INFO_A_AFTER" >&2
exit 1
fi
KEEP_REV="$(cli_test_json_string_field_from_file "$INFO_A_AFTER" revision)"
if [[ -z "$KEEP_REV" ]]; then
echo "[FAIL] could not read Revision from A info output" >&2
cat "$INFO_A" >&2
echo "[FAIL] could not read revision from A info output" >&2
cat "$INFO_A_AFTER" >&2
exit 1
fi
run_cli_a resolve shared/conflicted.txt "$KEEP_REV" >/dev/null
sync_a
sync_b
INFO_B_AFTER="$WORK_DIR/info-b-after-resolve.txt"
run_cli_b info shared/conflicted.txt > "$INFO_B_AFTER"
if ! grep -q '^Conflicts: N/A$' "$INFO_B_AFTER"; then
echo "[FAIL] B still has conflicts after resolving on A and syncing" >&2
RESOLVE_PROPAGATED=0
for _ in 1 2 3 4 5 6; do
sync_a
sync_b
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
run_cli_b info shared/conflicted.txt > "$INFO_B_AFTER"
if cli_test_json_field_is_na "$INFO_A_AFTER" conflicts && cli_test_json_field_is_na "$INFO_B_AFTER" conflicts; then
RESOLVE_PROPAGATED=1
break
fi
# Retry resolve from A only when conflict remains due to eventual consistency.
if ! cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
KEEP_REV_A="$(cli_test_json_string_field_from_file "$INFO_A_AFTER" revision)"
if [[ -n "$KEEP_REV_A" ]]; then
run_cli_a resolve shared/conflicted.txt "$KEEP_REV_A" >/dev/null || true
fi
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
echo "[FAIL] conflicts should be resolved on both A and B" >&2
echo "--- A info after resolve ---" >&2
cat "$INFO_A_AFTER" >&2
echo "--- B info after resolve ---" >&2
cat "$INFO_B_AFTER" >&2
exit 1
fi

2
src/apps/cli/util/p2p-init.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Init hook for the P2P test harness — nothing to set up; just report that fact.
printf '%s\n' "P2P Init - No additional initialization required."

2
src/apps/cli/util/p2p-start.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Start a local nostr relay container used as the P2P signalling server for tests.
# Remove any leftover container from a previous (possibly failed) run first, so a
# repeated `docker run --name relay-test` does not fail on the name collision.
docker rm -f relay-test >/dev/null 2>&1 || true
docker run -d --name relay-test -p 4000:8080 scsibug/nostr-rs-relay:latest

3
src/apps/cli/util/p2p-stop.sh Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Stop and remove the test relay container. Tolerate the container being absent
# (e.g. start failed or cleanup already ran): the original exited with docker's
# non-zero status in that case, which can fail CI teardown steps.
docker stop relay-test 2>/dev/null || true
docker rm relay-test 2>/dev/null || true

View File

@@ -5,7 +5,16 @@ import { readFileSync } from "node:fs";
const packageJson = JSON.parse(readFileSync("../../../package.json", "utf-8"));
const manifestJson = JSON.parse(readFileSync("../../../manifest.json", "utf-8"));
// https://vite.dev/config/
const defaultExternal = ["obsidian", "electron", "crypto", "pouchdb-adapter-leveldb", "commander", "punycode"];
const defaultExternal = [
"obsidian",
"electron",
"crypto",
"pouchdb-adapter-leveldb",
"commander",
"punycode",
"node-datachannel",
"node-datachannel/polyfill",
];
export default defineConfig({
plugins: [svelte()],
resolve: {
@@ -43,6 +52,7 @@ export default defineConfig({
if (id === "fs" || id === "fs/promises" || id === "path" || id === "crypto" || id === "worker_threads")
return true;
if (id.startsWith("pouchdb-")) return true;
if (id.startsWith("node-datachannel")) return true;
if (id.startsWith("node:")) return true;
return false;
},

View File

@@ -1,10 +1,8 @@
import { PouchDB } from "@lib/pouchdb/pouchdb-browser";
import {
type EntryDoc,
type LOG_LEVEL,
type ObsidianLiveSyncSettings,
type P2PSyncSetting,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
P2P_DEFAULT_SETTINGS,
REMOTE_P2P,
@@ -12,35 +10,27 @@ import {
import { eventHub } from "@lib/hub/hub";
import type { Confirm } from "@lib/interfaces/Confirm";
import { LOG_LEVEL_INFO, Logger } from "@lib/common/logger";
import { LOG_LEVEL_NOTICE, Logger } from "@lib/common/logger";
import { storeP2PStatusLine } from "./CommandsShim";
import {
EVENT_P2P_PEER_SHOW_EXTRA_MENU,
type CommandShim,
type PeerStatus,
type PluginShim,
} from "@lib/replication/trystero/P2PReplicatorPaneCommon";
import {
closeP2PReplicator,
openP2PReplicator,
P2PLogCollector,
type P2PReplicatorBase,
} from "@lib/replication/trystero/P2PReplicatorCore";
import { P2PLogCollector, type P2PReplicatorBase, useP2PReplicator } from "@lib/replication/trystero/P2PReplicatorCore";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
import { reactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
import { EVENT_SETTING_SAVED } from "@lib/events/coreEvents";
import { unique } from "octagonal-wheels/collection";
import { BrowserServiceHub } from "@lib/services/BrowserServices";
import { TrysteroReplicator } from "@lib/replication/trystero/TrysteroReplicator";
import { SETTING_KEY_P2P_DEVICE_NAME } from "@lib/common/types";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import type { InjectableServiceHub } from "@lib/services/InjectableServices";
import { Menu } from "@lib/services/implements/browser/Menu";
import type { InjectableVaultServiceCompat } from "@lib/services/implements/injectable/InjectableVaultService";
import { SimpleStoreIDBv2 } from "octagonal-wheels/databases/SimpleStoreIDBv2";
import type { InjectableAPIService } from "@/lib/src/services/implements/injectable/InjectableAPIService";
import type { BrowserAPIService } from "@/lib/src/services/implements/browser/BrowserAPIService";
import type { InjectableSettingService } from "@/lib/src/services/implements/injectable/InjectableSettingService";
import { LiveSyncTrysteroReplicator } from "@lib/replication/trystero/LiveSyncTrysteroReplicator";
function addToList(item: string, list: string) {
return unique(
@@ -60,12 +50,10 @@ function removeFromList(item: string, list: string) {
.join(",");
}
export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
export class P2PReplicatorShim implements P2PReplicatorBase {
storeP2PStatusLine = reactiveSource("");
plugin!: PluginShim;
// environment!: IEnvironment;
confirm!: Confirm;
// simpleStoreAPI!: ISimpleStoreAPI;
db?: PouchDB.Database<EntryDoc>;
services: InjectableServiceHub<ServiceContext>;
@@ -76,12 +64,30 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
return this.db;
}
_simpleStore!: SimpleStore<any>;
async closeDB() {
if (this.db) {
await this.db.close();
this.db = undefined;
}
}
private _liveSyncReplicator?: LiveSyncTrysteroReplicator;
p2pLogCollector!: P2PLogCollector;
private _initP2PReplicator() {
const {
replicator,
p2pLogCollector,
storeP2PStatusLine: p2pStatusLine,
} = useP2PReplicator({ services: this.services } as any);
this._liveSyncReplicator = replicator;
this.p2pLogCollector = p2pLogCollector;
p2pLogCollector.p2pReplicationLine.onChanged((line) => {
storeP2PStatusLine.set(line.value);
});
}
constructor() {
const browserServiceHub = new BrowserServiceHub<ServiceContext>();
this.services = browserServiceHub;
@@ -89,7 +95,6 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
(this.services.API as BrowserAPIService<ServiceContext>).getSystemVaultName.setHandler(
() => "p2p-livesync-web-peer"
);
this.services.API.addLog.setHandler(Logger);
const repStore = SimpleStoreIDBv2.open<any>("p2p-livesync-web-peer");
this._simpleStore = repStore;
let _settings = { ...P2P_DEFAULT_SETTINGS, additionalSuffixOfDatabaseName: "" } as ObsidianLiveSyncSettings;
@@ -103,14 +108,13 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
return settings;
});
}
get settings() {
return this.services.setting.currentSettings() as P2PSyncSetting;
}
async init() {
// const { simpleStoreAPI } = await getWrappedSynchromesh();
// this.confirm = confirm;
this.confirm = this.services.UI.confirm;
// this.environment = environment;
if (this.db) {
try {
@@ -123,30 +127,16 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
await this.services.setting.loadSettings();
this.plugin = {
// saveSettings: async () => {
// await repStore.set("settings", _settings);
// eventHub.emitEvent(EVENT_SETTING_SAVED, _settings);
// },
// get settings() {
// return _settings;
// },
// set settings(newSettings: P2PSyncSetting) {
// _settings = { ..._settings, ...newSettings };
// },
// rebuilder: null,
// core: {
// settings: this.services.setting.settings,
// },
services: this.services,
core: {
services: this.services,
},
// $$scheduleAppReload: () => {},
// $$getVaultName: () => "p2p-livesync-web-peer",
};
// const deviceName = this.getDeviceName();
const database_name = this.settings.P2P_AppID + "-" + this.settings.P2P_roomID + "p2p-livesync-web-peer";
this.db = new PouchDB<EntryDoc>(database_name);
this._initP2PReplicator();
setTimeout(() => {
if (this.settings.P2P_AutoStart && this.settings.P2P_Enabled) {
void this.open();
@@ -155,7 +145,7 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
return this;
}
_log(msg: any, level?: LOG_LEVEL): void {
_log(msg: any, level?: any): void {
Logger(msg, level);
}
_notice(msg: string, key?: string): void {
@@ -167,14 +157,10 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
simpleStore(): SimpleStore<any> {
return this._simpleStore;
}
handleReplicatedDocuments(docs: EntryDoc[]): Promise<boolean> {
// No op. This is a client and does not need to process the docs
handleReplicatedDocuments(_docs: EntryDoc[]): Promise<boolean> {
return Promise.resolve(true);
}
getPluginShim() {
return {};
}
getConfig(key: string) {
const vaultName = this.services.vault.getVaultName();
const dbKey = `${vaultName}-${key}`;
@@ -189,9 +175,7 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
getDeviceName(): string {
return this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? this.plugin.services.vault.getVaultName();
}
getPlatform(): string {
return "pseudo-replicator";
}
m?: Menu;
afterConstructor(): void {
eventHub.onEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, ({ peer, event }) => {
@@ -202,12 +186,6 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
.addItem((item) => item.setTitle("📥 Only Fetch").onClick(() => this.replicateFrom(peer)))
.addItem((item) => item.setTitle("📤 Only Send").onClick(() => this.replicateTo(peer)))
.addSeparator()
// .addItem((item) => {
// item.setTitle("🔧 Get Configuration").onClick(async () => {
// await this.getRemoteConfig(peer);
// });
// })
// .addSeparator()
.addItem((item) => {
const mark = peer.syncOnConnect ? "checkmark" : null;
item.setTitle("Toggle Sync on connect")
@@ -234,97 +212,43 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
});
void this.m.showAtPosition({ x: event.x, y: event.y });
});
this.p2pLogCollector.p2pReplicationLine.onChanged((line) => {
storeP2PStatusLine.set(line.value);
});
}
_replicatorInstance?: TrysteroReplicator;
p2pLogCollector = new P2PLogCollector();
async open() {
await openP2PReplicator(this);
await this._liveSyncReplicator?.open();
}
async close() {
await closeP2PReplicator(this);
await this._liveSyncReplicator?.close();
}
enableBroadcastCastings() {
return this?._replicatorInstance?.enableBroadcastChanges();
return this._liveSyncReplicator?.enableBroadcastChanges();
}
disableBroadcastCastings() {
return this?._replicatorInstance?.disableBroadcastChanges();
}
async initialiseP2PReplicator(): Promise<TrysteroReplicator> {
await this.init();
try {
if (this._replicatorInstance) {
await this._replicatorInstance.close();
this._replicatorInstance = undefined;
}
if (!this.settings.P2P_AppID) {
this.settings.P2P_AppID = P2P_DEFAULT_SETTINGS.P2P_AppID;
}
const getInitialDeviceName = () =>
this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) || this.services.vault.getVaultName();
const getSettings = () => this.settings;
const store = () => this.simpleStore();
const getDB = () => this.getDB();
const getConfirm = () => this.confirm;
const getPlatform = () => this.getPlatform();
const env = {
get db() {
return getDB();
},
get confirm() {
return getConfirm();
},
get deviceName() {
return getInitialDeviceName();
},
get platform() {
return getPlatform();
},
get settings() {
return getSettings();
},
processReplicatedDocs: async (docs: EntryDoc[]): Promise<void> => {
await this.handleReplicatedDocuments(docs);
// No op. This is a client and does not need to process the docs
},
get simpleStore() {
return store();
},
};
this._replicatorInstance = new TrysteroReplicator(env);
return this._replicatorInstance;
} catch (e) {
this._log(
e instanceof Error ? e.message : "Something occurred on Initialising P2P Replicator",
LOG_LEVEL_INFO
);
this._log(e, LOG_LEVEL_VERBOSE);
throw e;
}
return this._liveSyncReplicator?.disableBroadcastChanges();
}
get replicator() {
return this._replicatorInstance!;
return this._liveSyncReplicator;
}
async replicateFrom(peer: PeerStatus) {
await this.replicator.replicateFrom(peer.peerId);
const r = this._liveSyncReplicator;
if (!r) return;
await r.replicateFrom(peer.peerId);
}
async replicateTo(peer: PeerStatus) {
await this.replicator.requestSynchroniseToPeer(peer.peerId);
await this._liveSyncReplicator?.requestSynchroniseToPeer(peer.peerId);
}
async getRemoteConfig(peer: PeerStatus) {
Logger(
`Requesting remote config for ${peer.name}. Please input the passphrase on the remote device`,
LOG_LEVEL_NOTICE
);
const remoteConfig = await this.replicator.getRemoteConfig(peer.peerId);
const remoteConfig = await this._liveSyncReplicator?.getRemoteConfig(peer.peerId);
if (remoteConfig) {
Logger(`Remote config for ${peer.name} is retrieved successfully`);
const DROP = "Yes, and drop local database";
@@ -344,9 +268,7 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
if (remoteConfig.remoteType !== REMOTE_P2P) {
const yn2 = await this.confirm.askYesNoDialog(
`Do you want to set the remote type to "P2P Sync" to rebuild by "P2P replication"?`,
{
title: "Rebuild from remote device",
}
{ title: "Rebuild from remote device" }
);
if (yn2 === "yes") {
remoteConfig.remoteType = REMOTE_P2P;
@@ -355,9 +277,7 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
}
}
await this.services.setting.applyPartial(remoteConfig, true);
if (yn === DROP) {
// await this.plugin.rebuilder.scheduleFetch();
} else {
if (yn !== DROP) {
await this.plugin.core.services.appLifecycle.scheduleRestart();
}
} else {
@@ -381,8 +301,6 @@ export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
[targetSetting]: currentSettingAll ? currentSettingAll[targetSetting] : "",
};
if (peer[prop]) {
// this.plugin.settings[targetSetting] = removeFromList(peer.name, this.plugin.settings[targetSetting]);
// await this.plugin.saveSettings();
currentSetting[targetSetting] = removeFromList(peer.name, currentSetting[targetSetting]);
} else {
currentSetting[targetSetting] = addToList(peer.name, currentSetting[targetSetting]);

View File

@@ -16,9 +16,6 @@ export const EVENT_REQUEST_RELOAD_SETTING_TAB = "reload-setting-tab";
export const EVENT_REQUEST_OPEN_PLUGIN_SYNC_DIALOG = "request-open-plugin-sync-dialog";
export const EVENT_REQUEST_OPEN_P2P = "request-open-p2p";
export const EVENT_REQUEST_CLOSE_P2P = "request-close-p2p";
export const EVENT_REQUEST_RUN_DOCTOR = "request-run-doctor";
export const EVENT_REQUEST_RUN_FIX_INCOMPLETE = "request-run-fix-incomplete";
@@ -36,8 +33,6 @@ declare global {
[EVENT_REQUEST_OPEN_SETTING_WIZARD]: undefined;
[EVENT_REQUEST_RELOAD_SETTING_TAB]: undefined;
[EVENT_LEAF_ACTIVE_CHANGED]: undefined;
[EVENT_REQUEST_CLOSE_P2P]: undefined;
[EVENT_REQUEST_OPEN_P2P]: undefined;
[EVENT_REQUEST_OPEN_SETUP_URI]: undefined;
[EVENT_REQUEST_COPY_SETUP_URI]: undefined;
[EVENT_REQUEST_SHOW_SETUP_QR]: undefined;

View File

@@ -1,278 +0,0 @@
import { P2PReplicatorPaneView, VIEW_TYPE_P2P } from "./P2PReplicator/P2PReplicatorPaneView.ts";
import {
AutoAccepting,
LOG_LEVEL_NOTICE,
P2P_DEFAULT_SETTINGS,
REMOTE_P2P,
type EntryDoc,
type P2PSyncSetting,
type RemoteDBSettings,
} from "../../lib/src/common/types.ts";
import { LiveSyncCommands } from "../LiveSyncCommands.ts";
import {
LiveSyncTrysteroReplicator,
setReplicatorFunc,
} from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator.ts";
import { EVENT_REQUEST_OPEN_P2P, eventHub } from "../../common/events.ts";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator.ts";
import { LOG_LEVEL_INFO, LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import type { CommandShim } from "../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
import {
addP2PEventHandlers,
closeP2PReplicator,
openP2PReplicator,
P2PLogCollector,
removeP2PReplicatorInstance,
type P2PReplicatorBase,
} from "../../lib/src/replication/trystero/P2PReplicatorCore.ts";
import { reactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
import type { Confirm } from "../../lib/src/interfaces/Confirm.ts";
import type ObsidianLiveSyncPlugin from "../../main.ts";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
// import { getPlatformName } from "../../lib/src/PlatformAPIs/obsidian/Environment.ts";
import type { LiveSyncCore } from "../../main.ts";
import { TrysteroReplicator } from "../../lib/src/replication/trystero/TrysteroReplicator.ts";
import { SETTING_KEY_P2P_DEVICE_NAME } from "../../lib/src/common/types.ts";
export class P2PReplicator extends LiveSyncCommands implements P2PReplicatorBase, CommandShim {
storeP2PStatusLine = reactiveSource("");
// Settings are owned by the core plugin; expose them to the replicator core.
getSettings(): P2PSyncSetting {
    return this.core.settings;
}
// Local PouchDB instance managed by the core.
getDB() {
    return this.core.localDatabase.localDatabase;
}
// Confirmation-dialog service provided by the core.
get confirm(): Confirm {
    return this.core.confirm;
}
// Simple key-value store; opened in init().
_simpleStore!: SimpleStore<any>;
simpleStore(): SimpleStore<any> {
    return this._simpleStore;
}
constructor(plugin: ObsidianLiveSyncPlugin, core: LiveSyncCore) {
    super(plugin, core);
    // Let module-level consumers resolve the live replicator instance lazily.
    setReplicatorFunc(() => this._replicatorInstance);
    // Wire the shared P2P event handlers to this command instance.
    addP2PEventHandlers(this);
    this.afterConstructor();
    // onBindFunction is called in super class
    // this.onBindFunction(plugin, plugin.services);
}
async handleReplicatedDocuments(docs: EntryDoc[]): Promise<boolean> {
    // Hand the replicated documents to the replication service and report its verdict.
    const received = docs as PouchDB.Core.ExistingDocument<EntryDoc>[];
    const processed = await this.services.replication.parseSynchroniseResult(received);
    return processed;
}
// Factory hook: supplies a Trystero replicator only when the (possibly overridden)
// remote type is P2P.
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator> {
    const settings = { ...this.settings, ...settingOverride };
    if (settings.remoteType == REMOTE_P2P) {
        return Promise.resolve(new LiveSyncTrysteroReplicator(this.plugin.core));
    }
    // NOTE(review): deliberately yields `undefined` (non-null asserted) for other
    // remote types — presumably another factory handles them; confirm at call sites.
    return undefined!;
}
// Live replicator instance; created in initialiseP2PReplicator().
_replicatorInstance?: TrysteroReplicator;
// Collects P2P log lines for display in the status line.
p2pLogCollector = new P2PLogCollector();
afterConstructor() {
    // Subclass hook; intentionally a no-op here.
    return;
}
async open() {
    // Connect this command's replicator (delegates to the shared core helper).
    await openP2PReplicator(this);
}
async close() {
    // Disconnect and tear down the replicator (delegates to the shared core helper).
    await closeP2PReplicator(this);
}
// Small per-key configuration values are persisted via the config service.
getConfig(key: string) {
    return this.services.config.getSmallConfig(key);
}
setConfig(key: string, value: string) {
    return this.services.config.setSmallConfig(key, value);
}
enableBroadcastCastings() {
    // Optional chaining: the replicator may not have been initialised yet.
    return this?._replicatorInstance?.enableBroadcastChanges();
}
disableBroadcastCastings() {
    return this?._replicatorInstance?.disableBroadcastChanges();
}
init() {
    // Open the backing key-value store; resolves with `this` for chaining.
    this._simpleStore = this.services.keyValueDB.openSimpleStore("p2p-sync");
    return Promise.resolve(this);
}
// (Re)create the Trystero replicator: close any previous instance, fill in a
// default App ID, and build a getter-based environment so the replicator always
// reads current settings/DB/confirm rather than snapshots taken at construction.
async initialiseP2PReplicator(): Promise<TrysteroReplicator> {
    await this.init();
    try {
        if (this._replicatorInstance) {
            await this._replicatorInstance.close();
            this._replicatorInstance = undefined;
        }
        if (!this.settings.P2P_AppID) {
            this.settings.P2P_AppID = P2P_DEFAULT_SETTINGS.P2P_AppID;
        }
        const getInitialDeviceName = () =>
            this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) || this.services.vault.getVaultName();
        const getSettings = () => this.settings;
        const store = () => this.simpleStore();
        const getDB = () => this.getDB();
        const getConfirm = () => this.confirm;
        const getPlatform = () => this.services.API.getPlatform();
        // Each property re-evaluates on access, decoupling the replicator from
        // this command's lifecycle.
        const env = {
            get db() {
                return getDB();
            },
            get confirm() {
                return getConfirm();
            },
            get deviceName() {
                return getInitialDeviceName();
            },
            get platform() {
                return getPlatform();
            },
            get settings() {
                return getSettings();
            },
            processReplicatedDocs: async (docs: EntryDoc[]): Promise<void> => {
                // Forward to this command's handler (result intentionally discarded).
                await this.handleReplicatedDocuments(docs);
            },
            get simpleStore() {
                return store();
            },
        };
        this._replicatorInstance = new TrysteroReplicator(env);
        return this._replicatorInstance;
    } catch (e) {
        // Log a readable summary plus the full error, then propagate to the caller.
        this._log(
            e instanceof Error ? e.message : "Something occurred on Initialising P2P Replicator",
            LOG_LEVEL_INFO
        );
        this._log(e, LOG_LEVEL_VERBOSE);
        throw e;
    }
}
onunload(): void {
    // Drop the module-level replicator reference, then close asynchronously
    // (fire-and-forget: unload cannot await).
    removeP2PReplicatorInstance();
    void this.close();
}
onload(): void | Promise<void> {
    // Open the pane when requested via the event hub.
    eventHub.onEvent(EVENT_REQUEST_OPEN_P2P, () => {
        void this.openPane();
    });
    // Mirror the collected P2P log line into the reactive status source.
    this.p2pLogCollector.p2pReplicationLine.onChanged((line) => {
        this.storeP2PStatusLine.value = line.value;
    });
}
/**
 * Database-initialisation hook: (re)create the P2P replicator.
 * @returns true so the initialisation chain continues
 */
async _everyOnInitializeDatabase(): Promise<boolean> {
    await this.initialiseP2PReplicator();
    // An async function already wraps the value; Promise.resolve was redundant.
    return true;
}
/**
 * Suspend-extra-sync hook: switch every P2P-related setting off so no
 * automatic P2P activity happens while extra sync is suspended.
 * @returns true (handler chain convention)
 */
private async _allSuspendExtraSync() {
    // Hoist the shared settings object instead of repeating the long chain.
    const settings = this.plugin.core.settings;
    settings.P2P_Enabled = false;
    settings.P2P_AutoAccepting = AutoAccepting.NONE;
    settings.P2P_AutoBroadcast = false;
    settings.P2P_AutoStart = false;
    settings.P2P_AutoSyncPeers = "";
    settings.P2P_AutoWatchPeers = "";
    // `return await Promise.resolve(true)` was redundant in an async function.
    return true;
}
// async $everyOnLoadStart() {
// return await Promise.resolve();
// }
/** Reveal the P2P replicator pane in the workspace. */
async openPane() {
    const { API } = this.services;
    await API.showWindow(VIEW_TYPE_P2P);
}
/**
 * App-initialisation hook: registers the P2P pane view, the user-facing
 * commands (open pane, connect, disconnect, replicate now) and the ribbon
 * icon.
 * @returns true so the initialisation chain continues
 */
async _everyOnloadStart(): Promise<boolean> {
    this.plugin.registerView(
        VIEW_TYPE_P2P,
        (leaf) => new P2PReplicatorPaneView(leaf, this.plugin.core, this.plugin)
    );
    this.plugin.addCommand({
        id: "open-p2p-replicator",
        name: "P2P Sync : Open P2P Replicator",
        callback: async () => {
            await this.openPane();
        },
    });
    this.plugin.addCommand({
        id: "p2p-establish-connection",
        name: "P2P Sync : Connect to the Signalling Server",
        checkCallback: (isChecking) => {
            // Only offer "connect" while not already serving.
            if (isChecking) {
                return !(this._replicatorInstance?.server?.isServing ?? false);
            }
            void this.open();
        },
    });
    this.plugin.addCommand({
        id: "p2p-close-connection",
        name: "P2P Sync : Disconnect from the Signalling Server",
        checkCallback: (isChecking) => {
            // Only offer "disconnect" while actually serving.
            if (isChecking) {
                return this._replicatorInstance?.server?.isServing ?? false;
            }
            Logger(`Closing P2P Connection`, LOG_LEVEL_NOTICE);
            void this.close();
        },
    });
    this.plugin.addCommand({
        id: "replicate-now-by-p2p",
        name: "Replicate now by P2P",
        checkCallback: (isChecking) => {
            if (isChecking) {
                // Hidden when P2P is already the main remote (the regular
                // replicate command covers that case) and while not serving.
                // Strict equality: REMOTE_P2P is a constant, `==` was lax.
                if (this.settings.remoteType === REMOTE_P2P) return false;
                if (!this._replicatorInstance?.server?.isServing) return false;
                return true;
            }
            void this._replicatorInstance?.replicateFromCommand(false);
        },
    });
    this.plugin
        .addRibbonIcon("waypoints", "P2P Replicator", async () => {
            await this.openPane();
        })
        .addClass("livesync-ribbon-replicate-p2p");
    // An async function already wraps the value; the Promise.resolve round
    // trip was redundant.
    return true;
}
_everyAfterResumeProcess(): Promise<boolean> {
if (this.settings.P2P_Enabled && this.settings.P2P_AutoStart) {
setTimeout(() => void this.open(), 100);
}
const rep = this._replicatorInstance;
rep?.allowReconnection();
return Promise.resolve(true);
}
/**
 * Suspend hook: drop the signalling-server connection before the app sleeps.
 * @returns resolves true (handler chain convention)
 */
_everyBeforeSuspendProcess(): Promise<boolean> {
    this._replicatorInstance?.disconnectFromServer();
    return Promise.resolve(true);
}
/**
 * Wire this add-on's handlers into the service hub: replicator creation,
 * database initialisation, app lifecycle (load/suspend/resume) and the
 * suspend-extra-sync setting hook.
 */
override onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
    services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
    services.databaseEvents.onDatabaseInitialisation.addHandler(this._everyOnInitializeDatabase.bind(this));
    services.appLifecycle.onInitialise.addHandler(this._everyOnloadStart.bind(this));
    services.appLifecycle.onSuspending.addHandler(this._everyBeforeSuspendProcess.bind(this));
    services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
    services.setting.suspendExtraSync.addHandler(this._allSuspendExtraSync.bind(this));
}
}

View File

@@ -4,10 +4,9 @@
import {
AcceptedStatus,
ConnectionStatus,
type CommandShim,
type PeerStatus,
type PluginShim,
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
import type { LiveSyncTrysteroReplicator } from "../../../lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import PeerStatusRow from "../P2PReplicator/PeerStatusRow.svelte";
import { EVENT_LAYOUT_READY, eventHub } from "../../../common/events";
import {
@@ -23,7 +22,7 @@
import type { LiveSyncBaseCore } from "@/LiveSyncBaseCore";
interface Props {
cmdSync: CommandShim;
cmdSync: LiveSyncTrysteroReplicator;
core: LiveSyncBaseCore;
}
@@ -95,9 +94,8 @@
},
true
);
cmdSync.setConfig(SETTING_KEY_P2P_DEVICE_NAME, eDeviceName);
core.services.config.setSmallConfig(SETTING_KEY_P2P_DEVICE_NAME, eDeviceName);
deviceName = eDeviceName;
// await plugin.saveSettings();
}
async function revert() {
eP2PEnabled = settings.P2P_Enabled;
@@ -115,7 +113,7 @@
const applyLoadSettings = (d: P2PSyncSetting, force: boolean) => {
if (force) {
const initDeviceName =
cmdSync.getConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? core.services.vault.getVaultName();
core.services.config.getSmallConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? core.services.vault.getVaultName();
deviceName = initDeviceName;
eDeviceName = initDeviceName;
}
@@ -239,16 +237,16 @@
await cmdSync.close();
}
function startBroadcasting() {
void cmdSync.enableBroadcastCastings();
void cmdSync.enableBroadcastChanges();
}
function stopBroadcasting() {
void cmdSync.disableBroadcastCastings();
void cmdSync.disableBroadcastChanges();
}
const initialDialogStatusKey = `p2p-dialog-status`;
const getDialogStatus = () => {
try {
const initialDialogStatus = JSON.parse(cmdSync.getConfig(initialDialogStatusKey) ?? "{}") as {
const initialDialogStatus = JSON.parse(core.services.config.getSmallConfig(initialDialogStatusKey) ?? "{}") as {
notice?: boolean;
setting?: boolean;
};
@@ -265,7 +263,7 @@
notice: isNoticeOpened,
setting: isSettingOpened,
};
cmdSync.setConfig(initialDialogStatusKey, JSON.stringify(dialogStatus));
core.services.config.setSmallConfig(initialDialogStatusKey, JSON.stringify(dialogStatus));
});
let isObsidian = $derived.by(() => {
return core.services.API.getPlatform() === "obsidian";

View File

@@ -1,19 +1,15 @@
import { Menu, WorkspaceLeaf } from "@/deps.ts";
import ReplicatorPaneComponent from "./P2PReplicatorPane.svelte";
import type ObsidianLiveSyncPlugin from "../../../main.ts";
import { mount } from "svelte";
import { SvelteItemView } from "../../../common/SvelteItemView.ts";
import { eventHub } from "../../../common/events.ts";
import { SvelteItemView } from "@/common/SvelteItemView.ts";
import { eventHub } from "@/common/events.ts";
import { unique } from "octagonal-wheels/collection";
import { LOG_LEVEL_NOTICE, REMOTE_P2P } from "../../../lib/src/common/types.ts";
import { Logger } from "../../../lib/src/common/logger.ts";
import { P2PReplicator } from "../CmdP2PReplicator.ts";
import {
EVENT_P2P_PEER_SHOW_EXTRA_MENU,
type PeerStatus,
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
import { LOG_LEVEL_NOTICE, REMOTE_P2P } from "@lib/common/types.ts";
import { Logger } from "@lib/common/logger.ts";
import { EVENT_P2P_PEER_SHOW_EXTRA_MENU, type PeerStatus } from "@lib/replication/trystero/P2PReplicatorPaneCommon.ts";
import type { LiveSyncBaseCore } from "@/LiveSyncBaseCore.ts";
import type { UseP2PReplicatorResult } from "@lib/replication/trystero/P2PReplicatorCore.ts";
export const VIEW_TYPE_P2P = "p2p-replicator";
function addToList(item: string, list: string) {
@@ -35,8 +31,8 @@ function removeFromList(item: string, list: string) {
}
export class P2PReplicatorPaneView extends SvelteItemView {
// plugin: ObsidianLiveSyncPlugin;
core: LiveSyncBaseCore;
private _p2pResult: UseP2PReplicatorResult;
override icon = "waypoints";
title: string = "";
override navigation = false;
@@ -45,11 +41,7 @@ export class P2PReplicatorPaneView extends SvelteItemView {
return "waypoints";
}
get replicator() {
const r = this.core.getAddOn<P2PReplicator>(P2PReplicator.name);
if (!r || !r._replicatorInstance) {
throw new Error("Replicator not found");
}
return r._replicatorInstance;
return this._p2pResult.replicator;
}
async replicateFrom(peer: PeerStatus) {
await this.replicator.replicateFrom(peer.peerId);
@@ -131,10 +123,10 @@ And you can also drop the local database to rebuild from the remote device.`,
await this.core.services.setting.applyPartial(currentSetting, true);
}
m?: Menu;
constructor(leaf: WorkspaceLeaf, core: LiveSyncBaseCore, plugin: ObsidianLiveSyncPlugin) {
constructor(leaf: WorkspaceLeaf, core: LiveSyncBaseCore, p2pResult: UseP2PReplicatorResult) {
super(leaf);
// this.plugin = plugin;
this.core = core;
this._p2pResult = p2pResult;
eventHub.onEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, ({ peer, event }) => {
if (this.m) {
this.m.hide();
@@ -192,14 +184,10 @@ And you can also drop the local database to rebuild from the remote device.`,
}
}
instantiateComponent(target: HTMLElement) {
const cmdSync = this.core.getAddOn<P2PReplicator>(P2PReplicator.name);
if (!cmdSync) {
throw new Error("Replicator not found");
}
return mount(ReplicatorPaneComponent, {
target: target,
props: {
cmdSync: cmdSync,
cmdSync: this._p2pResult.replicator,
core: this.core,
},
});

View File

@@ -1,7 +1,7 @@
<script lang="ts">
import { getContext } from "svelte";
import { AcceptedStatus, type PeerStatus } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
import type { P2PReplicator } from "../CmdP2PReplicator";
import type { LiveSyncTrysteroReplicator } from "../../../lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import { eventHub } from "../../../common/events";
import { EVENT_P2P_PEER_SHOW_EXTRA_MENU } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
@@ -57,7 +57,7 @@
let isNew = $derived.by(() => peer.accepted === AcceptedStatus.UNKNOWN);
function makeDecision(isAccepted: boolean, isTemporary: boolean) {
cmdReplicator._replicatorInstance?.server?.makeDecision({
replicator.makeDecision({
peerId: peer.peerId,
name: peer.name,
decision: isAccepted,
@@ -65,13 +65,12 @@
});
}
function revokeDecision() {
cmdReplicator._replicatorInstance?.server?.revokeDecision({
replicator.revokeDecision({
peerId: peer.peerId,
name: peer.name,
});
}
const cmdReplicator = getContext<() => P2PReplicator>("getReplicator")();
const replicator = cmdReplicator._replicatorInstance!;
const replicator = getContext<() => LiveSyncTrysteroReplicator>("getReplicator")();
const peerAttrLabels = $derived.by(() => {
const attrs = [];
@@ -87,14 +86,14 @@
return attrs;
});
function startWatching() {
replicator.watchPeer(peer.peerId);
replicator?.watchPeer(peer.peerId);
}
function stopWatching() {
replicator.unwatchPeer(peer.peerId);
replicator?.unwatchPeer(peer.peerId);
}
function sync() {
replicator.sync(peer.peerId, false);
void replicator?.sync(peer.peerId, false);
}
function moreMenu(evt: MouseEvent) {

Submodule src/lib updated: d94c9b3ed7...9145013efa

View File

@@ -14,7 +14,8 @@ import { ModuleObsidianGlobalHistory } from "./modules/features/ModuleGlobalHist
import { ModuleIntegratedTest } from "./modules/extras/ModuleIntegratedTest.ts";
import { ModuleReplicateTest } from "./modules/extras/ModuleReplicateTest.ts";
import { LocalDatabaseMaintenance } from "./features/LocalDatabaseMainte/CmdLocalDatabaseMainte.ts";
import { P2PReplicator } from "./features/P2PSync/CmdP2PReplicator.ts";
import { P2PReplicatorPaneView, VIEW_TYPE_P2P } from "./features/P2PSync/P2PReplicator/P2PReplicatorPaneView.ts";
import { useP2PReplicator } from "@lib/replication/trystero/P2PReplicatorCore.ts";
import type { InjectableServiceHub } from "./lib/src/services/implements/injectable/InjectableServiceHub.ts";
import { ObsidianServiceHub } from "./modules/services/ObsidianServiceHub.ts";
import { ServiceRebuilder } from "@lib/serviceModules/Rebuilder.ts";
@@ -132,6 +133,10 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
const serviceHub = new ObsidianServiceHub(this);
// Capture useP2PReplicator result so it can be passed to the P2PReplicator addon
// TODO: Dependency fix: bit hacky
let p2pReplicatorResult: ReturnType<typeof useP2PReplicator> | undefined;
this.core = new LiveSyncBaseCore(
serviceHub,
(core, serviceHub) => {
@@ -161,7 +166,6 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
new ConfigSync(this, core),
new HiddenFileSync(this, core),
new LocalDatabaseMaintenance(this, core),
new P2PReplicator(this, core),
];
return addOns;
},
@@ -173,6 +177,10 @@ export default class ObsidianLiveSyncPlugin extends Plugin {
useOfflineScanner(core);
useRedFlagFeatures(core);
useCheckRemoteSize(core);
p2pReplicatorResult = useP2PReplicator(core, [
VIEW_TYPE_P2P,
(leaf: any) => new P2PReplicatorPaneView(leaf, core, p2pReplicatorResult!),
]);
}
);
}

View File

@@ -4,6 +4,13 @@ import { AbstractModule } from "../AbstractModule";
import { LiveSyncTrysteroReplicator } from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import type { LiveSyncCore } from "../../main";
// Note:
// This module registers only the `getNewReplicator` handler for the P2P replicator.
// `useP2PReplicator` (see P2PReplicatorCore.ts) already registers the same `getNewReplicator`
// handler internally, so this module is redundant in environments that call `useP2PReplicator`.
// Register this module only in environments that do NOT use `useP2PReplicator` (e.g. CLI).
// In other words: just resolving `getNewReplicator` via this module is all that is needed
// to satisfy what `useP2PReplicator` requires from the replicator service.
export class ModuleReplicatorP2P extends AbstractModule {
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
const settings = { ...this.settings, ...settingOverride };
@@ -12,23 +19,7 @@ export class ModuleReplicatorP2P extends AbstractModule {
}
return Promise.resolve(false);
}
_everyAfterResumeProcess(): Promise<boolean> {
if (this.settings.remoteType == REMOTE_P2P) {
// // If LiveSync enabled, open replication
// if (this.settings.liveSync) {
// fireAndForget(() => this.core.replicator.openReplication(this.settings, true, false, false));
// }
// // If sync on start enabled, open replication
// if (!this.settings.liveSync && this.settings.syncOnStart) {
// // Possibly ok as if only share the result
// fireAndForget(() => this.core.replicator.openReplication(this.settings, false, false, false));
// }
}
return Promise.resolve(true);
}
override onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
}
}

View File

@@ -116,6 +116,22 @@ export const acceptWebPeer: BrowserCommand = async (ctx) => {
return false;
};
/** Write arbitrary text to a file on the Node.js host (used for phase handoff). */
export const writeHandoffFile: BrowserCommand<[filePath: string, content: string]> = async (
_ctx,
filePath: string,
content: string
) => {
const fs = await import("node:fs/promises");
await fs.writeFile(filePath, content, "utf-8");
};
/** Read a file from the Node.js host (used for phase handoff). */
export const readHandoffFile: BrowserCommand<[filePath: string]> = async (_ctx, filePath: string): Promise<string> => {
const fs = await import("node:fs/promises");
return fs.readFile(filePath, "utf-8");
};
export default function BrowserCommands(): Plugin {
return {
name: "vitest:custom-commands",
@@ -128,6 +144,8 @@ export default function BrowserCommands(): Plugin {
openWebPeer,
closeWebPeer,
acceptWebPeer,
writeHandoffFile,
readHandoffFile,
},
},
},
@@ -141,5 +159,7 @@ declare module "vitest/browser" {
openWebPeer: (setting: P2PSyncSetting, serverPeerName: string) => Promise<void>;
closeWebPeer: () => Promise<void>;
acceptWebPeer: () => Promise<boolean>;
writeHandoffFile: (filePath: string, content: string) => Promise<void>;
readHandoffFile: (filePath: string) => Promise<string>;
}
}

View File

@@ -1,6 +1,6 @@
import { expect } from "vitest";
import { waitForIdle, type LiveSyncHarness } from "../harness/harness";
import { LOG_LEVEL_INFO, RemoteTypes, type ObsidianLiveSyncSettings } from "@/lib/src/common/types";
import { RemoteTypes, type ObsidianLiveSyncSettings } from "@/lib/src/common/types";
import { delay, fireAndForget } from "@/lib/src/common/utils";
import { commands } from "vitest/browser";
@@ -15,14 +15,10 @@ async function waitForP2PPeers(harness: LiveSyncHarness) {
if (!(replicator instanceof LiveSyncTrysteroReplicator)) {
throw new Error("Replicator is not an instance of LiveSyncTrysteroReplicator");
}
const p2pReplicator = await replicator.getP2PConnection(LOG_LEVEL_INFO);
if (!p2pReplicator) {
throw new Error("P2P Replicator is not initialized");
}
while (retries-- > 0) {
fireAndForget(() => commands.acceptWebPeer());
await delay(1000);
const peers = p2pReplicator.knownAdvertisements;
const peers = replicator.knownAdvertisements;
if (peers && peers.length > 0) {
console.log("P2P peers connected:", peers);

194
test/suitep2p/run-p2p-tests.sh Executable file
View File

@@ -0,0 +1,194 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "$SCRIPT_DIR/../.." && pwd)"
CLI_DIR="$REPO_ROOT/src/apps/cli"
CLI_TEST_HELPERS="$CLI_DIR/test/test-helpers.sh"
source "$CLI_TEST_HELPERS"
RUN_BUILD="${RUN_BUILD:-1}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-1}"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-1}"
RELAY="${RELAY:-ws://localhost:4000/}"
USE_INTERNAL_RELAY="${USE_INTERNAL_RELAY:-1}"
APP_ID="${APP_ID:-self-hosted-livesync-vitest-p2p}"
HOST_PEER_NAME="${HOST_PEER_NAME:-p2p-cli-host}"
ROOM_ID="p2p-room-$(date +%s)-$RANDOM-$RANDOM"
PASSPHRASE="p2p-pass-$(date +%s)-$RANDOM-$RANDOM"
UPLOAD_PEER_NAME="p2p-upload-$(date +%s)-$RANDOM"
DOWNLOAD_PEER_NAME="p2p-download-$(date +%s)-$RANDOM"
UPLOAD_VAULT_NAME="TestVaultUpload-$(date +%s)-$RANDOM"
DOWNLOAD_VAULT_NAME="TestVaultDownload-$(date +%s)-$RANDOM"
# ---- Build CLI ----
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI"
(cd "$CLI_DIR" && npm run build)
fi
# ---- Temp directory ----
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-vitest-p2p.XXXXXX")"
VAULT_HOST="$WORK_DIR/vault-host"
SETTINGS_HOST="$WORK_DIR/settings-host.json"
HOST_LOG="$WORK_DIR/p2p-host.log"
# Handoff file: upload phase writes this; download phase reads it.
HANDOFF_FILE="$WORK_DIR/p2p-test-handoff.json"
mkdir -p "$VAULT_HOST"
# ---- Setup CLI command (uses npm run cli from CLI_DIR) ----
# Override run_cli to invoke the built binary directly from CLI_DIR
# Invoke the built CLI bundle directly; the subshell keeps the cwd change
# local so the caller's working directory is untouched.
run_cli() {
    (cd "$CLI_DIR" && node dist/index.cjs "$@")
}
# ---- Create host settings ----
echo "[INFO] relay=$RELAY room=$ROOM_ID app=$APP_ID host=$HOST_PEER_NAME"
cli_test_init_settings_file "$SETTINGS_HOST"
cli_test_apply_p2p_settings "$SETTINGS_HOST" "$ROOM_ID" "$PASSPHRASE" "$APP_ID" "$RELAY" "~.*"
# Set host peer name
SETTINGS_HOST_FILE="$SETTINGS_HOST" HOST_PEER_NAME_VAL="$HOST_PEER_NAME" HOST_PASSPHRASE_VAL="$PASSPHRASE" node <<'NODE'
const fs = require("node:fs");
const data = JSON.parse(fs.readFileSync(process.env.SETTINGS_HOST_FILE, "utf-8"));
// Keep tweak values aligned with browser-side P2P test settings.
data.remoteType = "ONLY_P2P";
data.encrypt = true;
data.passphrase = process.env.HOST_PASSPHRASE_VAL;
data.usePathObfuscation = true;
data.handleFilenameCaseSensitive = false;
data.customChunkSize = 50;
data.usePluginSyncV2 = true;
data.doNotUseFixedRevisionForChunks = false;
data.P2P_DevicePeerName = process.env.HOST_PEER_NAME_VAL;
fs.writeFileSync(process.env.SETTINGS_HOST_FILE, JSON.stringify(data, null, 2), "utf-8");
NODE
# ---- Cleanup trap ----
# EXIT-trap handler: stop the CLI host (if running), stop the internal relay
# (if this script started it), and remove the working directory unless
# KEEP_TEST_DATA=1. Re-exits with the original exit status.
cleanup() {
    # Capture the script's exit status before any cleanup command clobbers $?.
    local exit_code=$?
    if [[ -n "${HOST_PID:-}" ]] && kill -0 "$HOST_PID" >/dev/null 2>&1; then
        echo "[INFO] stopping CLI host (PID=$HOST_PID)"
        kill -TERM "$HOST_PID" >/dev/null 2>&1 || true
        wait "$HOST_PID" >/dev/null 2>&1 || true
    fi
    if [[ "${P2P_RELAY_STARTED:-0}" == "1" ]]; then
        cli_test_stop_p2p_relay
    fi
    if [[ "$KEEP_TEST_DATA" != "1" ]]; then
        rm -rf "$WORK_DIR"
    else
        echo "[INFO] KEEP_TEST_DATA=1, preserving artefacts at $WORK_DIR"
    fi
    exit "$exit_code"
}
trap cleanup EXIT
# Start the CLI p2p-host in the background and wait until its log reports
# readiness. Retries up to 5 times when the previous run's database lock has
# not been released yet ("Resource temporarily unavailable"); any other
# failure aborts the script with the host log dumped to stderr.
# Sets HOST_PID on success (consumed by cleanup()).
start_host() {
    local attempt=0
    while [[ "$attempt" -lt 5 ]]; do
        attempt=$((attempt + 1))
        echo "[INFO] starting CLI p2p-host (attempt $attempt/5)"
        # Truncate the log so readiness greps only see the current attempt.
        : >"$HOST_LOG"
        (cd "$CLI_DIR" && node dist/index.cjs "$VAULT_HOST" --settings "$SETTINGS_HOST" -d p2p-host) >"$HOST_LOG" 2>&1 &
        HOST_PID=$!
        local host_ready=0
        local exited_early=0
        # Poll the log (max 30s) for the readiness marker, bailing out early
        # if the host process dies.
        for i in $(seq 1 30); do
            if grep -qF "P2P host is running" "$HOST_LOG" 2>/dev/null; then
                host_ready=1
                break
            fi
            if ! kill -0 "$HOST_PID" >/dev/null 2>&1; then
                exited_early=1
                break
            fi
            echo "[INFO] waiting for p2p-host to be ready... ($i/30)"
            sleep 1
        done
        if [[ "$host_ready" == "1" ]]; then
            echo "[INFO] p2p-host is ready (PID=$HOST_PID)"
            return 0
        fi
        # Reap the failed process before deciding whether to retry.
        wait "$HOST_PID" >/dev/null 2>&1 || true
        HOST_PID=
        if grep -qF "Resource temporarily unavailable" "$HOST_LOG" 2>/dev/null; then
            echo "[INFO] p2p-host database lock is still being released, retrying..."
            sleep 2
            continue
        fi
        if [[ "$exited_early" == "1" ]]; then
            echo "[FAIL] CLI host process exited unexpectedly" >&2
        else
            echo "[FAIL] p2p-host did not become ready within 30 seconds" >&2
        fi
        cat "$HOST_LOG" >&2
        exit 1
    done
    echo "[FAIL] p2p-host could not be restarted after multiple attempts" >&2
    cat "$HOST_LOG" >&2
    exit 1
}
# ---- Start local relay if needed ----
if [[ "$USE_INTERNAL_RELAY" == "1" ]]; then
if cli_test_is_local_p2p_relay "$RELAY"; then
cli_test_start_p2p_relay
P2P_RELAY_STARTED=1
else
echo "[INFO] USE_INTERNAL_RELAY=1 but RELAY is not local ($RELAY), skipping"
fi
fi
start_host
# Common env vars passed to both vitest runs
P2P_ENV=(
P2P_TEST_ROOM_ID="$ROOM_ID"
P2P_TEST_PASSPHRASE="$PASSPHRASE"
P2P_TEST_HOST_PEER_NAME="$HOST_PEER_NAME"
P2P_TEST_RELAY="$RELAY"
P2P_TEST_APP_ID="$APP_ID"
P2P_TEST_HANDOFF_FILE="$HANDOFF_FILE"
P2P_TEST_UPLOAD_PEER_NAME="$UPLOAD_PEER_NAME"
P2P_TEST_DOWNLOAD_PEER_NAME="$DOWNLOAD_PEER_NAME"
P2P_TEST_UPLOAD_VAULT_NAME="$UPLOAD_VAULT_NAME"
P2P_TEST_DOWNLOAD_VAULT_NAME="$DOWNLOAD_VAULT_NAME"
)
cd "$REPO_ROOT"
# ---- Phase 1: Upload ----
# Each vitest run gets a fresh browser process, so Trystero's module-level
# global state (occupiedRooms, didInit, etc.) is clean for every phase.
echo "[INFO] running P2P vitest — upload phase"
env "${P2P_ENV[@]}" \
npx dotenv-cli -e .env -e .test.env -- \
vitest run --config vitest.config.p2p.ts test/suitep2p/syncp2p.p2p-up.test.ts
echo "[INFO] upload phase completed"
# ---- Phase 2: Download ----
# Keep the same host process alive so its database handle and relay presence stay stable.
echo "[INFO] waiting 5s before download phase..."
sleep 5
echo "[INFO] running P2P vitest — download phase"
env "${P2P_ENV[@]}" \
npx dotenv-cli -e .env -e .test.env -- \
vitest run --config vitest.config.p2p.ts test/suitep2p/syncp2p.p2p-down.test.ts
echo "[INFO] download phase completed"
echo "[INFO] P2P vitest suite completed"

View File

@@ -0,0 +1,175 @@
/**
* P2P-specific sync helpers.
*
* Derived from test/suite/sync_common.ts but with all acceptWebPeer() calls
* removed. When using a CLI p2p-host with P2P_AutoAcceptingPeers="~.*", peer
* acceptance is automatic and no Playwright dialog interaction is needed.
*/
import { expect } from "vitest";
import { waitForIdle, type LiveSyncHarness } from "../harness/harness";
import { RemoteTypes, type ObsidianLiveSyncSettings } from "@/lib/src/common/types";
import { delay } from "@/lib/src/common/utils";
import { LiveSyncTrysteroReplicator } from "@/lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import { waitTaskWithFollowups } from "../lib/util";
const P2P_REPLICATION_TIMEOUT_MS = 180000;
/**
 * Probe that a WebSocket relay is reachable.
 * Opens a throw-away connection to `relayUrl`: resolves once the socket
 * opens (and closes it), rejects on a connection error or after a
 * 5-second timeout.
 */
async function testWebSocketConnection(relayUrl: string): Promise<void> {
    return new Promise((resolve, reject) => {
        console.log(`[P2P Debug] Testing WebSocket connection to ${relayUrl}`);
        try {
            const probe = new WebSocket(relayUrl);
            const timeoutHandle = setTimeout(() => {
                probe.close();
                reject(new Error(`WebSocket connection to ${relayUrl} timed out`));
            }, 5000);
            probe.onopen = () => {
                clearTimeout(timeoutHandle);
                console.log(`[P2P Debug] WebSocket connected to ${relayUrl} successfully`);
                probe.close();
                resolve();
            };
            probe.onerror = (event) => {
                clearTimeout(timeoutHandle);
                console.error(`[P2P Debug] WebSocket error connecting to ${relayUrl}:`, event);
                reject(new Error(`WebSocket connection to ${relayUrl} failed`));
            };
        } catch (constructError) {
            console.error(`[P2P Debug] WebSocket constructor threw:`, constructError);
            reject(constructError);
        }
    });
}
/**
 * Wait until at least one P2P peer advertisement is visible.
 *
 * No-op unless the harness is configured for REMOTE_P2P. Ensures the
 * Trystero replicator is open and serving (opening it if needed, with a
 * preliminary relay connectivity probe), then polls `knownAdvertisements`
 * until a peer appears or the retries are exhausted.
 *
 * @throws if the active replicator is not a LiveSyncTrysteroReplicator,
 *         the server never starts serving, or no peer connects in time.
 */
async function waitForP2PPeers(harness: LiveSyncHarness) {
    if (harness.plugin.core.settings.remoteType === RemoteTypes.REMOTE_P2P) {
        const maxRetries = 20;
        let retries = maxRetries;
        const replicator = await harness.plugin.core.services.replicator.getActiveReplicator();
        console.log("[P2P Debug] replicator type:", replicator?.constructor?.name);
        if (!(replicator instanceof LiveSyncTrysteroReplicator)) {
            throw new Error("Replicator is not an instance of LiveSyncTrysteroReplicator");
        }
        // Ensure P2P is open (getActiveReplicator returns a fresh instance that may not be open yet)
        if (!replicator.server?.isServing) {
            console.log("[P2P Debug] P2P not yet serving, calling open()");
            // Probe the first configured relay so a dead relay is diagnosed
            // before open() fails less informatively. Failure is logged only.
            const relay = harness.plugin.core.settings.P2P_relays?.split(",")[0]?.trim();
            if (relay) {
                try {
                    await testWebSocketConnection(relay);
                } catch (e) {
                    console.error("[P2P Debug] WebSocket connectivity test failed:", e);
                }
            }
            try {
                await replicator.open();
                console.log("[P2P Debug] open() completed, isServing:", replicator.server?.isServing);
            } catch (e) {
                console.error("[P2P Debug] open() threw:", e);
            }
        }
        // Wait (max ~15s) for the P2P server to actually start (room joined).
        for (let i = 0; i < 30; i++) {
            const serving = replicator.server?.isServing;
            console.log(`[P2P Debug] isServing: ${serving} (${i}/30)`);
            if (serving) break;
            await delay(500);
            if (i === 29) throw new Error("P2P server did not start in time.");
        }
        // NOTE(review): both delay(1000) calls run each iteration, so every
        // retry waits ~2s (total ~40s for 20 retries) — confirm intended.
        while (retries-- > 0) {
            await delay(1000);
            const peers = replicator.knownAdvertisements;
            if (peers && peers.length > 0) {
                console.log("P2P peers connected:", peers);
                return;
            }
            console.log(`Waiting for any P2P peers to be connected... ${maxRetries - retries}/${maxRetries}`);
            console.dir(peers);
            await delay(1000);
        }
        console.log("Failed to connect P2P peers after retries");
        throw new Error("P2P peers did not connect in time.");
    }
}
/**
 * Close P2P replication channels for a REMOTE_P2P harness (no-op otherwise).
 * @throws if the active replicator is not a LiveSyncTrysteroReplicator
 */
export async function closeP2PReplicatorConnections(harness: LiveSyncHarness) {
    if (harness.plugin.core.settings.remoteType === RemoteTypes.REMOTE_P2P) {
        const replicator = await harness.plugin.core.services.replicator.getActiveReplicator();
        if (!(replicator instanceof LiveSyncTrysteroReplicator)) {
            throw new Error("Replicator is not an instance of LiveSyncTrysteroReplicator");
        }
        // Close twice with a short gap — presumably to catch channels raised
        // between the first close and teardown; TODO confirm the intent.
        replicator.closeReplication();
        await delay(30);
        replicator.closeReplication();
        // Grace period so in-flight teardown settles before tests proceed.
        await delay(1000);
        console.log("P2P replicator connections closed");
    }
}
/**
 * Run one replication pass.
 *
 * For REMOTE_P2P: waits for peers, picks the host peer and syncs from it,
 * bounded by P2P_REPLICATION_TIMEOUT_MS. For any other remote type, defers
 * to the core replication service.
 * @throws if no peer is available or the sync result carries an error
 */
export async function performReplication(harness: LiveSyncHarness) {
    await waitForP2PPeers(harness);
    await delay(500);
    if (harness.plugin.core.settings.remoteType === RemoteTypes.REMOTE_P2P) {
        const replicator = await harness.plugin.core.services.replicator.getActiveReplicator();
        if (!(replicator instanceof LiveSyncTrysteroReplicator)) {
            throw new Error("Replicator is not an instance of LiveSyncTrysteroReplicator");
        }
        const knownPeers = replicator.knownAdvertisements;
        // NOTE(review): prefers a peer whose name starts with "vault-host",
        // but the CLI host default name is "p2p-cli-host"; the fallback to
        // knownPeers[0] covers it — confirm whether the prefix is stale.
        const targetPeer = knownPeers.find((peer) => peer.name.startsWith("vault-host")) ?? knownPeers[0] ?? undefined;
        if (!targetPeer) {
            throw new Error("No connected P2P peer to synchronise with");
        }
        const p = replicator.sync(targetPeer.peerId, true);
        // Poll-with-followups wrapper enforces the overall timeout.
        const result = await waitTaskWithFollowups(p, () => Promise.resolve(), P2P_REPLICATION_TIMEOUT_MS, 500);
        if (result && typeof result === "object" && "error" in result && result.error) {
            throw result.error;
        }
        return result;
    }
    return await harness.plugin.core.services.replication.replicate(true);
}
/**
 * Close replication for the harness: P2P remotes delegate to
 * closeP2PReplicatorConnections; other remotes close the active replicator
 * (if any) and wait for the harness to go idle.
 */
export async function closeReplication(harness: LiveSyncHarness) {
    const { core } = harness.plugin;
    if (core.settings.remoteType === RemoteTypes.REMOTE_P2P) {
        return await closeP2PReplicatorConnections(harness);
    }
    const activeReplicator = await core.services.replicator.getActiveReplicator();
    if (!activeReplicator) {
        console.log("No active replicator to close");
        return;
    }
    await activeReplicator.closeReplication();
    await waitForIdle(harness);
    console.log("Replication closed");
}
/**
 * Prepare (create or reset) the remote database before a test run.
 *
 * Skipped entirely for REMOTE_P2P, which has no remote database. Otherwise
 * resets or creates the remote, marks it resolved, and asserts that the
 * remote status can be fetched.
 *
 * @param harness test harness wrapping the plugin
 * @param setting the settings the test will run with (only remoteType is read)
 * @param shouldReset true to wipe the remote instead of creating it
 */
export async function prepareRemote(harness: LiveSyncHarness, setting: ObsidianLiveSyncSettings, shouldReset = false) {
    // P2P has no remote database to initialise — skip
    if (setting.remoteType === RemoteTypes.REMOTE_P2P) return;
    if (shouldReset) {
        // Small settle delay before a destructive reset.
        await delay(1000);
        await harness.plugin.core.services.replicator
            .getActiveReplicator()
            ?.tryResetRemoteDatabase(harness.plugin.core.settings);
    } else {
        await harness.plugin.core.services.replicator
            .getActiveReplicator()
            ?.tryCreateRemoteDatabase(harness.plugin.core.settings);
    }
    await harness.plugin.core.services.replicator
        .getActiveReplicator()
        ?.markRemoteResolved(harness.plugin.core.settings);
    const status = await harness.plugin.core.services.replicator
        .getActiveReplicator()
        ?.getRemoteStatus(harness.plugin.core.settings);
    console.log("Remote status:", status);
    expect(status).not.toBeFalsy();
}

View File

@@ -0,0 +1,165 @@
/**
* P2P Replication Tests — Download phase (process 2 of 2)
*
* Executed by run-p2p-tests.sh as the second vitest process, after the
* upload phase has completed and the CLI host holds all the data.
*
* Reads the handoff JSON written by the upload phase to know which files
* to verify, then replicates from the CLI host and checks every file.
*/
import { afterAll, beforeAll, beforeEach, describe, expect, it, test } from "vitest";
import { generateHarness, waitForIdle, waitForReady, type LiveSyncHarness } from "../harness/harness";
import {
PREFERRED_SETTING_SELF_HOSTED,
RemoteTypes,
type FilePath,
type ObsidianLiveSyncSettings,
AutoAccepting,
} from "@/lib/src/common/types";
import { DummyFileSourceInisialised, generateBinaryFile, generateFile } from "../utils/dummyfile";
import { defaultFileOption, testFileRead } from "../suite/db_common";
import { delay } from "@/lib/src/common/utils";
import { closeReplication, performReplication } from "./sync_common_p2p";
import { settingBase } from "../suite/variables";
const env = (import.meta as any).env;
const ROOM_ID: string = env.P2P_TEST_ROOM_ID ?? "p2p-test-room";
const PASSPHRASE: string = env.P2P_TEST_PASSPHRASE ?? "p2p-test-pass";
const HOST_PEER_NAME: string = env.P2P_TEST_HOST_PEER_NAME ?? "p2p-cli-host";
const RELAY: string = env.P2P_TEST_RELAY ?? "ws://localhost:4000/";
const APP_ID: string = env.P2P_TEST_APP_ID ?? "self-hosted-livesync-vitest-p2p";
const DOWNLOAD_PEER_NAME: string = env.P2P_TEST_DOWNLOAD_PEER_NAME ?? `p2p-download-${Date.now()}`;
const DOWNLOAD_VAULT_NAME: string = env.P2P_TEST_DOWNLOAD_VAULT_NAME ?? `TestVaultDownload-${Date.now()}`;
const HANDOFF_FILE: string = env.P2P_TEST_HANDOFF_FILE ?? "/tmp/p2p-test-handoff.json";
console.log("[P2P Down] ROOM_ID:", ROOM_ID, "HOST:", HOST_PEER_NAME, "RELAY:", RELAY, "APP_ID:", APP_ID);
console.log("[P2P Down] HANDOFF_FILE:", HANDOFF_FILE);
const p2pSetting: ObsidianLiveSyncSettings = {
...settingBase,
...PREFERRED_SETTING_SELF_HOSTED,
showVerboseLog: true,
remoteType: RemoteTypes.REMOTE_P2P,
encrypt: true,
passphrase: PASSPHRASE,
usePathObfuscation: true,
P2P_Enabled: true,
P2P_AppID: APP_ID,
handleFilenameCaseSensitive: false,
P2P_AutoAccepting: AutoAccepting.ALL,
P2P_AutoBroadcast: true,
P2P_AutoStart: true,
P2P_passphrase: PASSPHRASE,
P2P_roomID: ROOM_ID,
P2P_relays: RELAY,
P2P_AutoAcceptingPeers: "~.*",
P2P_SyncOnReplication: HOST_PEER_NAME,
};
const fileOptions = defaultFileOption;
const nameFile = (type: string, ext: string, size: number) => `p2p-cli-test-${type}-file-${size}.${ext}`;
/**
 * Read the handoff JSON produced by the upload phase.
 * Uses the vitest browser-command bridge to read the file from the Node.js
 * host; the browser context has no direct filesystem access.
 * @returns the md/binary file-size lists the upload phase recorded
 */
async function readHandoff(): Promise<{ fileSizeMd: number[]; fileSizeBins: number[] }> {
    const { commands } = await import("@vitest/browser/context");
    const raw = await commands.readHandoffFile(HANDOFF_FILE);
    return JSON.parse(raw);
}
describe("P2P Replication — Download", () => {
    let harness: LiveSyncHarness;
    let mdSizes: number[] = [];
    let binSizes: number[] = [];
    // Base P2P settings plus this peer's own device name.
    const settings: ObsidianLiveSyncSettings = {
        ...p2pSetting,
        P2P_DevicePeerName: DOWNLOAD_PEER_NAME,
    };
    beforeAll(async () => {
        await DummyFileSourceInisialised;
        const handoff = await readHandoff();
        mdSizes = handoff.fileSizeMd;
        binSizes = handoff.fileSizeBins;
        console.log("[P2P Down] handoff loaded — md sizes:", mdSizes, "bin sizes:", binSizes);
        const vaultName = DOWNLOAD_VAULT_NAME;
        console.log(`[P2P Down] BeforeAll - Vault: ${vaultName}`);
        console.log(`[P2P Down] Peer name: ${DOWNLOAD_PEER_NAME}`);
        harness = await generateHarness(vaultName, settings);
        await waitForReady(harness);
        // Replicate twice with settling delays before any test runs, so the
        // peer connection is established and pending documents have arrived.
        await performReplication(harness);
        await waitForIdle(harness);
        await delay(1000);
        await performReplication(harness);
        await waitForIdle(harness);
        await delay(3000);
    });
    beforeEach(async () => {
        // Pull again before every test so late-arriving documents land.
        await performReplication(harness);
        await waitForIdle(harness);
    });
    afterAll(async () => {
        await closeReplication(harness);
        await harness.dispose();
        await delay(1000);
    });
    it("should be instantiated and defined", () => {
        expect(harness.plugin).toBeDefined();
        expect(harness.plugin.app).toBe(harness.app);
    });
    it("should have services initialized", () => {
        expect(harness.plugin.core.services).toBeDefined();
    });
    it("should have local database initialized", () => {
        expect(harness.plugin.core.localDatabase).toBeDefined();
        expect(harness.plugin.core.localDatabase.isReady).toBe(true);
    });
    it("should have synchronised the stored file", async () => {
        await testFileRead(harness, nameFile("store", "md", 0), "Hello, World!", fileOptions);
    });
    it("should have synchronised files with different content", async () => {
        const cases: [string, string][] = [
            ["test-diff-1", "Content A"],
            ["test-diff-2", "Content B"],
            ["test-diff-3", "Content C"],
        ];
        for (const [stem, body] of cases) {
            await testFileRead(harness, nameFile(stem, "md", 0), body, fileOptions);
        }
    });
    // NOTE: test.each cannot use variables populated in beforeAll, so the
    // two tests below iterate over sizes loaded from the handoff file.
    it("should have synchronised all large md files", async () => {
        for (const size of mdSizes) {
            const expected = Array.from(generateFile(size)).join("");
            const path = nameFile("large", "md", size);
            if (!harness.plugin.core.services.vault.isFileSizeTooLarge(size)) {
                await testFileRead(harness, path, expected, fileOptions);
                continue;
            }
            // Oversized files are filtered out, so no DB entry may exist.
            const entry = await harness.plugin.core.localDatabase.getDBEntry(path as FilePath);
            expect(entry).toBe(false);
        }
    });
    it("should have synchronised all binary files", async () => {
        for (const size of binSizes) {
            const path = nameFile("binary", "bin", size);
            if (harness.plugin.core.services.vault.isFileSizeTooLarge(size)) {
                // Oversized files are filtered out, so no DB entry may exist.
                const entry = await harness.plugin.core.localDatabase.getDBEntry(path as FilePath);
                expect(entry).toBe(false);
                continue;
            }
            const expected = new Blob([...generateBinaryFile(size)], { type: "application/octet-stream" });
            await testFileRead(harness, path, expected, fileOptions);
        }
    });
});

View File

@@ -0,0 +1,161 @@
/**
* P2P Replication Tests — Upload phase (process 1 of 2)
*
* Executed by run-p2p-tests.sh as the first vitest process.
* Writes files into the local DB, replicates them to the CLI host,
* then writes a handoff JSON so the download process knows what to verify.
*
* Trystero has module-level global state (occupiedRooms, didInit, etc.)
* that cannot be safely reused across upload→download within the same
* browser process. Running upload and download as separate vitest
* invocations gives each phase a fresh browser context.
*/
import { afterAll, beforeAll, describe, expect, it, test } from "vitest";
import { generateHarness, waitForIdle, waitForReady, type LiveSyncHarness } from "../harness/harness";
import {
PREFERRED_SETTING_SELF_HOSTED,
RemoteTypes,
type ObsidianLiveSyncSettings,
AutoAccepting,
} from "@/lib/src/common/types";
import {
DummyFileSourceInisialised,
FILE_SIZE_BINS,
FILE_SIZE_MD,
generateBinaryFile,
generateFile,
} from "../utils/dummyfile";
import { checkStoredFileInDB, defaultFileOption, testFileWrite } from "../suite/db_common";
import { delay } from "@/lib/src/common/utils";
import { closeReplication, performReplication } from "./sync_common_p2p";
import { settingBase } from "../suite/variables";
// --- Test configuration (upload phase) ----------------------------------
// Vite injects import.meta.env; cast because the P2P_TEST_* keys are not
// part of the declared env typing.
const env = (import.meta as any).env;
const ROOM_ID: string = env.P2P_TEST_ROOM_ID ?? "p2p-test-room";
const PASSPHRASE: string = env.P2P_TEST_PASSPHRASE ?? "p2p-test-pass";
// Peer name of the CLI host started by run-p2p-tests.sh.
const HOST_PEER_NAME: string = env.P2P_TEST_HOST_PEER_NAME ?? "p2p-cli-host";
const RELAY: string = env.P2P_TEST_RELAY ?? "ws://localhost:4000/";
const APP_ID: string = env.P2P_TEST_APP_ID ?? "self-hosted-livesync-vitest-p2p";
// Peer and vault names default to timestamped values so repeated local runs
// do not collide; CI overrides them through P2P_TEST_* environment variables.
const UPLOAD_PEER_NAME: string = env.P2P_TEST_UPLOAD_PEER_NAME ?? `p2p-upload-${Date.now()}`;
const UPLOAD_VAULT_NAME: string = env.P2P_TEST_UPLOAD_VAULT_NAME ?? `TestVaultUpload-${Date.now()}`;
// Path written by run-p2p-tests.sh; the download phase reads it back.
const HANDOFF_FILE: string = env.P2P_TEST_HANDOFF_FILE ?? "/tmp/p2p-test-handoff.json";
console.log("[P2P Up] ROOM_ID:", ROOM_ID, "HOST:", HOST_PEER_NAME, "RELAY:", RELAY, "APP_ID:", APP_ID);
console.log("[P2P Up] HANDOFF_FILE:", HANDOFF_FILE);
// Settings for the uploading peer: P2P remote with encryption enabled,
// auto-start/auto-broadcast, and automatic acceptance of peers matching the
// "~.*" pattern (presumably a match-all — TODO confirm against settings docs).
const p2pSetting: ObsidianLiveSyncSettings = {
    ...settingBase,
    ...PREFERRED_SETTING_SELF_HOSTED,
    showVerboseLog: true,
    remoteType: RemoteTypes.REMOTE_P2P,
    encrypt: true,
    passphrase: PASSPHRASE,
    usePathObfuscation: true,
    P2P_Enabled: true,
    P2P_AppID: APP_ID,
    handleFilenameCaseSensitive: false,
    P2P_AutoAccepting: AutoAccepting.ALL,
    P2P_AutoBroadcast: true,
    P2P_AutoStart: true,
    P2P_passphrase: PASSPHRASE,
    P2P_roomID: ROOM_ID,
    P2P_relays: RELAY,
    P2P_AutoAcceptingPeers: "~.*",
    // Sync with the CLI host peer whenever replication is performed.
    P2P_SyncOnReplication: HOST_PEER_NAME,
};
const fileOptions = defaultFileOption;
// File names must match the download phase exactly: both phases derive them
// from the same (type, ext, size) triple.
const nameFile = (type: string, ext: string, size: number) => `p2p-cli-test-${type}-file-${size}.${ext}`;
/**
 * Persist the handoff JSON through the custom vitest browser command so the
 * download phase knows which file sizes to verify.
 */
async function writeHandoff() {
    const payload = JSON.stringify({
        fileSizeMd: FILE_SIZE_MD,
        fileSizeBins: FILE_SIZE_BINS,
    });
    // Imported lazily: the browser context module is only usable while the
    // test is running inside the vitest browser runner.
    const browserContext = await import("@vitest/browser/context");
    await browserContext.commands.writeHandoffFile(HANDOFF_FILE, payload);
    console.log("[P2P Up] handoff written to", HANDOFF_FILE);
}
describe("P2P Replication — Upload", () => {
    let harness: LiveSyncHarness;
    // Base P2P settings plus this peer's own device name.
    const settings: ObsidianLiveSyncSettings = {
        ...p2pSetting,
        P2P_DevicePeerName: UPLOAD_PEER_NAME,
    };
    beforeAll(async () => {
        await DummyFileSourceInisialised;
        const vaultName = UPLOAD_VAULT_NAME;
        console.log(`[P2P Up] BeforeAll - Vault: ${vaultName}`);
        console.log(`[P2P Up] Peer name: ${UPLOAD_PEER_NAME}`);
        harness = await generateHarness(vaultName, settings);
        await waitForReady(harness);
        expect(harness.plugin).toBeDefined();
        await waitForIdle(harness);
    });
    afterAll(async () => {
        await closeReplication(harness);
        await harness.dispose();
        await delay(1000);
    });
    it("should be instantiated and defined", () => {
        expect(harness.plugin).toBeDefined();
        expect(harness.plugin.app).toBe(harness.app);
    });
    it("should have services initialized", () => {
        expect(harness.plugin.core.services).toBeDefined();
    });
    it("should have local database initialized", () => {
        expect(harness.plugin.core.localDatabase).toBeDefined();
        expect(harness.plugin.core.localDatabase.isReady).toBe(true);
    });
    it("should create a file", async () => {
        await testFileWrite(harness, nameFile("store", "md", 0), "Hello, World!", false, fileOptions);
    });
    it("should create several files with different content", async () => {
        const cases: [string, string][] = [
            ["test-diff-1", "Content A"],
            ["test-diff-2", "Content B"],
            ["test-diff-3", "Content C"],
        ];
        for (const [stem, body] of cases) {
            await testFileWrite(harness, nameFile(stem, "md", 0), body, false, fileOptions);
        }
    });
    test.each(FILE_SIZE_MD)("should create large md file of size %i bytes", async (size) => {
        const body = Array.from(generateFile(size)).join("");
        const path = nameFile("large", "md", size);
        if (harness.plugin.core.services.vault.isFileSizeTooLarge(size)) {
            // Oversized files are intentionally skipped; keep the test green.
            expect(true).toBe(true);
            return;
        }
        await testFileWrite(harness, path, body, false, fileOptions);
    });
    test.each(FILE_SIZE_BINS)("should create binary file of size %i bytes", async (size) => {
        const body = new Blob([...generateBinaryFile(size)], { type: "application/octet-stream" });
        const path = nameFile("binary", "bin", size);
        await testFileWrite(harness, path, body, true, fileOptions);
        // Only files under the size limit are stored, so only verify those.
        if (!harness.plugin.core.services.vault.isFileSizeTooLarge(size)) {
            await checkStoredFileInDB(harness, path, body, fileOptions);
        }
    });
    it("should replicate uploads to CLI host", async () => {
        // Replicate twice: the first pass establishes the connection and the
        // second ensures everything written above has been transferred.
        await performReplication(harness);
        await performReplication(harness);
    });
    it("should write handoff file for download phase", async () => {
        await writeHandoff();
    });
});

View File

@@ -3,64 +3,53 @@ Since 19th July, 2025 (beta1 in 0.25.0-beta1, 13th July, 2025)
The head note of 0.25 is now in [updates_old.md](https://github.com/vrtmrz/obsidian-livesync/blob/main/updates_old.md). Because 0.25 got a lot of updates, thankfully, compatibility is kept and we do not need breaking changes! In other words, once it has become stable enough, the next version will be v1.0.0. At least, that is my hope.
## Unnamed 12th March, 2026
## 0.25.53
12th March, 2026
17th March, 2026
I did wonder whether I should have released a minor version update, but when I actually tested it, compatibility seemed to be intact, so I didn't. Hmm.
### Fixed
- Fixed Journal Sync had not been working on some timing, due to a compatibility issue (for a long time).
- ServiceFileAccessBase now correctly handles the reading of binary files.
#### P2P Synchronisation
### Internal behaviour change (or fix)
- Fixed flaky timing issues in P2P synchronisation.
- No longer unexpected `Unhandled Rejections` during P2P operations (waiting for acceptance).
- Journal Replicator now yields true after the replication is done.
#### Journal Sync
- Fixed an issue where some conflicts cannot be resolved in Journal Sync.
- Many minor fixes have been made for better stability and reliability.
### Tests
- Rewrite P2P end-to-end tests to use the CLI as a host.
### CLI
- Add more tests.
- Object Storage support has also been confirmed (and fixed) in CLI.
We have previously developed FileSystem LiveSync and various other components in a separate repository, but updates have been significantly delayed, and we have been plagued by compatibility issues. Now, a CLI tool using the same core logic is emerging. This does not directly manipulate the file system, but it offers a more convenient way of working and can also communicate with Object Storage. We can also resolve conflicts. Please refer to the code in `src/apps/cli` for the [self-hosted-livesync-cli](./src/apps/cli/README.md) for more details.
- Add `self-hosted-livesync-cli` to `src/apps/cli` as a headless and dedicated version.
- P2P sync and Object Storage are also supported in the CLI.
- Yes, we have finally managed to 'get one file'.
- Also, no more need for a [LiveSync PeerServer](https://github.com/vrtmrz/livesync-serverpeer) for virtual environments! The CLI can do it.
- Now binary files are also supported in the CLI.
### Refactored or internal changes
## Unnamed 11th March, 2026
11th March, 2026 (second commit).
### Refactored
- Offline change scanner and the local database preparation has been separated.
- Set default priority for processFileEvent and processSynchroniseResult for the place for adding hooks.
- ControlService now provides the readiness for processing operations.
- DatabaseService now able to modify database opening options on derived classes.
- Now `useOfflineScanner`, `useCheckRemoteSize`, and `useRedFlagFeatures` are set from `main.ts`, instead of `LiveSyncBaseCore`.
### Fixed
- ServiceFileAccessBase now correctly handles the reading of binary files.
- HeadlessAPIService now correctly provides the online status (always online) to the plug-in.
- Non-worker version of bgWorker now correctly handles some functions.
### New something
- Add `self-hosted-livesync-cli` to `src/apps/cli` as a headless, and a dedicated version.
## Unnamed 11th March, 2026
11th March, 2026
Now, Self-hosted LiveSync has finally begun to be split into the Self-hosted LiveSync plugin for Obsidian, and a properly abstracted version of it.
This may not offer much benefit to Obsidian plugin users, or might even cause a slight inconvenience, but I believe it will certainly help improve testability and make the ecosystem better.
However, I do not see the point in putting something with little benefit into beta, so I am handling this on the alpha branch. I would actually have preferred to create an R&D branch, but I was not keen on the ampersand, and I feel it will eventually become a proper beta anyway.
### Refactored
- Separated `ObsidianLiveSyncPlugin` into `ObsidianLiveSyncPlugin` and `LiveSyncBaseCore`.
- Now `LiveSyncCore` indicates the type specified version of `LiveSyncBaseCore`.
- Referencing `plugin.xxx` has been rewritten to referencing the corresponding service or `core.xxx`.
### Internal API changes
- Offline change scanner and the local database preparation have been separated.
- Set default priority for processFileEvent and processSynchroniseResult for the place to add hooks.
- ControlService now provides the readiness for processing operations.
- DatabaseService is now able to modify database opening options on derived classes.
- Now `useOfflineScanner`, `useCheckRemoteSize`, and `useRedFlagFeatures` are set from `main.ts`, instead of `LiveSyncBaseCore`.
- Storage Access APIs are now yielding Promises. This is to allow more limited storage platforms to be supported.
- Journal Replicator now yields true after the replication is done.
### R&D
@@ -264,67 +253,5 @@ This release is identical to 0.25.41-patched-3, except for the version number.
- Migrated from the outdated, unstable platform abstraction layer to services.
- A bit more services will be added in the future for better maintainability.
## 0.25.41
24th January, 2026
### Fixed
- No longer `No available splitter for settings!!` errors occur after fetching old remote settings while rebuilding local database. (#748)
### Improved
- Boot sequence warning is now kept in the in-editor notification area.
### New feature
- We can now set the maximum modified time for reflect events in the settings. (for #754)
- This setting can be configured from `Patches` -> `Remediation` in the settings dialogue.
- Enabling this setting will restrict the propagation from the database to storage to only those changes made before the specified date and time.
- This feature is primarily intended for recovery purposes. After placing `redflag.md` in an empty vault and importing the Self-hosted LiveSync configuration, please perform this configuration, and then fetch the local database from the remote.
- This feature is useful when we want to prevent recent unwanted changes from being reflected in the local storage.
### Refactored
- Module to service refactoring has been started for better maintainability:
- UI module has been moved to UI service.
### Behaviour change
- Default chunk splitter version has been changed to `Rabin-Karp` for new installations.
## 0.25.40
23rd January, 2026
### Fixed
- Fixed an issue where some events were not triggered correctly after the refactoring in 0.25.39.
## 0.25.39
23rd January, 2026
Also no behaviour changes or fixes in this release. Just refactoring for better maintainability. Thank you for your patience! I will address some of the reported issues soon.
However, this is not a minor refactoring, so please be careful. Let me know if you find any unexpected behaviour after this update.
### Refactored
- Rewrite the service's binding/handler assignment systems
- Removed loopholes that allowed traversal between services to clarify dependencies.
- Consolidated the hidden state-related state, the handler, and the addition of bindings to the handler into a single object.
- Currently, functions that can have handlers added implement either addHandler or setHandler directly on the function itself.
I understand there are differing opinions on this, but for now, this is how it stands.
- Services now possess a Context. Please ensure each platform has a class that inherits from ServiceContext.
- To permit services to be dynamically bound, the services themselves are now defined by interfaces.
## 0.25.38
17th January, 2026
### Fixed
- Fixed an issue where indexedDB would not close correctly on some environments, causing unexpected errors during database operations.
Full notes are in
[updates_old.md](https://github.com/vrtmrz/obsidian-livesync/blob/main/updates_old.md).

View File

@@ -3,6 +3,177 @@ Since 19th July, 2025 (beta1 in 0.25.0-beta1, 13th July, 2025)
The head note of 0.25 is now in [updates_old.md](https://github.com/vrtmrz/obsidian-livesync/blob/main/updates_old.md). Because 0.25 got a lot of updates, thankfully, compatibility is kept and we do not need breaking changes! In other words, once it has become stable enough, the next version will be v1.0.0. At least, that is my hope.
## 0.25.53
17th March, 2026
I did wonder whether I should have released a minor version update, but when I actually tested it, compatibility seemed to be intact, so I didn't. Hmm.
### Fixed
#### P2P Synchronisation
- Fixed flaky timing issues in P2P synchronisation.
- No longer unexpected `Unhandled Rejections` during P2P operations (waiting for acceptance).
#### Journal Sync
- Fixed an issue where some conflicts cannot be resolved in Journal Sync.
- Many minor fixes have been made for better stability and reliability.
### Tests
- Rewrite P2P end-to-end tests to use the CLI as a host.
### CLI
We have previously developed FileSystem LiveSync and various other components in a separate repository, but updates have been significantly delayed, and we have been plagued by compatibility issues. Now, a CLI tool using the same core logic is emerging. This does not directly manipulate the file system, but it offers a more convenient way of working and can also communicate with Object Storage. We can also resolve conflicts. Please refer to the code in `src/apps/cli` for the [self-hosted-livesync-cli](./src/apps/cli/README.md) for more details.
- Add `self-hosted-livesync-cli` to `src/apps/cli` as a headless and dedicated version.
- P2P sync and Object Storage are also supported in the CLI.
- Yes, we have finally managed to 'get one file'.
- Also, no more need for a [LiveSync PeerServer](https://github.com/vrtmrz/livesync-serverpeer) for virtual environments! The CLI can do it.
- Now binary files are also supported in the CLI.
### Refactored or internal changes
- ServiceFileAccessBase now correctly handles the reading of binary files.
- HeadlessAPIService now correctly provides the online status (always online) to the plug-in.
- Non-worker version of bgWorker now correctly handles some functions.
- Separated `ObsidianLiveSyncPlugin` into `ObsidianLiveSyncPlugin` and `LiveSyncBaseCore`.
- Now `LiveSyncCore` indicates the type specified version of `LiveSyncBaseCore`.
- Referencing `plugin.xxx` has been rewritten to referencing the corresponding service or `core.xxx`.
- Offline change scanner and the local database preparation have been separated.
- Set default priority for processFileEvent and processSynchroniseResult for the place to add hooks.
- ControlService now provides the readiness for processing operations.
- DatabaseService is now able to modify database opening options on derived classes.
- Now `useOfflineScanner`, `useCheckRemoteSize`, and `useRedFlagFeatures` are set from `main.ts`, instead of `LiveSyncBaseCore`.
- Storage Access APIs are now yielding Promises. This is to allow more limited storage platforms to be supported.
- Journal Replicator now yields true after the replication is done.
### R&D
- Browser-version of Self-hosted LiveSync is now in development. This is not intended for public use now, but I will eventually make it available for testing.
- We can see the code in `src/apps/webapp` for the browser version.
## 0.25.52-patched-3
16th March, 2026
### Fixed
- Fixed flaky timing issues in P2P synchronisation.
- Fixed more binary file handling issues in CLI.
### Tests
- Rewrite P2P end-to-end tests to use the CLI as host.
## 0.25.52-patched-2
14th March, 2026
### Fixed
- No longer unexpected `Unhandled Rejections` during P2P operations (waiting for acceptance).
- Fixed an issue where conflicts cannot be resolved in Journal Sync
### CLI new features
- `mirror` command has been added to the CLI. This command is intended to mirror the storage to the local database.
- `p2p-sync`, `p2p-peers`, and `p2p-host` commands have been added to the CLI. These commands are intended for P2P synchronisation.
- Yes, no more need for a [LiveSync PeerServer](https://github.com/vrtmrz/livesync-serverpeer) for virtual environments! The CLI can handle it by itself.
## 0.25.52-patched-1
12th March, 2026
### Fixed
- Fixed Journal Sync had not been working on some timing, due to a compatibility issue (for a long time).
- ServiceFileAccessBase now correctly handles the reading of binary files.
- HeadlessAPIService now correctly provides the online status (always online) to the plug-in.
- Non-worker version of bgWorker now correctly handles some functions.
### Refactored
- Separated `ObsidianLiveSyncPlugin` into `ObsidianLiveSyncPlugin` and `LiveSyncBaseCore`.
- Now `LiveSyncCore` indicates the type specified version of `LiveSyncBaseCore`.
- Referencing `plugin.xxx` has been rewritten to referencing the corresponding service or `core.xxx`.
- Offline change scanner and the local database preparation have been separated.
- Set default priority for processFileEvent and processSynchroniseResult for the place to add hooks.
- ControlService now provides the readiness for processing operations.
- DatabaseService is now able to modify database opening options on derived classes.
- Now `useOfflineScanner`, `useCheckRemoteSize`, and `useRedFlagFeatures` are set from `main.ts`, instead of `LiveSyncBaseCore`.
### Internal API changes
- Storage Access APIs are now yielding Promises. This is to allow more limited storage platforms to be supported.
- Journal Replicator now yields true after the replication is done.
### CLI
We have previously developed FileSystem LiveSync and various other components in a separate repository, but updates have been significantly delayed, and we have been plagued by compatibility issues. Now, a CLI tool using the same core logic is emerging. This does not directly manipulate the file system, but it offers a more convenient way of working and can also communicate with Object Storage. We can also resolve conflicts. Please refer to the code in `src/apps/cli` for the [self-hosted-livesync-cli](./src/apps/cli/README.md) for more details.
- Add `self-hosted-livesync-cli` to `src/apps/cli` as a headless and dedicated version.
- Add more tests.
- Object Storage support has also been confirmed (and fixed) in CLI.
- Yes, we have finally managed to 'get one file'.
- Now binary files are also supported in the CLI.
### R&D
- Browser-version of Self-hosted LiveSync is now in development. This is not intended for public use now, but I will eventually make it available for testing.
- We can see the code in `src/apps/webapp` for the browser version.
## 0.25.52
9th March, 2026
Excuses: Too much `I`.
Whilst I had a fever, I could not figure it out at all, but once I felt better, I spotted the problem in about thirty seconds. I apologise for causing you concern. I am grateful for your patience.
I would like to devise a mechanism for running simple test scenarios. Now that we have got the Obsidian CLI up and running, it seems the perfect opportunity.
To improve the bus factor, we really need to organise the source code more thoroughly. Your cooperation and contributions would be greatly appreciated.
### Fixed
- No longer unexpected deletion-propagation occurs when the parent directory is not empty (#813).
### Revert reversions
- Reverted the reversion of ModuleCheckRemoteSize. Now it is back to the service feature.
## 0.25.51
7th March, 2026
### Reverted
- Reverted to ModuleRedFlag and ModuleInitializerFile to the previous version because of some unexpected issues. (#813)
- I will re-implement them in the future with better design and tests.
## 0.25.50
3rd March, 2026
Note: 0.25.49 has been skipped because of too verbose logging (credentials are logged in verbose level, but I realised that could lead to unexpected exposure on issue reporting). Please bump to 0.25.50 to get the fix if you are on 0.25.49. (No expected behaviour changes except the logging).
### Fixed
- No longer deleted files are not clickable in the Global History pane.
- Diff view now uses more specific classes (#803).
- A message of configuration mismatching slightly added for better understanding.
- Now it says `When replication is initiated manually via the command palette or ribbon, a dialogue box will open to address this.` to make it clear that the user can fix the issue by themselves.
### Refactored
- `ModuleRedFlag` has been refactored to `serviceFeatures/redFlag` and also tested.
- `ModuleInitializerFile` has been refactored to `lib/serviceFeatures/offlineScanner` and also tested.
## 0.25.48
2nd March, 2026

82
vitest.config.p2p.ts Normal file
View File

@@ -0,0 +1,82 @@
import { defineConfig, mergeConfig } from "vitest/config";
import { playwright } from "@vitest/browser-playwright";
import viteConfig from "./vitest.config.common";
import path from "path";
import dotenv from "dotenv";
import { grantClipboardPermissions, writeHandoffFile, readHandoffFile } from "./test/lib/commands";
// Load dotenv layers: `.env` first, then `.test.env`.
const defEnv = dotenv.config({ path: ".env" }).parsed;
const testEnv = dotenv.config({ path: ".test.env" }).parsed;
// Merge: dotenv files < process.env (so shell-injected vars like P2P_TEST_* take precedence)
const p2pEnv: Record<string, string> = {};
for (const [key, value] of Object.entries(process.env)) {
    // Forward every shell-provided P2P_TEST_* variable instead of a
    // hard-coded list: the old list missed P2P_TEST_UPLOAD_PEER_NAME /
    // P2P_TEST_DOWNLOAD_PEER_NAME and the vault-name variables that the
    // test files read, so shell values for those never took precedence.
    if (key.startsWith("P2P_TEST_") && value) {
        p2pEnv[key] = value;
    }
}
const env = Object.assign({}, defEnv, testEnv, p2pEnv);
// Headless unless a debugger or the vitest UI is explicitly requested.
const debuggerEnabled = env?.ENABLE_DEBUGGER === "true";
const enableUI = env?.ENABLE_UI === "true";
const headless = !debuggerEnabled && !enableUI;
// Compose the shared vitest config with the P2P-specific overrides below.
export default mergeConfig(
    viteConfig,
    defineConfig({
        resolve: {
            alias: {
                // Replace the real `obsidian` package with the test mock.
                obsidian: path.resolve(__dirname, "./test/harness/obsidian-mock.ts"),
            },
        },
        test: {
            env: env,
            // P2P replication is slow; allow up to 4 minutes per test/hook.
            testTimeout: 240000,
            hookTimeout: 240000,
            // Phases share a relay/room, so files must run one at a time.
            fileParallelism: false,
            isolate: true,
            watch: false,
            // Run all CLI-host P2P test files (*.p2p.test.ts, *.p2p-up.test.ts, *.p2p-down.test.ts)
            include: ["test/suitep2p/**/*.p2p*.test.ts"],
            browser: {
                isolate: true,
                // Only grantClipboardPermissions is needed; no openWebPeer/acceptWebPeer
                commands: {
                    grantClipboardPermissions,
                    // Handoff helpers bridge the upload and download phases.
                    writeHandoffFile,
                    readHandoffFile,
                },
                provider: playwright({
                    launchOptions: {
                        // Expose GC and relax TLS/security checks so the local
                        // relay and harness endpoints are reachable.
                        args: [
                            "--js-flags=--expose-gc",
                            "--allow-insecure-localhost",
                            "--disable-web-security",
                            "--ignore-certificate-errors",
                        ],
                    },
                }),
                enabled: true,
                screenshotFailures: false,
                instances: [
                    {
                        execArgv: ["--js-flags=--expose-gc"],
                        browser: "chromium",
                        headless,
                        isolate: true,
                        // Pause for the debugger only when explicitly enabled.
                        inspector: debuggerEnabled ? { waitForDebugger: true, enabled: true } : undefined,
                        printConsoleTrace: true,
                        // Repeated WebRTC reconnects can exhaust Chromium's
                        // PeerConnection quota; suppress only that error.
                        onUnhandledError(error) {
                            const msg = error.message || "";
                            if (msg.includes("Cannot create so many PeerConnections")) {
                                return false;
                            }
                        },
                    },
                ],
                headless,
                fileParallelism: false,
                ui: debuggerEnabled || enableUI ? true : false,
            },
        },
    })
);