cli: implement daemon startup sequence and CouchDB→local sync

- Add daemon command to help text and --interval/-i flag for polling mode
- Capture original sync settings before suspendAllSync() clobbers them
- Implement daemon startup: initial replication → mirror scan → restore settings
  → applySettings(), which triggers the full suspend/resume lifecycle and starts
  the _changes feed (see the sketch below)
- Register the processSynchroniseResult no-op only for non-daemon commands so the
  default handler can write incoming CouchDB changes to the local filesystem
- Polling mode: restore settings, wrap each poll in try/catch, and clear the
  poll timer (clearTimeout) on unload
- Warn when both liveSync and syncOnStart are false after restore (no-op config)
- Fix: only block indefinitely if daemon startup succeeded
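A minimal sketch of that startup order, assuming the service shapes match the diffs below (hypothetical types and parameter names, not the actual implementation):

// Hypothetical service shapes, for illustration only.
interface DaemonCore {
    services: {
        replication: { replicate(showMessage: boolean): Promise<boolean> };
        setting: { applyPartial(patch: object, direct: boolean): Promise<void> };
        control: { applySettings(): Promise<void> };
    };
}
async function daemonStartup(
    core: DaemonCore,
    originalSyncSettings: object,
    mirrorScan: () => Promise<boolean>
): Promise<boolean> {
    // 1. CouchDB → local PouchDB, so the mirror scan has content to work with.
    if (!(await core.services.replication.replicate(true))) return false;
    // 2. Reconcile PouchDB ↔ local filesystem.
    if (!(await mirrorScan())) return false;
    // 3. Restore the settings captured before suspendAllSync(), then run the
    //    suspend/resume lifecycle, which starts the _changes feed.
    await core.services.setting.applyPartial({ ...originalSyncSettings, suspendFileWatching: false }, true);
    await core.services.control.applySettings();
    return true; // the CLI blocks indefinitely only when this is true
}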
Andrew Leech
2026-03-31 21:39:47 +11:00
parent b6b153c0de
commit a4d5ef4620
6 changed files with 567 additions and 25 deletions

View File

@@ -112,6 +112,7 @@ export class NodeFileSystemAdapter implements IFileSystemAdapter<NodeFile, NodeF
this.fileCache.set(pathStr, file);
return file;
} catch {
// Evict so a deleted file is not returned by subsequent cache scans.
this.fileCache.delete(pathStr);
return null;
}
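The hunk shows only the tail of the method; for context, the same read-through-cache-with-eviction pattern as a self-contained sketch (hypothetical names, not the adapter's actual code):

// Sketch: cache on successful load, evict on failure so deletions are observed.
const fileCache = new Map<string, string>();
async function readCached(pathStr: string, load: (p: string) => Promise<string>): Promise<string | null> {
    const hit = fileCache.get(pathStr);
    if (hit !== undefined) return hit;
    try {
        const file = await load(pathStr); // throws if the file was deleted
        fileCache.set(pathStr, file);
        return file;
    } catch {
        // Evict so a deleted file is not returned by subsequent cache scans.
        fileCache.delete(pathStr);
        return null;
    }
}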

View File

@@ -0,0 +1,312 @@
import { describe, expect, it, vi, beforeEach, afterEach } from "vitest";
import { runCommand } from "./runCommand";
import type { CLIOptions } from "./types";
// Mock performFullScan so daemon tests don't require a real CouchDB connection.
vi.mock("@lib/serviceFeatures/offlineScanner", () => ({
performFullScan: vi.fn(async () => true),
}));
// Mock UnresolvedErrorManager to avoid event-hub side effects.
vi.mock("@lib/services/base/UnresolvedErrorManager", () => ({
UnresolvedErrorManager: class UnresolvedErrorManager {
showError() {}
clearError() {}
clearErrors() {}
},
}));
import * as offlineScanner from "@lib/serviceFeatures/offlineScanner";
function createCoreMock() {
return {
services: {
control: {
activated: Promise.resolve(),
applySettings: vi.fn(async () => {}),
},
setting: {
applyPartial: vi.fn(async () => {}),
currentSettings: vi.fn(() => ({ liveSync: true, syncOnStart: false })),
},
replication: {
replicate: vi.fn(async () => true),
},
appLifecycle: {
onUnload: {
addHandler: vi.fn(),
},
},
},
serviceModules: {
fileHandler: {
dbToStorage: vi.fn(async () => true),
storeFileToDB: vi.fn(async () => true),
},
storageAccess: {
readFileAuto: vi.fn(async () => ""),
writeFileAuto: vi.fn(async () => {}),
},
databaseFileAccess: {
fetch: vi.fn(async () => undefined),
},
},
} as any;
}
function makeDaemonOptions(interval?: number): CLIOptions {
return {
command: "daemon",
commandArgs: [],
databasePath: "/tmp/vault",
verbose: false,
force: false,
interval,
};
}
const baseContext = {
vaultPath: "/tmp/vault",
settingsPath: "/tmp/vault/.livesync/settings.json",
originalSyncSettings: {
liveSync: true,
syncOnStart: false,
periodicReplication: false,
syncOnSave: false,
syncOnEditorSave: false,
syncOnFileOpen: false,
syncAfterMerge: false,
},
} as any;
describe("daemon command", () => {
beforeEach(() => {
vi.restoreAllMocks();
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
it("calls performFullScan during startup", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(offlineScanner.performFullScan).toHaveBeenCalledTimes(1);
});
it("returns false when performFullScan fails", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(false);
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(false);
});
it("polling mode: calls setTimeout when interval option is set", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const setTimeoutSpy = vi.spyOn(globalThis, "setTimeout");
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
expect(setTimeoutSpy).toHaveBeenCalledTimes(1);
// Interval should be in milliseconds (30s → 30000ms)
expect(setTimeoutSpy).toHaveBeenCalledWith(expect.any(Function), 30000);
});
it("polling mode: applies settings with suspendFileWatching=false before setting interval", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(10), { ...baseContext, core });
expect(core.services.setting.applyPartial).toHaveBeenCalledWith(
expect.objectContaining({ suspendFileWatching: false }),
true
);
expect(core.services.control.applySettings).toHaveBeenCalledTimes(1);
});
it("liveSync mode: calls applyPartial and applySettings", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(core.services.setting.applyPartial).toHaveBeenCalledWith(
expect.objectContaining({
...baseContext.originalSyncSettings,
suspendFileWatching: false,
}),
true
);
expect(core.services.control.applySettings).toHaveBeenCalledTimes(1);
});
it("liveSync mode: logs warning when both liveSync and syncOnStart are false", async () => {
const core = createCoreMock();
core.services.setting.currentSettings = vi.fn(() => ({
liveSync: false,
syncOnStart: false,
}));
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(true);
const warningCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("liveSync and syncOnStart are both disabled")
);
expect(warningCalls.length).toBeGreaterThan(0);
});
it("liveSync mode: no warning when liveSync is true", async () => {
const core = createCoreMock();
core.services.setting.currentSettings = vi.fn(() => ({
liveSync: true,
syncOnStart: false,
}));
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
await runCommand(makeDaemonOptions(), { ...baseContext, core });
const warningCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("liveSync and syncOnStart are both disabled")
);
expect(warningCalls.length).toBe(0);
});
it("calls replicate before performFullScan", async () => {
const core = createCoreMock();
const callOrder: string[] = [];
core.services.replication.replicate = vi.fn(async () => {
callOrder.push("replicate");
return true;
});
vi.mocked(offlineScanner.performFullScan).mockImplementation(async () => {
callOrder.push("performFullScan");
return true;
});
await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(callOrder).toEqual(["replicate", "performFullScan"]);
});
it("returns false when initial replication fails", async () => {
const core = createCoreMock();
core.services.replication.replicate = vi.fn(async () => false);
vi.mocked(offlineScanner.performFullScan).mockClear();
const result = await runCommand(makeDaemonOptions(), { ...baseContext, core });
expect(result).toBe(false);
// performFullScan should NOT have been called
expect(offlineScanner.performFullScan).not.toHaveBeenCalled();
});
it("polling mode: registers onUnload handler that clears timeout", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
await runCommand(makeDaemonOptions(10), { ...baseContext, core });
// onUnload handler should have been registered
expect(core.services.appLifecycle.onUnload.addHandler).toHaveBeenCalledTimes(1);
const handler = core.services.appLifecycle.onUnload.addHandler.mock.calls[0][0];
// Spy on clearTimeout, then invoke the handler and assert it cleared the timer.
const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout");
await handler();
expect(clearTimeoutSpy).toHaveBeenCalledTimes(1);
});
it("polling backoff: interval escalates on failure, caps at 300000ms, then halves on recovery", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
vi.spyOn(console, "error").mockImplementation(() => {});
// startup replicate (call 1) succeeds; poll calls 2–7 fail; call 8 succeeds.
let callCount = 0;
core.services.replication.replicate = vi.fn(async () => {
callCount++;
if (callCount === 1) return true; // initial startup replicate
if (callCount <= 7) throw new Error("network failure");
return true; // recovery
});
const baseMs = 30 * 1000;
const setTimeoutSpy = vi.spyOn(globalThis, "setTimeout");
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
// After runCommand returns the first setTimeout has been scheduled.
// setTimeoutSpy.mock.calls[0] is the initial schedule (baseMs).
expect(setTimeoutSpy.mock.calls[0][1]).toBe(baseMs);
// Advance through 6 failure polls. After each failure the next setTimeout
// should be scheduled with a larger (or capped) interval.
// formula: min(base * 2^n, 300000). base=30000ms.
// failure 1: 30000*2=60000, failure 2: 30000*4=120000,
// failure 3: 30000*8=240000, failure 4: 30000*16=480000→capped, 5→cap, 6→cap
const expectedIntervals = [
baseMs * 2, // after failure 1: 60000
baseMs * 4, // after failure 2: 120000
baseMs * 8, // after failure 3: 240000
300_000, // after failure 4 (would be 480000, capped)
300_000, // after failure 5 (cap)
300_000, // after failure 6 (cap)
];
for (const expected of expectedIntervals) {
const prevCallCount = setTimeoutSpy.mock.calls.length;
await vi.advanceTimersByTimeAsync(setTimeoutSpy.mock.calls[prevCallCount - 1][1] as number);
const newCallCount = setTimeoutSpy.mock.calls.length;
expect(newCallCount).toBeGreaterThan(prevCallCount);
expect(setTimeoutSpy.mock.calls[newCallCount - 1][1]).toBe(expected);
}
// Now trigger the success poll — interval should halve each time toward base.
// After failure 6, consecutiveFailures=6, currentIntervalMs=300000.
// On success: consecutiveFailures=5, currentIntervalMs=150000.
const prevCallCount = setTimeoutSpy.mock.calls.length;
await vi.advanceTimersByTimeAsync(setTimeoutSpy.mock.calls[prevCallCount - 1][1] as number);
const afterSuccessCallCount = setTimeoutSpy.mock.calls.length;
expect(afterSuccessCallCount).toBeGreaterThan(prevCallCount);
// The interval after one success should be halved (300000 / 2 = 150000).
expect(setTimeoutSpy.mock.calls[afterSuccessCallCount - 1][1]).toBe(150_000);
});
it("polling error handling: replicate rejection is caught and console.error is called", async () => {
const core = createCoreMock();
vi.mocked(offlineScanner.performFullScan).mockResolvedValue(true);
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
// Make replicate succeed on the initial call (startup), then fail on the poll.
let callCount = 0;
core.services.replication.replicate = vi.fn(async () => {
callCount++;
if (callCount === 1) return true; // startup replicate
throw new Error("network failure");
});
const intervalMs = 30 * 1000;
await runCommand(makeDaemonOptions(30), { ...baseContext, core });
// Advance time to trigger the first poll callback and flush its async work.
await vi.advanceTimersByTimeAsync(intervalMs);
// No unhandled rejection — the error was caught internally.
const errorCalls = consoleSpy.mock.calls.filter(
(args) => typeof args[0] === "string" && args[0].includes("Poll error")
);
expect(errorCalls.length).toBeGreaterThan(0);
});
});
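The backoff assertions above advance fake time by whatever delay was last scheduled, then inspect the next setTimeout call. Reduced to a minimal self-contained sketch (same vitest APIs; toy timer logic):

import { expect, it, vi } from "vitest";
it("asserts on the delay of each rescheduled timer", async () => {
    vi.useFakeTimers();
    const spy = vi.spyOn(globalThis, "setTimeout");
    let delay = 100;
    const tick = () => { delay *= 2; setTimeout(tick, delay); }; // toy backoff
    setTimeout(tick, delay);
    // Advance by the most recently scheduled delay, then check the new schedule.
    await vi.advanceTimersByTimeAsync(spy.mock.calls[0][1] as number);
    expect(spy.mock.calls[1][1]).toBe(200);
    vi.useRealTimers();
});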

View File

@@ -15,6 +15,96 @@ export async function runCommand(options: CLIOptions, context: CLICommandContext
await core.services.control.activated;
if (options.command === "daemon") {
const log = (msg: unknown) => console.error(`[Daemon] ${msg}`);
// Skip the config mismatch dialog — the daemon cannot resolve it interactively
// and the default "Dismiss" action would block replication. The daemon should
// accept whatever configuration the remote has.
await core.services.setting.applyPartial({ disableCheckingConfigMismatch: true }, true);
// 1. Replicate CouchDB → local PouchDB so the mirror scan has content to work with.
log("Replicating from CouchDB...");
const replResult = await core.services.replication.replicate(true);
if (!replResult) {
console.error("[Daemon] Initial CouchDB replication failed, cannot continue");
return false;
}
log("CouchDB replication complete");
// 2. Mirror scan to reconcile PouchDB ↔ local filesystem.
const errorManager = new UnresolvedErrorManager(core.services.appLifecycle);
log("Running mirror scan...");
const scanOk = await performFullScan(core as any, log, errorManager, false, true);
if (!scanOk) {
console.error("[Daemon] Mirror scan failed, cannot continue");
return false;
}
log("Mirror scan complete");
// 3. Re-enable sync.
const restoreSyncSettings = async () => {
await core.services.setting.applyPartial({
...context.originalSyncSettings,
suspendFileWatching: false,
}, true);
// applySettings fires the full lifecycle: onSuspending → onResumed.
// ModuleReplicatorCouchDB starts continuous replication on onResumed
// via fireAndForget.
await core.services.control.applySettings();
// Lifecycle events (onSuspending) may re-enable suspension flags.
// Clear them explicitly after the lifecycle completes. applyPartial
// with true is a direct store write — it does not re-trigger lifecycle.
await core.services.setting.applyPartial({
suspendFileWatching: false,
suspendParseReplicationResult: false,
}, true);
};
if (options.interval) {
log(`Polling mode: syncing every ${options.interval}s`);
await restoreSyncSettings();
const baseIntervalMs = options.interval * 1000;
let currentIntervalMs = baseIntervalMs;
let consecutiveFailures = 0;
const maxIntervalMs = 5 * 60 * 1000; // 5 minutes cap
const poll = async () => {
try {
await core.services.replication.replicate(true);
if (consecutiveFailures > 0) {
consecutiveFailures--;
currentIntervalMs = Math.max(currentIntervalMs / 2, baseIntervalMs);
log(`Replication recovered`);
}
} catch (err) {
consecutiveFailures++;
currentIntervalMs = Math.min(baseIntervalMs * Math.pow(2, consecutiveFailures), maxIntervalMs);
console.error(`[Daemon] Poll error (${consecutiveFailures} consecutive):`, err);
if (consecutiveFailures >= 5) {
console.error(`[Daemon] Warning: ${consecutiveFailures} consecutive failures, backing off to ${Math.round(currentIntervalMs / 1000)}s`);
}
}
pollTimer = setTimeout(poll, currentIntervalMs);
};
let pollTimer: ReturnType<typeof setTimeout> = setTimeout(poll, currentIntervalMs);
core.services.appLifecycle.onUnload.addHandler(async () => {
clearTimeout(pollTimer);
return true;
});
} else {
log("LiveSync mode: restoring sync settings and starting _changes feed");
await restoreSyncSettings();
// The applySettings() lifecycle fires onResumed → ModuleReplicatorCouchDB which
// starts continuous replication via fireAndForget(openReplication). Don't call
// openReplication directly — it races with the handler and causes dedup/termination.
log("LiveSync active");
const currentSettings = core.services.setting.currentSettings();
if (!currentSettings.liveSync && !currentSettings.syncOnStart) {
console.error("[Daemon] Warning: liveSync and syncOnStart are both disabled in settings. " +
"No sync will occur. Set liveSync=true in your settings file for continuous sync, " +
"or use --interval for polling mode.");
}
}
return true;
}
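The escalate/halve arithmetic in the poll loop, isolated for clarity (same constants as the diff; a sketch, not shared code):

// Backoff sketch: exponential escalation capped at 5 minutes; recovery halves it.
function nextIntervalMs(baseMs: number, consecutiveFailures: number): number {
    const capMs = 5 * 60 * 1000; // 300000 ms
    return Math.min(baseMs * Math.pow(2, consecutiveFailures), capMs);
}
// With baseMs = 30000: failures 1–4 yield 60000, 120000, 240000, 300000 (capped);
// each subsequent success halves the running interval, floored at baseMs.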

View File

@@ -1,5 +1,6 @@
import { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import type { ObsidianLiveSyncSettings } from "@lib/common/types";
export type CLICommand =
| "daemon"
@@ -29,15 +30,18 @@ export interface CLIOptions {
force?: boolean;
command: CLICommand;
commandArgs: string[];
interval?: number;
}
export interface CLICommandContext {
databasePath: string;
core: LiveSyncBaseCore<ServiceContext, any>;
settingsPath: string;
originalSyncSettings: Pick<ObsidianLiveSyncSettings, "liveSync" | "syncOnStart" | "periodicReplication" | "syncOnSave" | "syncOnEditorSave" | "syncOnFileOpen" | "syncAfterMerge">;
}
export const VALID_COMMANDS = new Set([
"daemon",
"sync",
"p2p-peers",
"p2p-sync",

View File

@@ -43,7 +43,8 @@ Arguments:
database-path Path to the local database directory
Commands:
daemon (default) Run mirror scan then continuously sync CouchDB <-> local filesystem
sync Run one replication cycle and exit
p2p-peers <timeout> Show discovered peers as [peer]\t<peer-id>\t<peer-name>
p2p-sync <peer> <timeout>
Sync with the specified peer-id or peer-name
@@ -60,24 +61,30 @@ Commands:
rm <path> Mark a file as deleted in local database
resolve <path> <rev> Resolve conflicts by keeping <rev> and deleting others
mirror [vault-path] Mirror database contents to the local file system (vault-path defaults to database-path)
Options:
--interval <N>, -i <N> (daemon only) Poll CouchDB every N seconds instead of using the _changes feed
Examples:
livesync-cli ./my-database Run daemon (LiveSync mode)
livesync-cli ./my-database --interval 30 Run daemon (polling every 30s)
livesync-cli ./my-database sync
livesync-cli ./my-database p2p-peers 5
livesync-cli ./my-database p2p-sync my-peer-name 15
livesync-cli ./my-database p2p-host
livesync-cli ./my-database --settings ./custom-settings.json push ./note.md folder/note.md
livesync-cli ./my-database pull folder/note.md ./exports/note.md
livesync-cli ./my-database pull-rev folder/note.md ./exports/note.old.md 3-abcdef
livesync-cli ./my-database setup "obsidian://setuplivesync?settings=..."
echo "Hello" | livesync-cli ./my-database put notes/hello.md
livesync-cli ./my-database cat notes/hello.md
livesync-cli ./my-database cat-rev notes/hello.md 3-abcdef
livesync-cli ./my-database ls notes/
livesync-cli ./my-database info notes/hello.md
livesync-cli ./my-database rm notes/hello.md
livesync-cli ./my-database resolve notes/hello.md 3-abcdef
livesync-cli init-settings ./data.json
livesync-cli ./my-database --verbose
`);
}
@@ -94,6 +101,7 @@ export function parseArgs(): CLIOptions {
let verbose = false;
let debug = false;
let force = false;
let interval: number | undefined;
let command: CLICommand = "daemon";
const commandArgs: string[] = [];
@@ -110,6 +118,21 @@ export function parseArgs(): CLIOptions {
settingsPath = args[i];
break;
}
case "--interval":
case "-i": {
i++;
if (!args[i]) {
console.error(`Error: Missing value for ${token}`);
process.exit(1);
}
const n = parseInt(args[i], 10);
if (!Number.isInteger(n) || n <= 0) {
console.error(`Error: --interval requires a positive integer, got '${args[i]}'`);
process.exit(1);
}
interval = n;
break;
}
case "--debug":
case "-d":
// Debug mode automatically enables verbose logging, since it is intended for diagnosing issues.
@@ -164,6 +187,7 @@ export function parseArgs(): CLIOptions {
force,
command,
commandArgs,
interval,
};
}
@@ -248,6 +272,20 @@ export async function main() {
infoLog(`Settings: ${settingsPath}`);
infoLog("");
// For daemon and mirror mode, load ignore rules before the core is constructed so that
// chokidar's ignored option is populated when beginWatch() fires during onLoad().
const watchEnabled = options.command === "daemon";
const vaultPathForIgnoreRules =
options.command === "mirror" && options.commandArgs[0]
? path.resolve(options.commandArgs[0])
: databasePath;
let ignoreRules: IgnoreRules | undefined;
if (options.command === "daemon" || options.command === "mirror") {
ignoreRules = new IgnoreRules(vaultPathForIgnoreRules);
await ignoreRules.load();
}
// Create service context and hub
const context = new NodeServiceContext(databasePath);
const serviceHubInstance = new NodeServiceHub<NodeServiceContext>(databasePath, context);
@@ -278,11 +316,14 @@ export async function main() {
}
console.error(`${prefix} ${message}`);
});
// Prevent replication result from being processed automatically in non-daemon commands.
// In daemon mode the default handler must run so changes are applied to the filesystem.
if (options.command !== "daemon") {
serviceHubInstance.replication.processSynchroniseResult.addHandler(async () => {
console.error(`[Info] Replication result received, but not processed automatically in CLI mode.`);
return await Promise.resolve(true);
}, -100);
}
// Setup settings handlers
const settingService = serviceHubInstance.setting;
@@ -366,6 +407,25 @@ export async function main() {
process.on("SIGINT", () => shutdown("SIGINT"));
process.on("SIGTERM", () => shutdown("SIGTERM"));
// Save the settings file before any lifecycle events can mutate and persist them.
// suspendAllSync and other lifecycle hooks clobber sync settings in memory, and
// various code paths persist the clobbered state to disk. We restore on shutdown.
const settingsBackup = await fs.readFile(settingsPath, "utf-8").catch(() => null);
// Restore settings file on any exit to undo lifecycle mutations.
// Write to a temp path first so a crash mid-write doesn't leave a truncated file.
process.on("exit", () => {
if (settingsBackup) {
const tmpPath = settingsPath + ".tmp";
try {
require("fs").writeFileSync(tmpPath, settingsBackup, "utf-8");
require("fs").renameSync(tmpPath, settingsPath);
} catch (err) {
console.error("[Settings] Failed to restore settings on exit:", err);
}
}
});
// Start the core
try {
infoLog(`[Starting] Initializing LiveSync...`);
@@ -375,6 +435,18 @@ export async function main() {
console.error(`[Error] Failed to initialize LiveSync`);
process.exit(1);
}
// Capture sync settings before suspendAllSync() clobbers them.
// Used by daemon mode to restore the correct sync behaviour after the mirror scan.
const settingsBeforeSuspend = core.services.setting.currentSettings();
const originalSyncSettings = {
liveSync: settingsBeforeSuspend.liveSync,
syncOnStart: settingsBeforeSuspend.syncOnStart,
periodicReplication: settingsBeforeSuspend.periodicReplication,
syncOnSave: settingsBeforeSuspend.syncOnSave,
syncOnEditorSave: settingsBeforeSuspend.syncOnEditorSave,
syncOnFileOpen: settingsBeforeSuspend.syncOnFileOpen,
syncAfterMerge: settingsBeforeSuspend.syncAfterMerge,
};
await core.services.setting.suspendAllSync();
await core.services.control.onReady();
@@ -400,7 +472,7 @@ export async function main() {
infoLog("");
}
const result = await runCommand(options, { databasePath, core, settingsPath, originalSyncSettings });
if (!result) {
console.error(`[Error] Command '${options.command}' failed`);
process.exitCode = 1;
@@ -408,7 +480,7 @@ export async function main() {
infoLog(`[Done] Command '${options.command}' completed`);
}
if (options.command === "daemon") {
if (options.command === "daemon" && result) {
// Keep the process running
await new Promise(() => {});
} else {

View File

@@ -85,4 +85,67 @@ describe("CLI parseArgs", () => {
expect(parsed.command).toBe("p2p-host");
expect(parsed.commandArgs).toEqual([]);
});
it("parses --interval flag with valid integer", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "30"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.interval).toBe(30);
});
it("parses -i shorthand for --interval", () => {
process.argv = ["node", "livesync-cli", "./vault", "-i", "10"];
const parsed = parseArgs();
expect(parsed.interval).toBe(10);
});
it("exits 1 when --interval has no value", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
expect(exitMock).toHaveBeenCalledWith(1);
});
it("exits 1 when --interval is not a positive integer", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "0"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
expect(exitMock).toHaveBeenCalledWith(1);
});
it("exits 1 when --interval is negative", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "-5"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
});
it("exits 1 when --interval is not numeric", () => {
process.argv = ["node", "livesync-cli", "./vault", "--interval", "abc"];
const exitMock = mockProcessExit();
vi.spyOn(console, "error").mockImplementation(() => {});
expect(() => parseArgs()).toThrowError("__EXIT__:1");
});
it("parses explicit daemon command", () => {
process.argv = ["node", "livesync-cli", "./vault", "daemon"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.databasePath).toBe("./vault");
});
it("defaults to daemon when no command specified", () => {
process.argv = ["node", "livesync-cli", "./vault"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
});
it("parses explicit daemon command with --interval", () => {
process.argv = ["node", "livesync-cli", "./vault", "daemon", "--interval", "30"];
const parsed = parseArgs();
expect(parsed.command).toBe("daemon");
expect(parsed.interval).toBe(30);
});
});