mirror of
https://github.com/vrtmrz/obsidian-livesync.git
synced 2026-03-25 03:05:17 +00:00
Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2ff60dd5ac | ||
|
|
c3341da242 | ||
|
|
c2bfaeb5a9 |
@@ -2,8 +2,7 @@ import type { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
|
||||
import { P2P_DEFAULT_SETTINGS } from "@lib/common/types";
|
||||
import type { ServiceContext } from "@lib/services/base/ServiceBase";
|
||||
import { LiveSyncTrysteroReplicator } from "@lib/replication/trystero/LiveSyncTrysteroReplicator";
|
||||
import { addP2PEventHandlers } from "@lib/replication/trystero/P2PReplicatorCore";
|
||||
|
||||
import { addP2PEventHandlers } from "@lib/replication/trystero/addP2PEventHandlers";
|
||||
type CLIP2PPeer = {
|
||||
peerId: string;
|
||||
name: string;
|
||||
|
||||
81
src/apps/webapp/playwright.config.ts
Normal file
81
src/apps/webapp/playwright.config.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
import { defineConfig, devices } from "@playwright/test";
|
||||
import * as path from "path";
|
||||
import * as fs from "fs";
|
||||
import { fileURLToPath } from "url";
|
||||
|
||||
// Recreate the CommonJS-style __filename/__dirname globals, which do not
// exist in an ES module context.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Load environment variables from .test.env (root) so that CouchDB
|
||||
// connection details are visible to the test process.
|
||||
// ---------------------------------------------------------------------------
|
||||
function loadEnvFile(envPath: string): Record<string, string> {
|
||||
const result: Record<string, string> = {};
|
||||
if (!fs.existsSync(envPath)) return result;
|
||||
const lines = fs.readFileSync(envPath, "utf-8").split("\n");
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith("#")) continue;
|
||||
const eq = trimmed.indexOf("=");
|
||||
if (eq < 0) continue;
|
||||
const key = trimmed.slice(0, eq).trim();
|
||||
const val = trimmed.slice(eq + 1).trim();
|
||||
result[key] = val;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// __dirname is src/apps/webapp — root is three levels up
|
||||
const ROOT = path.resolve(__dirname, "../../..");
|
||||
const envVars = {
|
||||
...loadEnvFile(path.join(ROOT, ".env")),
|
||||
...loadEnvFile(path.join(ROOT, ".test.env")),
|
||||
};
|
||||
|
||||
// Make the loaded variables available to all test files via process.env.
|
||||
for (const [k, v] of Object.entries(envVars)) {
|
||||
if (!(k in process.env)) {
|
||||
process.env[k] = v;
|
||||
}
|
||||
}
|
||||
|
||||
// Playwright configuration for the webapp E2E suite. The values below are
// tuned for long-running replication tests against a real CouchDB.
export default defineConfig({
    testDir: "./test",
    // Give each test plenty of time for replication round-trips.
    timeout: 120_000,
    expect: { timeout: 30_000 },
    // Run test files sequentially; the tests themselves manage two contexts.
    fullyParallel: false,
    workers: 1,
    reporter: "list",

    use: {
        baseURL: "http://localhost:3000",
        // Use Chromium for OPFS and FileSystem API support.
        ...devices["Desktop Chrome"],
        headless: true,
        // Launch args to match the main vitest browser config.
        launchOptions: {
            args: ["--js-flags=--expose-gc"],
        },
    },

    projects: [
        {
            name: "chromium",
            use: { ...devices["Desktop Chrome"] },
        },
    ],

    // Start the vite dev server before running the tests.
    webServer: {
        command: "npx vite --port 3000",
        url: "http://localhost:3000",
        // Re-use a running dev server when developing locally.
        reuseExistingServer: !process.env.CI,
        timeout: 30_000,
        // Run from the webapp directory so vite finds its config.
        cwd: __dirname,
    },
});
|
||||
203
src/apps/webapp/test-entry.ts
Normal file
203
src/apps/webapp/test-entry.ts
Normal file
@@ -0,0 +1,203 @@
|
||||
/**
|
||||
* LiveSync WebApp E2E test entry point.
|
||||
*
|
||||
* When served by vite dev server (at /test.html), this module wires up
|
||||
* `window.livesyncTest`, a plain JS API that Playwright tests can call via
|
||||
* `page.evaluate()`. All methods are async and serialisation-safe.
|
||||
*
|
||||
* Vault storage is backed by OPFS so no `showDirectoryPicker()` interaction
|
||||
* is required, making it fully headless-compatible.
|
||||
*/
|
||||
|
||||
import { LiveSyncWebApp } from "./main";
|
||||
import type { ObsidianLiveSyncSettings } from "@lib/common/types";
|
||||
import type { FilePathWithPrefix } from "@lib/common/types";
|
||||
|
||||
// --------------------------------------------------------------------------
// Internal state – one app instance per page / browser context
// --------------------------------------------------------------------------
// Assigned by livesyncTest.init(); cleared again by livesyncTest.shutdown().
let app: LiveSyncWebApp | null = null;
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
/** Strip the "plain:" / "enc:" / … prefix used internally in PouchDB paths. */
|
||||
function stripPrefix(raw: string): string {
|
||||
return raw.replace(/^[^:]+:/, "");
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll every 300 ms until all known processing queues are drained, or until
|
||||
* the timeout elapses. Mirrors `waitForIdle` in the existing vitest harness.
|
||||
*/
|
||||
async function waitForIdle(core: any, timeoutMs = 60_000): Promise<void> {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const q =
|
||||
(core.services?.replication?.databaseQueueCount?.value ?? 0) +
|
||||
(core.services?.fileProcessing?.totalQueued?.value ?? 0) +
|
||||
(core.services?.fileProcessing?.batched?.value ?? 0) +
|
||||
(core.services?.fileProcessing?.processing?.value ?? 0) +
|
||||
(core.services?.replication?.storageApplyingCount?.value ?? 0);
|
||||
if (q === 0) return;
|
||||
await new Promise<void>((r) => setTimeout(r, 300));
|
||||
}
|
||||
throw new Error(`waitForIdle timed out after ${timeoutMs} ms`);
|
||||
}
|
||||
|
||||
function getCore(): any {
|
||||
const core = (app as any)?.core;
|
||||
if (!core) throw new Error("Vault not initialised – call livesyncTest.init() first");
|
||||
return core;
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
// Public test API
// --------------------------------------------------------------------------

/**
 * Surface exposed to Playwright as `window.livesyncTest`. All methods are
 * async and exchange only serialisation-safe values so they can cross the
 * `page.evaluate()` boundary.
 */
export interface LiveSyncTestAPI {
    /**
     * Initialise a vault in OPFS under the given name and apply `settings`.
     * Any previous contents of the OPFS directory are wiped first so each
     * test run starts clean.
     */
    init(vaultName: string, settings: Partial<ObsidianLiveSyncSettings>): Promise<void>;

    /**
     * Write `content` to the local PouchDB under `vaultPath` (equivalent to
     * the CLI `put` command). Waiting for the DB write to finish is
     * included; you still need to call `replicate()` to push to remote.
     */
    putFile(vaultPath: string, content: string): Promise<boolean>;

    /**
     * Mark `vaultPath` as deleted in the local PouchDB (equivalent to CLI
     * `rm`). Call `replicate()` afterwards to propagate to remote.
     */
    deleteFile(vaultPath: string): Promise<boolean>;

    /**
     * Run one full replication cycle (push + pull) against the remote CouchDB,
     * then wait for the local storage-application queue to drain.
     */
    replicate(): Promise<boolean>;

    /**
     * Wait until all processing queues are idle. Usually not needed after
     * `putFile` / `deleteFile` since those already await, but useful when
     * testing results after `replicate()`.
     */
    waitForIdle(timeoutMs?: number): Promise<void>;

    /**
     * Return metadata for `vaultPath` from the local database, or `null` if
     * not found / deleted.
     */
    getInfo(vaultPath: string): Promise<{
        path: string;
        revision: string;
        conflicts: string[];
        size: number;
        mtime: number;
    } | null>;

    /** Convenience wrapper: returns true when the doc has ≥1 conflict revision. */
    hasConflict(vaultPath: string): Promise<boolean>;

    /** Tear down the current app instance. */
    shutdown(): Promise<void>;
}
|
||||
|
||||
// --------------------------------------------------------------------------
// Implementation
// --------------------------------------------------------------------------

const livesyncTest: LiveSyncTestAPI = {
    async init(vaultName: string, settings: Partial<ObsidianLiveSyncSettings>): Promise<void> {
        // Clean up any stale OPFS data from previous runs.
        const opfsRoot = await navigator.storage.getDirectory();
        try {
            await opfsRoot.removeEntry(vaultName, { recursive: true });
        } catch {
            // directory did not exist – that's fine
        }
        const vaultDir = await opfsRoot.getDirectoryHandle(vaultName, { create: true });

        // Pre-write settings so they are loaded during initialise().
        const livesyncDir = await vaultDir.getDirectoryHandle(".livesync", { create: true });
        const settingsFile = await livesyncDir.getFileHandle("settings.json", { create: true });
        const writable = await settingsFile.createWritable();
        await writable.write(JSON.stringify(settings));
        await writable.close();

        app = new LiveSyncWebApp(vaultDir);
        await app.initialize();

        // Give background startup tasks a moment to settle.
        await waitForIdle(getCore(), 30_000);
    },

    async putFile(vaultPath: string, content: string): Promise<boolean> {
        const core = getCore();
        const result = await core.serviceModules.databaseFileAccess.storeContent(
            vaultPath as FilePathWithPrefix,
            content
        );
        await waitForIdle(core);
        // NOTE(review): `result !== false` treats undefined/void as success —
        // confirm storeContent's failure contract.
        return result !== false;
    },

    async deleteFile(vaultPath: string): Promise<boolean> {
        const core = getCore();
        const result = await core.serviceModules.databaseFileAccess.delete(vaultPath as FilePathWithPrefix);
        await waitForIdle(core);
        return result !== false;
    },

    async replicate(): Promise<boolean> {
        const core = getCore();
        const result = await core.services.replication.replicate(true);
        // After replicate() resolves, remote docs may still be queued for
        // local storage application – wait until all queues are drained.
        await waitForIdle(core);
        return result !== false;
    },

    async waitForIdle(timeoutMs?: number): Promise<void> {
        await waitForIdle(getCore(), timeoutMs ?? 60_000);
    },

    async getInfo(vaultPath: string) {
        const core = getCore();
        const db = core.services?.database;
        // Scan all live docs; `conflicts: true` presumably forwards PouchDB's
        // option so `_conflicts` gets populated — confirm in findAllNormalDocs.
        for await (const doc of db.localDatabase.findAllNormalDocs({ conflicts: true })) {
            if (doc._deleted || doc.deleted) continue;
            const docPath = stripPrefix(doc.path ?? "");
            if (docPath !== vaultPath) continue;
            return {
                path: docPath,
                revision: (doc._rev as string) ?? "",
                conflicts: (doc._conflicts as string[]) ?? [],
                size: (doc.size as number) ?? 0,
                mtime: (doc.mtime as number) ?? 0,
            };
        }
        // Not found (or only deleted revisions exist).
        return null;
    },

    async hasConflict(vaultPath: string): Promise<boolean> {
        const info = await livesyncTest.getInfo(vaultPath);
        return (info?.conflicts?.length ?? 0) > 0;
    },

    async shutdown(): Promise<void> {
        if (app) {
            await app.shutdown();
            app = null;
        }
    },
};

// Expose on window for Playwright page.evaluate() calls.
(window as any).livesyncTest = livesyncTest;
|
||||
26
src/apps/webapp/test.html
Normal file
26
src/apps/webapp/test.html
Normal file
@@ -0,0 +1,26 @@
|
||||
<!doctype html>
<html lang="en">
    <head>
        <meta charset="UTF-8" />
        <meta name="viewport" content="width=device-width, initial-scale=1.0" />
        <title>LiveSync WebApp – E2E Test Page</title>
        <style>
            body {
                font-family: monospace;
                padding: 1rem;
            }
            #status {
                margin-top: 1rem;
                padding: 0.5rem;
                border: 1px solid #ccc;
            }
        </style>
    </head>
    <body>
        <h2>LiveSync WebApp E2E</h2>
        <p>This page is used by Playwright tests only. <code>window.livesyncTest</code> is exposed by the script below.</p>
        <!-- status div required by LiveSyncWebApp internal helpers -->
        <div id="status">Loading…</div>
        <!-- test-entry.ts assigns window.livesyncTest once this module loads -->
        <script type="module" src="/test-entry.ts"></script>
    </body>
</html>
|
||||
294
src/apps/webapp/test/e2e.spec.ts
Normal file
294
src/apps/webapp/test/e2e.spec.ts
Normal file
@@ -0,0 +1,294 @@
|
||||
/**
|
||||
* WebApp E2E tests – two-vault scenarios.
|
||||
*
|
||||
* Each vault (A and B) runs in its own browser context so that JavaScript
|
||||
* global state (including Trystero's global signalling tables) is fully
|
||||
* isolated. The two vaults communicate only through the shared remote
|
||||
* CouchDB database.
|
||||
*
|
||||
* Vault storage is OPFS-backed – no file-picker interaction needed.
|
||||
*
|
||||
* Prerequisites:
|
||||
* - A reachable CouchDB instance whose connection details are in .test.env
|
||||
* (read automatically by playwright.config.ts).
|
||||
*
|
||||
* How to run:
|
||||
* cd src/apps/webapp && npm run test:e2e
|
||||
*/
|
||||
|
||||
import { test, expect, type BrowserContext, type Page, type TestInfo } from "@playwright/test";
|
||||
import type { LiveSyncTestAPI } from "../test-entry";
|
||||
import { mkdirSync, writeFileSync } from "node:fs";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
// ESM replacements for the CommonJS __filename/__dirname globals.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Settings helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function requireEnv(name: string): string {
|
||||
const v = process.env[name];
|
||||
if (!v) throw new Error(`Missing required env variable: ${name}`);
|
||||
return v;
|
||||
}
|
||||
|
||||
async function ensureCouchDbDatabase(uri: string, user: string, pass: string, dbName: string): Promise<void> {
|
||||
const base = uri.replace(/\/+$/, "");
|
||||
const dbUrl = `${base}/${encodeURIComponent(dbName)}`;
|
||||
const auth = Buffer.from(`${user}:${pass}`, "utf-8").toString("base64");
|
||||
const response = await fetch(dbUrl, {
|
||||
method: "PUT",
|
||||
headers: {
|
||||
Authorization: `Basic ${auth}`,
|
||||
},
|
||||
});
|
||||
|
||||
// 201: created, 202: accepted, 412: already exists
|
||||
if (response.status === 201 || response.status === 202 || response.status === 412) {
|
||||
return;
|
||||
}
|
||||
|
||||
const body = await response.text().catch(() => "");
|
||||
throw new Error(`Failed to ensure CouchDB database (${response.status}): ${body}`);
|
||||
}
|
||||
|
||||
/**
 * Build the LiveSync settings object shared by both test vaults.
 * Remote connection details come from the environment (`hostname`,
 * `username`, `password`) loaded by playwright.config.ts.
 */
function buildSettings(dbName: string): Record<string, unknown> {
    return {
        // Remote database (shared between A and B – this is the replication target)
        couchDB_URI: requireEnv("hostname").replace(/\/+$/, ""),
        couchDB_USER: process.env["username"] ?? "",
        couchDB_PASSWORD: process.env["password"] ?? "",
        couchDB_DBNAME: dbName,

        // Core behaviour: everything automatic is switched off so the tests
        // drive replication explicitly.
        isConfigured: true,
        liveSync: false,
        syncOnSave: false,
        syncOnStart: false,
        periodicReplication: false,
        gcDelay: 0,
        savingDelay: 0,
        notifyThresholdOfRemoteStorageSize: 0,

        // Encryption off for test simplicity
        encrypt: false,

        // Disable plugin/hidden-file sync (not needed in webapp)
        usePluginSync: false,
        autoSweepPlugins: false,
        autoSweepPluginsPeriodic: false,

        // Auto-accept every peer ("~.*" matches any peer name).
        P2P_AutoAcceptingPeers: "~.*",
    };
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Test-page helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Navigate to the test entry page and wait for `window.livesyncTest`. */
|
||||
async function openTestPage(ctx: BrowserContext): Promise<Page> {
|
||||
const page = await ctx.newPage();
|
||||
await page.goto("/test.html");
|
||||
await page.waitForFunction(() => !!(window as any).livesyncTest, { timeout: 20_000 });
|
||||
return page;
|
||||
}
|
||||
|
||||
/**
 * Type-safe wrapper – calls `window.livesyncTest.<method>(...args)` in the page.
 * Method name and arguments are passed as a serialisable tuple into
 * `page.evaluate()`; the return value must be serialisation-safe too.
 */
async function call<M extends keyof LiveSyncTestAPI>(
    page: Page,
    method: M,
    ...args: Parameters<LiveSyncTestAPI[M]>
): Promise<Awaited<ReturnType<LiveSyncTestAPI[M]>>> {
    const invoke = () =>
        page.evaluate(([m, a]) => (window as any).livesyncTest[m](...a), [method, args] as [
            string,
            unknown[],
        ]) as Promise<Awaited<ReturnType<LiveSyncTestAPI[M]>>>;

    try {
        return await invoke();
    } catch (ex: any) {
        const message = String(ex?.message ?? ex);
        // Some startup flows may trigger one page reload; recover once by
        // waiting for the API to reappear and retrying the call.
        if (
            message.includes("Execution context was destroyed") ||
            message.includes("Most likely the page has been closed")
        ) {
            await page.waitForFunction(() => !!(window as any).livesyncTest, { timeout: 20_000 });
            return await invoke();
        }
        throw ex;
    }
}
|
||||
|
||||
/**
 * Persist per-page coverage counters (`window.__coverage__` — presumably
 * istanbul-instrumented build; confirm) into `../.nyc_output` as one JSON
 * file per test/vault. No-op unless PW_COVERAGE is set and the page is open.
 */
async function dumpCoverage(page: Page | undefined, label: string, testInfo: TestInfo): Promise<void> {
    if (!process.env.PW_COVERAGE || !page || page.isClosed()) {
        return;
    }
    const cov = await page
        .evaluate(() => {
            const data = (window as any).__coverage__;
            if (!data) return null;
            // Reset between tests to avoid runaway accumulation.
            (window as any).__coverage__ = {};
            return data;
        })
        .catch(() => null!);
    if (!cov) return;
    // Skip empty coverage objects — nothing useful to write.
    if (typeof cov === "object" && Object.keys(cov as Record<string, unknown>).length === 0) {
        return;
    }

    const outDir = path.resolve(__dirname, "../.nyc_output");
    mkdirSync(outDir, { recursive: true });
    // Sanitise the test id so it is safe as part of a file name.
    const name = `${testInfo.testId.replace(/[^a-zA-Z0-9_-]/g, "_")}-${label}.json`;
    writeFileSync(path.join(outDir, name), JSON.stringify(cov), "utf-8");
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Two-vault E2E suite
// ---------------------------------------------------------------------------

test.describe("WebApp two-vault E2E", () => {
    let ctxA: BrowserContext;
    let ctxB: BrowserContext;
    let pageA: Page;
    let pageB: Page;

    // Unique per-run database name so parallel/repeated runs never collide.
    const DB_SUFFIX = `${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
    const dbName = `${requireEnv("dbname")}-${DB_SUFFIX}`;
    const settings = buildSettings(dbName);

    test.beforeAll(async ({ browser }) => {
        await ensureCouchDbDatabase(
            String(settings.couchDB_URI ?? ""),
            String(settings.couchDB_USER ?? ""),
            String(settings.couchDB_PASSWORD ?? ""),
            dbName
        );

        // Open Vault A and Vault B in completely separate browser contexts.
        // Each context has its own JS runtime, IndexedDB and OPFS root, so
        // Trystero global state and PouchDB instance names cannot collide.
        ctxA = await browser.newContext();
        ctxB = await browser.newContext();

        pageA = await openTestPage(ctxA);
        pageB = await openTestPage(ctxB);

        await call(pageA, "init", "testvault_a", settings as any);
        await call(pageB, "init", "testvault_b", settings as any);
    });

    test.afterAll(async () => {
        // Best-effort shutdown; the contexts are closed regardless.
        await call(pageA, "shutdown").catch(() => {});
        await call(pageB, "shutdown").catch(() => {});
        await ctxA.close();
        await ctxB.close();
    });

    test.afterEach(async ({}, testInfo) => {
        await dumpCoverage(pageA, "vaultA", testInfo);
        await dumpCoverage(pageB, "vaultB", testInfo);
    });

    // -----------------------------------------------------------------------
    // Case 1: Vault A writes a file and can read its metadata back from the
    //         local database (no replication yet).
    // -----------------------------------------------------------------------
    test("Case 1: A writes a file and can get its info", async () => {
        const FILE = "e2e/case1-a-only.md";
        const CONTENT = "hello from vault A";

        const ok = await call(pageA, "putFile", FILE, CONTENT);
        expect(ok).toBe(true);

        const info = await call(pageA, "getInfo", FILE);
        expect(info).not.toBeNull();
        expect(info!.path).toBe(FILE);
        expect(info!.revision).toBeTruthy();
        expect(info!.conflicts).toHaveLength(0);
    });

    // -----------------------------------------------------------------------
    // Case 2: Vault A writes a file, both vaults replicate, and Vault B ends
    //         up with the file in its local database.
    // -----------------------------------------------------------------------
    test("Case 2: A writes a file, both replicate, B receives the file", async () => {
        const FILE = "e2e/case2-sync.md";
        const CONTENT = "content from A – should appear in B";

        await call(pageA, "putFile", FILE, CONTENT);

        // A pushes to remote, B pulls from remote.
        await call(pageA, "replicate");
        await call(pageB, "replicate");

        const infoB = await call(pageB, "getInfo", FILE);
        expect(infoB).not.toBeNull();
        expect(infoB!.path).toBe(FILE);
    });

    // -----------------------------------------------------------------------
    // Case 3: Vault A deletes the file it synced in case 2. After both
    //         vaults replicate, Vault B no longer sees the file.
    // -----------------------------------------------------------------------
    test("Case 3: A deletes the file, both replicate, B no longer sees it", async () => {
        // This test depends on Case 2 having put e2e/case2-sync.md into both vaults.
        const FILE = "e2e/case2-sync.md";

        await call(pageA, "deleteFile", FILE);

        await call(pageA, "replicate");
        await call(pageB, "replicate");

        const infoB = await call(pageB, "getInfo", FILE);
        // The file should be gone (null means not found or deleted).
        expect(infoB).toBeNull();
    });

    // -----------------------------------------------------------------------
    // Case 4: A and B each independently edit the same file that was already
    //         synced. After both vaults replicate the editing cycle, both
    //         vaults report a conflict on that file.
    // -----------------------------------------------------------------------
    test("Case 4: concurrent edits from A and B produce a conflict on both sides", async () => {
        const FILE = "e2e/case4-conflict.md";

        // 1) Write a baseline and synchronise so both vaults start from the
        //    same revision.
        await call(pageA, "putFile", FILE, "base content");
        await call(pageA, "replicate");
        await call(pageB, "replicate");

        // Confirm B has the base file with no conflicts yet.
        const baseInfoB = await call(pageB, "getInfo", FILE);
        expect(baseInfoB).not.toBeNull();
        expect(baseInfoB!.conflicts).toHaveLength(0);

        // 2) Both vaults write diverging content without syncing in between –
        //    this creates two competing revisions.
        await call(pageA, "putFile", FILE, "content from A (conflict side)");
        await call(pageB, "putFile", FILE, "content from B (conflict side)");

        // 3) Run replication on both sides. The order mirrors the pattern
        //    from the CLI two-vault tests (A → remote → B → remote → A).
        await call(pageA, "replicate");
        await call(pageB, "replicate");
        await call(pageA, "replicate"); // re-check from A to pick up B's revision

        // 4) At least one side must report a conflict.
        const hasConflictA = await call(pageA, "hasConflict", FILE);
        const hasConflictB = await call(pageB, "hasConflict", FILE);

        expect(
            hasConflictA || hasConflictB,
            "Expected a conflict to appear on vault A or vault B after diverging edits"
        ).toBe(true);
    });
});
|
||||
@@ -10,23 +10,23 @@ The head note of 0.25 is now in [updates_old.md](https://github.com/vrtmrz/obsid
|
||||
### Fixed
|
||||
|
||||
- Remote storage size check now works correctly again (#818).
|
||||
- Some buttons on the setting dialogue now respond correctly again (#827).
|
||||
- Some buttons on the settings dialogue now respond correctly again (#827).
|
||||
|
||||
### Refactored
|
||||
|
||||
- P2P replicator has been refactored to be a little roust and easier to understand.
|
||||
- P2P replicator has been refactored to be a little more robust and easier to understand.
|
||||
- Deleted items that are no longer used and might cause potential problems
|
||||
|
||||
### CLI
|
||||
|
||||
- Fixed the corrupted display of the help message.
|
||||
- Remove some unnecessary codes.
|
||||
- Remove some unnecessary code.
|
||||
|
||||
### WebApp
|
||||
|
||||
- Fixed the issue where the detail level was not being applied in the log pane.
|
||||
- Pop-ups are now shown.
|
||||
- Add coverage for test.
|
||||
- Add coverage for the test.
|
||||
- Pop-ups are now shown in the web app as well.
|
||||
|
||||
## 0.25.53
|
||||
|
||||
Reference in New Issue
Block a user