Add: mirror command

Tidy: test
This commit is contained in:
vorotamoroz
2026-03-13 18:01:38 +09:00
parent 29ce9a5df4
commit 338a9ba9fa
15 changed files with 952 additions and 399 deletions

View File

@@ -4,8 +4,9 @@ set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
CLI_CMD=(npm --silent run cli -- -v)
source "$SCRIPT_DIR/test-helpers.sh"
VERBOSE_TEST_LOGGING="${VERBOSE_TEST_LOGGING:-0}"
cli_test_init_cli_cmd
RUN_BUILD="${RUN_BUILD:-1}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
TEST_ENV_FILE="${TEST_ENV_FILE:-$CLI_DIR/.test.env}"
@@ -36,27 +37,24 @@ COUCHDB_URI=""
COUCHDB_DBNAME=""
MINIO_BUCKET=""
require_env() {
local var_name="$1"
if [[ -z "${!var_name:-}" ]]; then
echo "[ERROR] required variable '$var_name' is missing in $TEST_ENV_FILE" >&2
exit 1
fi
}
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
require_env hostname
require_env dbname
require_env username
require_env password
cli_test_require_env hostname "$TEST_ENV_FILE"
cli_test_require_env dbname "$TEST_ENV_FILE"
cli_test_require_env username "$TEST_ENV_FILE"
cli_test_require_env password "$TEST_ENV_FILE"
COUCHDB_URI="${hostname%/}"
COUCHDB_DBNAME="${dbname}-${DB_SUFFIX}"
COUCHDB_USER="${username:-}"
COUCHDB_PASSWORD="${password:-}"
elif [[ "$REMOTE_TYPE" == "MINIO" ]]; then
require_env accessKey
require_env secretKey
require_env minioEndpoint
require_env bucketName
cli_test_require_env accessKey "$TEST_ENV_FILE"
cli_test_require_env secretKey "$TEST_ENV_FILE"
cli_test_require_env minioEndpoint "$TEST_ENV_FILE"
cli_test_require_env bucketName "$TEST_ENV_FILE"
MINIO_BUCKET="${bucketName}-${DB_SUFFIX}"
MINIO_ENDPOINT="${minioEndpoint:-}"
MINIO_ACCESS_KEY="${accessKey:-}"
MINIO_SECRET_KEY="${secretKey:-}"
else
echo "[ERROR] unsupported REMOTE_TYPE: $REMOTE_TYPE (use COUCHDB or MINIO)" >&2
exit 1
@@ -65,9 +63,9 @@ fi
cleanup() {
local exit_code=$?
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
cli_test_stop_couchdb
else
bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
cli_test_stop_minio
fi
if [[ "$KEEP_TEST_DATA" != "1" ]]; then
@@ -83,10 +81,6 @@ cleanup() {
}
trap cleanup EXIT
run_cli() {
"${CLI_CMD[@]}" "$@"
}
run_cli_a() {
run_cli "$VAULT_A" --settings "$SETTINGS_A" "$@"
}
@@ -95,191 +89,28 @@ run_cli_b() {
run_cli "$VAULT_B" --settings "$SETTINGS_B" "$@"
}
assert_contains() {
local haystack="$1"
local needle="$2"
local message="$3"
if ! grep -Fq "$needle" <<< "$haystack"; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected to find: $needle" >&2
echo "[FAIL] actual output:" >&2
echo "$haystack" >&2
exit 1
fi
}
assert_equal() {
local expected="$1"
local actual="$2"
local message="$3"
if [[ "$expected" != "$actual" ]]; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected: $expected" >&2
echo "[FAIL] actual: $actual" >&2
exit 1
fi
}
assert_command_fails() {
local message="$1"
shift
set +e
"$@" >"$WORK_DIR/failed-command.log" 2>&1
local exit_code=$?
set -e
if [[ "$exit_code" -eq 0 ]]; then
echo "[FAIL] $message" >&2
cat "$WORK_DIR/failed-command.log" >&2
exit 1
fi
}
assert_files_equal() {
local expected_file="$1"
local actual_file="$2"
local message="$3"
if ! cmp -s "$expected_file" "$actual_file"; then
echo "[FAIL] $message" >&2
echo "[FAIL] expected sha256: $(sha256sum "$expected_file" | awk '{print $1}')" >&2
echo "[FAIL] actual sha256: $(sha256sum "$actual_file" | awk '{print $1}')" >&2
exit 1
fi
}
sanitise_cat_stdout() {
sed '/^\[CLIWatchAdapter\] File watching is not enabled in CLI version$/d'
}
extract_json_string_field() {
local field_name="$1"
node -e '
const fs = require("node:fs");
const fieldName = process.argv[1];
const data = JSON.parse(fs.readFileSync(0, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
process.stdout.write(value);
}
' "$field_name"
}
sync_both() {
run_cli_a sync >/dev/null
run_cli_b sync >/dev/null
}
curl_json() {
curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}
configure_remote_settings() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" \
REMOTE_TYPE="$REMOTE_TYPE" \
COUCHDB_URI="$COUCHDB_URI" \
COUCHDB_USER="${username:-}" \
COUCHDB_PASSWORD="${password:-}" \
COUCHDB_DBNAME="$COUCHDB_DBNAME" \
MINIO_ENDPOINT="${minioEndpoint:-}" \
MINIO_BUCKET="$MINIO_BUCKET" \
MINIO_ACCESS_KEY="${accessKey:-}" \
MINIO_SECRET_KEY="${secretKey:-}" \
ENCRYPT="$ENCRYPT" \
E2E_PASSPHRASE="$E2E_PASSPHRASE" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
const remoteType = process.env.REMOTE_TYPE;
if (remoteType === "COUCHDB") {
data.remoteType = "";
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
} else if (remoteType === "MINIO") {
data.remoteType = "MINIO";
data.bucket = process.env.MINIO_BUCKET;
data.endpoint = process.env.MINIO_ENDPOINT;
data.accessKey = process.env.MINIO_ACCESS_KEY;
data.secretKey = process.env.MINIO_SECRET_KEY;
data.region = "auto";
data.forcePathStyle = true;
}
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.encrypt = process.env.ENCRYPT === "1";
data.passphrase = data.encrypt ? process.env.E2E_PASSPHRASE : "";
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_remote_sync_settings "$settings_file"
}
init_settings() {
local settings_file="$1"
run_cli init-settings --force "$settings_file" >/dev/null
cli_test_init_settings_file "$settings_file"
configure_remote_settings "$settings_file"
cat "$settings_file"
}
wait_for_minio_bucket() {
local retries=30
local delay_sec=2
local i
for ((i = 1; i <= retries; i++)); do
if docker run --rm --network host --entrypoint=/bin/sh minio/mc -c "mc alias set myminio $minioEndpoint $accessKey $secretKey >/dev/null 2>&1 && mc ls myminio/$MINIO_BUCKET >/dev/null 2>&1"; then
return 0
fi
bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-init.sh" >/dev/null 2>&1 || true
sleep "$delay_sec"
done
return 1
}
start_remote() {
if [[ "$REMOTE_TYPE" == "COUCHDB" ]]; then
echo "[INFO] stopping leftover CouchDB container if present"
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
echo "[INFO] starting CouchDB test container"
bash "$CLI_DIR/util/couchdb-start.sh"
echo "[INFO] initialising CouchDB test container"
bash "$CLI_DIR/util/couchdb-init.sh"
echo "[INFO] CouchDB create test database: $COUCHDB_DBNAME"
until (curl_json -X PUT --user "${username}:${password}" "${hostname}/${COUCHDB_DBNAME}"); do sleep 5; done
cli_test_start_couchdb "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
else
echo "[INFO] stopping leftover MinIO container if present"
bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
echo "[INFO] starting MinIO test container"
bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-start.sh"
echo "[INFO] initialising MinIO test bucket: $MINIO_BUCKET"
local minio_init_ok=0
for _ in 1 2 3 4 5; do
if bucketName="$MINIO_BUCKET" bash "$CLI_DIR/util/minio-init.sh"; then
minio_init_ok=1
break
fi
sleep 2
done
if [[ "$minio_init_ok" != "1" ]]; then
echo "[FAIL] could not initialise MinIO bucket after retries: $MINIO_BUCKET" >&2
exit 1
fi
if ! wait_for_minio_bucket; then
echo "[FAIL] MinIO bucket not ready: $MINIO_BUCKET" >&2
exit 1
fi
cli_test_start_minio "$MINIO_ENDPOINT" "$MINIO_ACCESS_KEY" "$MINIO_SECRET_KEY" "$MINIO_BUCKET"
fi
}
@@ -313,14 +144,14 @@ TARGET_CONFLICT="e2e/conflict.md"
echo "[CASE] A puts and A can get info"
printf 'alpha-from-a\n' | run_cli_a put "$TARGET_A_ONLY" >/dev/null
INFO_A_ONLY="$(run_cli_a info "$TARGET_A_ONLY")"
assert_contains "$INFO_A_ONLY" "\"path\": \"$TARGET_A_ONLY\"" "A info should include path after put"
cli_test_assert_contains "$INFO_A_ONLY" "\"path\": \"$TARGET_A_ONLY\"" "A info should include path after put"
echo "[PASS] A put/info"
echo "[CASE] A puts, both sync, and B can get info"
printf 'visible-after-sync\n' | run_cli_a put "$TARGET_SYNC" >/dev/null
sync_both
INFO_B_SYNC="$(run_cli_b info "$TARGET_SYNC")"
assert_contains "$INFO_B_SYNC" "\"path\": \"$TARGET_SYNC\"" "B info should include path after sync"
cli_test_assert_contains "$INFO_B_SYNC" "\"path\": \"$TARGET_SYNC\"" "B info should include path after sync"
echo "[PASS] sync A->B and B info"
echo "[CASE] A pushes and puts, both sync, and B can pull and cat"
@@ -331,9 +162,9 @@ run_cli_a push "$PUSH_SRC" "$TARGET_PUSH" >/dev/null
printf 'put-content-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_PUT" >/dev/null
sync_both
run_cli_b pull "$TARGET_PUSH" "$PULL_DST" >/dev/null
assert_files_equal "$PUSH_SRC" "$PULL_DST" "B pull result does not match pushed source"
CAT_B_PUT="$(run_cli_b cat "$TARGET_PUT" | sanitise_cat_stdout)"
assert_equal "put-content-$DB_SUFFIX" "$CAT_B_PUT" "B cat should return A put content"
cli_test_assert_files_equal "$PUSH_SRC" "$PULL_DST" "B pull result does not match pushed source"
CAT_B_PUT="$(run_cli_b cat "$TARGET_PUT" | cli_test_sanitise_cat_stdout)"
cli_test_assert_equal "put-content-$DB_SUFFIX" "$CAT_B_PUT" "B cat should return A put content"
echo "[PASS] push/pull and put/cat across vaults"
echo "[CASE] A pushes binary, both sync, and B can pull identical bytes"
@@ -343,31 +174,44 @@ head -c 4096 /dev/urandom > "$PUSH_BINARY_SRC"
run_cli_a push "$PUSH_BINARY_SRC" "$TARGET_PUSH_BINARY" >/dev/null
sync_both
run_cli_b pull "$TARGET_PUSH_BINARY" "$PULL_BINARY_DST" >/dev/null
assert_files_equal "$PUSH_BINARY_SRC" "$PULL_BINARY_DST" "B pull result does not match pushed binary source"
cli_test_assert_files_equal "$PUSH_BINARY_SRC" "$PULL_BINARY_DST" "B pull result does not match pushed binary source"
echo "[PASS] binary push/pull across vaults"
echo "[CASE] A removes, both sync, and B can no longer cat"
run_cli_a rm "$TARGET_PUT" >/dev/null
sync_both
assert_command_fails "B cat should fail after A removed the file and synced" run_cli_b cat "$TARGET_PUT"
cli_test_assert_command_fails "B cat should fail after A removed the file and synced" "$WORK_DIR/failed-command.log" run_cli_b cat "$TARGET_PUT"
echo "[PASS] rm is replicated"
echo "[CASE] verify conflict detection"
printf 'conflict-base\n' | run_cli_a put "$TARGET_CONFLICT" >/dev/null
sync_both
INFO_B_BASE="$(run_cli_b info "$TARGET_CONFLICT")"
assert_contains "$INFO_B_BASE" "\"path\": \"$TARGET_CONFLICT\"" "B should be able to info before creating conflict"
cli_test_assert_contains "$INFO_B_BASE" "\"path\": \"$TARGET_CONFLICT\"" "B should be able to info before creating conflict"
printf 'conflict-from-a-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_CONFLICT" >/dev/null
printf 'conflict-from-b-%s\n' "$DB_SUFFIX" | run_cli_b put "$TARGET_CONFLICT" >/dev/null
run_cli_a sync >/dev/null
run_cli_b sync >/dev/null
run_cli_a sync >/dev/null
INFO_A_CONFLICT=""
INFO_B_CONFLICT=""
CONFLICT_DETECTED=0
INFO_A_CONFLICT="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_CONFLICT="$(run_cli_b info "$TARGET_CONFLICT")"
if grep -qF '"conflicts": "N/A"' <<< "$INFO_A_CONFLICT" && grep -qF '"conflicts": "N/A"' <<< "$INFO_B_CONFLICT"; then
for side in a b a; do
if [[ "$side" == "a" ]]; then
run_cli_a sync >/dev/null
else
run_cli_b sync >/dev/null
fi
INFO_A_CONFLICT="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_CONFLICT="$(run_cli_b info "$TARGET_CONFLICT")"
if ! grep -qF '"conflicts": "N/A"' <<< "$INFO_A_CONFLICT" || ! grep -qF '"conflicts": "N/A"' <<< "$INFO_B_CONFLICT"; then
CONFLICT_DETECTED=1
break
fi
done
if [[ "$CONFLICT_DETECTED" != "1" ]]; then
echo "[FAIL] conflict was expected but both A and B show Conflicts: N/A" >&2
echo "--- A info ---" >&2
echo "$INFO_A_CONFLICT" >&2
@@ -399,7 +243,7 @@ fi
echo "[PASS] ls marks conflicts"
echo "[CASE] resolve conflict on A and verify both vaults are clean"
KEEP_REVISION="$(printf '%s' "$INFO_A_CONFLICT" | extract_json_string_field revision)"
KEEP_REVISION="$(printf '%s' "$INFO_A_CONFLICT" | cli_test_json_string_field_from_stdin revision)"
if [[ -z "$KEEP_REVISION" ]]; then
echo "[FAIL] could not extract current revision from A info output" >&2
echo "$INFO_A_CONFLICT" >&2
@@ -411,7 +255,7 @@ run_cli_a resolve "$TARGET_CONFLICT" "$KEEP_REVISION" >/dev/null
INFO_A_RESOLVED=""
INFO_B_RESOLVED=""
RESOLVE_PROPAGATED=0
for _ in 1 2 3 4 5; do
for _ in 1 2 3 4 5 6; do
sync_both
INFO_A_RESOLVED="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_RESOLVED="$(run_cli_b info "$TARGET_CONFLICT")"
@@ -419,19 +263,15 @@ for _ in 1 2 3 4 5; do
RESOLVE_PROPAGATED=1
break
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
KEEP_REVISION_B="$(printf '%s' "$INFO_B_RESOLVED" | extract_json_string_field revision)"
if [[ -n "$KEEP_REVISION_B" ]]; then
run_cli_b resolve "$TARGET_CONFLICT" "$KEEP_REVISION_B" >/dev/null
sync_both
INFO_A_RESOLVED="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_RESOLVED="$(run_cli_b info "$TARGET_CONFLICT")"
if grep -qF '"conflicts": "N/A"' <<< "$INFO_A_RESOLVED" && grep -qF '"conflicts": "N/A"' <<< "$INFO_B_RESOLVED"; then
RESOLVE_PROPAGATED=1
# Retry from A only when conflict remains due to eventual consistency.
if ! grep -qF '"conflicts": "N/A"' <<< "$INFO_A_RESOLVED"; then
KEEP_REVISION_A="$(printf '%s' "$INFO_A_RESOLVED" | cli_test_json_string_field_from_stdin revision)"
if [[ -n "$KEEP_REVISION_A" ]]; then
run_cli_a resolve "$TARGET_CONFLICT" "$KEEP_REVISION_A" >/dev/null || true
fi
fi
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
echo "[FAIL] conflicts should be resolved on both vaults" >&2
@@ -453,9 +293,9 @@ if [[ "$LS_A_RESOLVED_REV" == *"*" || "$LS_B_RESOLVED_REV" == *"*" ]]; then
exit 1
fi
CAT_A_RESOLVED="$(run_cli_a cat "$TARGET_CONFLICT" | sanitise_cat_stdout)"
CAT_B_RESOLVED="$(run_cli_b cat "$TARGET_CONFLICT" | sanitise_cat_stdout)"
assert_equal "$CAT_A_RESOLVED" "$CAT_B_RESOLVED" "resolved content should match across both vaults"
CAT_A_RESOLVED="$(run_cli_a cat "$TARGET_CONFLICT" | cli_test_sanitise_cat_stdout)"
CAT_B_RESOLVED="$(run_cli_b cat "$TARGET_CONFLICT" | cli_test_sanitise_cat_stdout)"
cli_test_assert_equal "$CAT_A_RESOLVED" "$CAT_B_RESOLVED" "resolved content should match across both vaults"
echo "[PASS] resolve is replicated and ls reflects resolved state"
echo "[PASS] all requested E2E scenarios completed (${TEST_LABEL})"

View File

@@ -0,0 +1,295 @@
#!/usr/bin/env bash
# Populate the global CLI_CMD array with the npm invocation used to run the CLI.
# When VERBOSE_TEST_LOGGING=1, a trailing "-v" is appended so tests can opt
# into verbose CLI logging; otherwise the CLI runs quietly.
cli_test_init_cli_cmd() {
  CLI_CMD=(npm --silent run cli --)
  if [[ "${VERBOSE_TEST_LOGGING:-0}" == "1" ]]; then
    CLI_CMD+=(-v)
  fi
}
# Invoke the CLI using the command array prepared by cli_test_init_cli_cmd,
# forwarding all arguments verbatim and propagating its exit status.
run_cli() {
"${CLI_CMD[@]}" "$@"
}
# Abort the whole test run when the named environment variable is unset or
# empty (checked via indirect expansion).
# $1 - variable name to check
# $2 - (optional) env-file path named in the error message; defaults to
#      $TEST_ENV_FILE, then the literal word "environment".
cli_test_require_env() {
  local name="$1"
  local source_hint="${2:-${TEST_ENV_FILE:-environment}}"
  [[ -n "${!name:-}" ]] && return 0
  echo "[ERROR] required variable '$name' is missing in $source_hint" >&2
  exit 1
}
# Assert that $needle occurs as a literal substring of $haystack; on failure
# print the message plus the expected needle and actual output to stderr and
# abort the whole test run.
# $1 - haystack text   $2 - literal needle   $3 - human-readable failure message
cli_test_assert_contains() {
  local haystack="$1"
  local needle="$2"
  local message="$3"
  # '--' ends option parsing so needles beginning with '-' (e.g. "-v") are
  # treated as patterns instead of grep options.
  if ! grep -Fq -- "$needle" <<< "$haystack"; then
    echo "[FAIL] $message" >&2
    echo "[FAIL] expected to find: $needle" >&2
    echo "[FAIL] actual output:" >&2
    echo "$haystack" >&2
    exit 1
  fi
}
# Assert exact string equality between an expected and an actual value; on
# mismatch report both values to stderr and abort the whole test run.
# $1 - expected value   $2 - actual value   $3 - human-readable failure message
cli_test_assert_equal() {
  local want="$1"
  local got="$2"
  local why="$3"
  [[ "$want" == "$got" ]] && return 0
  {
    echo "[FAIL] $why"
    echo "[FAIL] expected: $want"
    echo "[FAIL] actual: $got"
  } >&2
  exit 1
}
# Run a command with stdout+stderr captured into $log_file, and abort the test
# run if the command unexpectedly SUCCEEDS (exit status 0). The captured log
# is dumped to stderr on failure to aid debugging.
# $1 - failure message   $2 - log file path   $3.. - command and its arguments
cli_test_assert_command_fails() {
  local message="$1"
  local log_file="$2"
  shift 2
  local status=0
  # Temporarily disable -e so a (desired) non-zero status does not abort us.
  set +e
  "$@" >"$log_file" 2>&1
  status=$?
  set -e
  if (( status == 0 )); then
    echo "[FAIL] $message" >&2
    cat "$log_file" >&2
    exit 1
  fi
}
# Byte-compare two files; on any difference print both SHA-256 digests to
# stderr and abort the whole test run.
# $1 - file holding the expected bytes   $2 - file holding the actual bytes
# $3 - human-readable failure message
cli_test_assert_files_equal() {
  local want_file="$1"
  local got_file="$2"
  local why="$3"
  cmp -s "$want_file" "$got_file" && return 0
  echo "[FAIL] $why" >&2
  echo "[FAIL] expected sha256: $(sha256sum "$want_file" | awk '{print $1}')" >&2
  echo "[FAIL] actual sha256: $(sha256sum "$got_file" | awk '{print $1}')" >&2
  exit 1
}
# Filter for `cat` command output: delete the CLIWatchAdapter notice line the
# CLI prints on stdout, leaving only the file content. Reads stdin, writes
# stdout; usable both in a pipeline and with input/output redirection.
cli_test_sanitise_cat_stdout() {
sed '/^\[CLIWatchAdapter\] File watching is not enabled in CLI version$/d'
}
# Read a JSON document from stdin and print the value of the named top-level
# field without a trailing newline. Prints nothing when the field is absent or
# is not a string. Requires `node` on PATH; a JSON parse error makes node exit
# non-zero.
# $1 - field name to extract
cli_test_json_string_field_from_stdin() {
local field_name="$1"
node -e '
const fs = require("node:fs");
const fieldName = process.argv[1];
const data = JSON.parse(fs.readFileSync(0, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
process.stdout.write(value);
}
' "$field_name"
}
# Print the value of a named top-level string field read from a JSON file,
# without a trailing newline. Prints nothing when the field is absent or is
# not a string. Requires `node` on PATH.
# $1 - JSON file path   $2 - field name to extract
cli_test_json_string_field_from_file() {
local json_file="$1"
local field_name="$2"
node -e '
const fs = require("node:fs");
const jsonFile = process.argv[1];
const fieldName = process.argv[2];
const data = JSON.parse(fs.readFileSync(jsonFile, "utf-8"));
const value = data[fieldName];
if (typeof value === "string") {
process.stdout.write(value);
}
' "$json_file" "$field_name"
}
# Succeed (status 0) iff the named string field in the JSON file equals "N/A".
# Note: the command substitution runs inside [[ ]], so a node/JSON failure
# yields an empty string (comparison simply fails) instead of aborting the
# caller under `set -e` — keep it in this single-expression form.
cli_test_json_field_is_na() {
local json_file="$1"
local field_name="$2"
[[ "$(cli_test_json_string_field_from_file "$json_file" "$field_name")" == "N/A" ]]
}
# curl wrapper with the common test options: IPv4 only, silent but still
# showing errors, non-zero exit on HTTP errors (--fail), and short connect
# (3s) / overall (15s) timeouts. Extra arguments are passed through to curl.
cli_test_curl_json() {
curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}
# (Re)generate a settings file from the CLI's defaults, overwriting any
# existing file (--force); the CLI's stdout is discarded.
# $1 - destination settings file path
cli_test_init_settings_file() {
local settings_file="$1"
run_cli init-settings --force "$settings_file" >/dev/null
}
# Set isConfigured=true in the given settings JSON file, rewriting it in place
# with 2-space pretty-printing. Commands gated by canProceedScan (e.g. mirror)
# require this flag. Requires `node` on PATH.
# $1 - settings file path
cli_test_mark_settings_configured() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Write CouchDB connection settings into an existing settings JSON file and
# mark it configured; the file is rewritten in place. When live_sync is "1",
# liveSync mode is forced on and syncOnStart/syncOnSave/usePluginSync are
# disabled. Requires `node` on PATH.
# $1 - settings file path   $2 - CouchDB base URI   $3 - user
# $4 - password            $5 - database name      $6 - live_sync flag (default 0)
cli_test_apply_couchdb_settings() {
local settings_file="$1"
local couchdb_uri="$2"
local couchdb_user="$3"
local couchdb_password="$4"
local couchdb_dbname="$5"
local live_sync="${6:-0}"
SETTINGS_FILE="$settings_file" \
COUCHDB_URI="$couchdb_uri" \
COUCHDB_USER="$couchdb_user" \
COUCHDB_PASSWORD="$couchdb_password" \
COUCHDB_DBNAME="$couchdb_dbname" \
LIVE_SYNC="$live_sync" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
if (process.env.LIVE_SYNC === "1") {
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
}
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Write remote-sync settings into an existing settings JSON file and mark it
# configured. Reads caller-supplied globals: REMOTE_TYPE plus either
# COUCHDB_URI/USER/PASSWORD/DBNAME or MINIO_ENDPOINT/BUCKET/ACCESS_KEY/
# SECRET_KEY, and ENCRYPT / E2E_PASSPHRASE. Always forces liveSync on and
# disables syncOnStart/syncOnSave/usePluginSync. Requires `node` on PATH.
# NOTE(review): for COUCHDB the script sets remoteType to "" — presumably the
# empty string selects CouchDB in this settings schema; confirm against the
# settings type definition.
# $1 - settings file path
cli_test_apply_remote_sync_settings() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" \
REMOTE_TYPE="$REMOTE_TYPE" \
COUCHDB_URI="$COUCHDB_URI" \
COUCHDB_USER="${COUCHDB_USER:-}" \
COUCHDB_PASSWORD="${COUCHDB_PASSWORD:-}" \
COUCHDB_DBNAME="$COUCHDB_DBNAME" \
MINIO_ENDPOINT="${MINIO_ENDPOINT:-}" \
MINIO_BUCKET="$MINIO_BUCKET" \
MINIO_ACCESS_KEY="${MINIO_ACCESS_KEY:-}" \
MINIO_SECRET_KEY="${MINIO_SECRET_KEY:-}" \
ENCRYPT="${ENCRYPT:-0}" \
E2E_PASSPHRASE="${E2E_PASSPHRASE:-}" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
const remoteType = process.env.REMOTE_TYPE;
if (remoteType === "COUCHDB") {
data.remoteType = "";
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
} else if (remoteType === "MINIO") {
data.remoteType = "MINIO";
data.bucket = process.env.MINIO_BUCKET;
data.endpoint = process.env.MINIO_ENDPOINT;
data.accessKey = process.env.MINIO_ACCESS_KEY;
data.secretKey = process.env.MINIO_SECRET_KEY;
data.region = "auto";
data.forcePathStyle = true;
}
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.encrypt = process.env.ENCRYPT === "1";
data.passphrase = data.encrypt ? process.env.E2E_PASSPHRASE : "";
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}
# Stop the CouchDB test container; output and errors are deliberately ignored
# so cleanup/teardown paths can call this unconditionally.
cli_test_stop_couchdb() {
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
}
# Start and initialise a fresh CouchDB test container (stopping any leftover
# one first), then create the test database via the HTTP API.
# $1 - base URI   $2 - user   $3 - password   $4 - database name
# NOTE(review): the `until` loop below has no retry cap — if the database can
# never be created this blocks forever; consider bounding the retries.
cli_test_start_couchdb() {
local couchdb_uri="$1"
local couchdb_user="$2"
local couchdb_password="$3"
local couchdb_dbname="$4"
echo "[INFO] stopping leftover CouchDB container if present"
cli_test_stop_couchdb
echo "[INFO] starting CouchDB test container"
bash "$CLI_DIR/util/couchdb-start.sh"
echo "[INFO] initialising CouchDB test container"
bash "$CLI_DIR/util/couchdb-init.sh"
echo "[INFO] CouchDB create test database: $couchdb_dbname"
until (cli_test_curl_json -X PUT --user "${couchdb_user}:${couchdb_password}" "${couchdb_uri}/${couchdb_dbname}"); do sleep 5; done
}
# Stop the MinIO test container; output and errors are deliberately ignored
# so cleanup/teardown paths can call this unconditionally.
cli_test_stop_minio() {
bash "$CLI_DIR/util/minio-stop.sh" >/dev/null 2>&1 || true
}
# Poll until the bucket is listable via `mc` (run inside a docker container on
# the host network), re-running minio-init between attempts. Returns 0 once
# the bucket is visible, 1 after the retry budget (retries * delay_sec) is
# exhausted.
# $1 - endpoint URL   $2 - access key   $3 - secret key   $4 - bucket name
# NOTE(review): endpoint/keys/bucket are interpolated into the `sh -c` string,
# so values containing spaces or shell metacharacters would break or inject —
# acceptable for test fixtures, but confirm inputs stay simple.
cli_test_wait_for_minio_bucket() {
local minio_endpoint="$1"
local minio_access_key="$2"
local minio_secret_key="$3"
local minio_bucket="$4"
local retries=30
local delay_sec=2
local i
for ((i = 1; i <= retries; i++)); do
if docker run --rm --network host --entrypoint=/bin/sh minio/mc -c "mc alias set myminio $minio_endpoint $minio_access_key $minio_secret_key >/dev/null 2>&1 && mc ls myminio/$minio_bucket >/dev/null 2>&1"; then
return 0
fi
bucketName="$minio_bucket" bash "$CLI_DIR/util/minio-init.sh" >/dev/null 2>&1 || true
sleep "$delay_sec"
done
return 1
}
# Start a fresh MinIO test container (stopping any leftover one first), then
# create and verify the target bucket. Aborts the whole test run when the
# bucket cannot be initialised or does not become visible in time.
# $1 - endpoint URL   $2 - access key   $3 - secret key   $4 - bucket name
cli_test_start_minio() {
  local endpoint="$1"
  local access_key="$2"
  local secret_key="$3"
  local bucket="$4"
  local attempt
  local init_succeeded=0
  echo "[INFO] stopping leftover MinIO container if present"
  cli_test_stop_minio
  echo "[INFO] starting MinIO test container"
  bucketName="$bucket" bash "$CLI_DIR/util/minio-start.sh"
  echo "[INFO] initialising MinIO test bucket: $bucket"
  # Bucket creation can race container startup; retry a handful of times.
  for ((attempt = 1; attempt <= 5; attempt++)); do
    if bucketName="$bucket" bash "$CLI_DIR/util/minio-init.sh"; then
      init_succeeded=1
      break
    fi
    sleep 2
  done
  if [[ "$init_succeeded" != "1" ]]; then
    echo "[FAIL] could not initialise MinIO bucket after retries: $bucket" >&2
    exit 1
  fi
  if ! cli_test_wait_for_minio_bucket "$endpoint" "$access_key" "$secret_key" "$bucket"; then
    echo "[FAIL] MinIO bucket not ready: $bucket" >&2
    exit 1
  fi
}
# Print a banner with the calling script's path, the current UTC timestamp,
# and the repository's short git commit hash ("N/A" outside a git checkout).
# Reads the global SCRIPT_DIR set by the sourcing test script.
display_test_info() {
  local caller="${BASH_SOURCE[1]:-$0}"
  local commit
  commit="$(git -C "$SCRIPT_DIR/.." rev-parse --short HEAD 2>/dev/null || echo "N/A")"
  echo "======================"
  echo "Script: $caller"
  echo "Date: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
  echo "Git commit: $commit"
  echo "======================"
}

View File

@@ -0,0 +1,169 @@
#!/usr/bin/env bash
# Test: mirror command — storage <-> local database synchronisation
#
# Usage: run directly; set RUN_BUILD=0 to skip the npm build step.
#
# Covered cases:
# 1. Storage-only file → synced into DB (UPDATE DATABASE)
# 2. DB-only file → restored to storage (UPDATE STORAGE)
# 3. DB-deleted file → NOT restored to storage (UPDATE STORAGE skip)
# 4. Both, storage newer → DB updated (SYNC: STORAGE → DB)
# 5. Both, DB newer → storage updated (SYNC: DB → STORAGE)
#
# Not covered (require precise mtime control or artificial conflict injection):
# - Both, equal mtime → no-op (EVEN)
# - Conflicted entry → skipped
#
# NOTE(review): cases 4 and 5 rely on GNU `date -d '+1 hour'` / `-1 hour`,
# which BSD/macOS date does not support — this script assumes a GNU userland.
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
RUN_BUILD="${RUN_BUILD:-1}"
cli_test_init_cli_cmd
# Scratch area for settings, vault, and intermediate files; removed on exit.
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
SETTINGS_FILE="$WORK_DIR/data.json"
VAULT_DIR="$WORK_DIR/vault"
mkdir -p "$VAULT_DIR/test"
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
echo "[INFO] generating settings -> $SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
# isConfigured=true is required for mirror (canProceedScan checks this)
cli_test_mark_settings_configured "$SETTINGS_FILE"
# Unlike the aborting assert helpers in test-helpers.sh, these counters let
# every case run; the summary at the bottom decides the overall exit status.
PASS=0
FAIL=0
assert_pass() { echo "[PASS] $1"; PASS=$((PASS + 1)); }
assert_fail() { echo "[FAIL] $1" >&2; FAIL=$((FAIL + 1)); }
# ─────────────────────────────────────────────────────────────────────────────
# Case 1: File exists only in storage → should be synced into DB after mirror
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 1: storage-only → DB ==="
printf 'storage-only content\n' > "$VAULT_DIR/test/storage-only.md"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
RESULT_FILE="$WORK_DIR/case1-cat.txt"
# Pull straight from the DB to verify mirror ingested the storage file.
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull test/storage-only.md "$RESULT_FILE"
if cmp -s "$VAULT_DIR/test/storage-only.md" "$RESULT_FILE"; then
assert_pass "storage-only file was synced into DB"
else
assert_fail "storage-only file NOT synced into DB"
echo "--- storage ---" >&2; cat "$VAULT_DIR/test/storage-only.md" >&2
echo "--- cat ---" >&2; cat "$RESULT_FILE" >&2
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 2: File exists only in DB → should be restored to storage after mirror
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 2: DB-only → storage ==="
# `put` writes to the DB only; the vault file must not exist yet.
printf 'db-only content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/db-only.md
if [[ -f "$VAULT_DIR/test/db-only.md" ]]; then
assert_fail "db-only.md unexpectedly exists in storage before mirror"
else
echo "[INFO] confirmed: test/db-only.md not in storage before mirror"
fi
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
if [[ -f "$VAULT_DIR/test/db-only.md" ]]; then
STORAGE_CONTENT="$(cat "$VAULT_DIR/test/db-only.md")"
if [[ "$STORAGE_CONTENT" == "db-only content" ]]; then
assert_pass "DB-only file was restored to storage"
else
assert_fail "DB-only file restored but content mismatch (got: '${STORAGE_CONTENT}')"
fi
else
assert_fail "DB-only file was NOT restored to storage"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 3: File deleted in DB → should NOT be created in storage
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 3: DB-deleted → storage untouched ==="
printf 'to-be-deleted\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/deleted.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" rm test/deleted.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
if [[ ! -f "$VAULT_DIR/test/deleted.md" ]]; then
assert_pass "deleted DB entry was not restored to storage"
else
assert_fail "deleted DB entry was incorrectly restored to storage"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 4: Both exist, storage is newer → DB should be updated
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 4: storage newer → DB updated ==="
# Seed DB with old content (mtime ≈ now)
printf 'old content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/sync-storage-newer.md
# Write new content to storage with a timestamp 1 hour in the future
printf 'new content\n' > "$VAULT_DIR/test/sync-storage-newer.md"
touch -t "$(date -d '+1 hour' +%Y%m%d%H%M)" "$VAULT_DIR/test/sync-storage-newer.md"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
DB_RESULT_FILE="$WORK_DIR/case4-pull.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull test/sync-storage-newer.md "$DB_RESULT_FILE"
if cmp -s "$VAULT_DIR/test/sync-storage-newer.md" "$DB_RESULT_FILE"; then
assert_pass "DB updated to match newer storage file"
else
assert_fail "DB NOT updated to match newer storage file"
echo "--- expected(storage) ---" >&2; cat "$VAULT_DIR/test/sync-storage-newer.md" >&2
echo "--- pulled(from db) ---" >&2; cat "$DB_RESULT_FILE" >&2
fi
# ─────────────────────────────────────────────────────────────────────────────
# Case 5: Both exist, DB is newer → storage should be updated
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Case 5: DB newer → storage updated ==="
# Write old content to storage with a timestamp 1 hour in the past
printf 'old storage content\n' > "$VAULT_DIR/test/sync-db-newer.md"
touch -t "$(date -d '-1 hour' +%Y%m%d%H%M)" "$VAULT_DIR/test/sync-db-newer.md"
# Write new content to DB only (mtime ≈ now, newer than the storage file)
printf 'new db content\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/sync-db-newer.md
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" mirror
STORAGE_CONTENT="$(cat "$VAULT_DIR/test/sync-db-newer.md")"
if [[ "$STORAGE_CONTENT" == "new db content" ]]; then
assert_pass "storage updated to match newer DB entry"
else
assert_fail "storage NOT updated to match newer DB entry (got: '${STORAGE_CONTENT}')"
fi
# ─────────────────────────────────────────────────────────────────────────────
# Summary
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "Results: PASS=$PASS FAIL=$FAIL"
if [[ "$FAIL" -gt 0 ]]; then
exit 1
fi

View File

@@ -4,10 +4,12 @@ set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/push-pull.txt}"
cli_test_init_cli_cmd
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
@@ -19,26 +21,12 @@ if [[ "$RUN_BUILD" == "1" ]]; then
npm run build
fi
run_cli() {
"${CLI_CMD[@]}" "$@"
}
echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
if [[ -n "${COUCHDB_URI:-}" && -n "${COUCHDB_USER:-}" && -n "${COUCHDB_PASSWORD:-}" && -n "${COUCHDB_DBNAME:-}" ]]; then
echo "[INFO] applying CouchDB env vars to generated settings"
SETTINGS_FILE="$SETTINGS_FILE" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_couchdb_settings "$SETTINGS_FILE" "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
else
echo "[WARN] CouchDB env vars are not fully set. push/pull may fail unless generated settings are updated."
fi

View File

@@ -5,11 +5,13 @@ SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
REPO_ROOT="$(cd -- "$CLI_DIR/../../.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/setup-put-cat.txt}"
SETUP_PASSPHRASE="${SETUP_PASSPHRASE:-setup-passphrase}"
cli_test_init_cli_cmd
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
@@ -21,12 +23,8 @@ if [[ "$RUN_BUILD" == "1" ]]; then
npm run build
fi
run_cli() {
"${CLI_CMD[@]}" "$@"
}
echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"
cli_test_init_settings_file "$SETTINGS_FILE"
echo "[INFO] creating setup URI from settings"
SETUP_URI="$(
@@ -84,7 +82,7 @@ CAT_OUTPUT="$WORK_DIR/cat-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" cat "$REMOTE_PATH" > "$CAT_OUTPUT"
CAT_OUTPUT_CLEAN="$WORK_DIR/cat-output-clean.txt"
grep -v '^\[CLIWatchAdapter\] File watching is not enabled in CLI version$' "$CAT_OUTPUT" > "$CAT_OUTPUT_CLEAN" || true
cli_test_sanitise_cat_stdout < "$CAT_OUTPUT" > "$CAT_OUTPUT_CLEAN"
if cmp -s "$SRC_FILE" "$CAT_OUTPUT_CLEAN"; then
echo "[PASS] setup/put/cat roundtrip matched"
@@ -175,48 +173,52 @@ echo "[INFO] info $REMOTE_PATH"
INFO_OUTPUT="$WORK_DIR/info-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REMOTE_PATH" > "$INFO_OUTPUT"
# Check required label lines
for label in "ID:" "Revision:" "Conflicts:" "Filename:" "Path:" "Size:" "Chunks:"; do
if ! grep -q "^$label" "$INFO_OUTPUT"; then
echo "[FAIL] info output missing label: $label" >&2
cat "$INFO_OUTPUT" >&2
exit 1
fi
done
# Path value must match
INFO_PATH="$(grep '^Path:' "$INFO_OUTPUT" | sed 's/^Path:[[:space:]]*//')"
if [[ "$INFO_PATH" != "$REMOTE_PATH" ]]; then
echo "[FAIL] info Path mismatch: $INFO_PATH" >&2
exit 1
fi
# Filename must be the basename
INFO_FILENAME="$(grep '^Filename:' "$INFO_OUTPUT" | sed 's/^Filename:[[:space:]]*//')"
EXPECTED_FILENAME="$(basename "$REMOTE_PATH")"
if [[ "$INFO_FILENAME" != "$EXPECTED_FILENAME" ]]; then
echo "[FAIL] info Filename mismatch: $INFO_FILENAME != $EXPECTED_FILENAME" >&2
exit 1
fi
set +e
INFO_JSON_CHECK="$(
INFO_OUTPUT="$INFO_OUTPUT" REMOTE_PATH="$REMOTE_PATH" EXPECTED_FILENAME="$EXPECTED_FILENAME" node - <<'NODE'
const fs = require("node:fs");
# Size must be numeric
INFO_SIZE="$(grep '^Size:' "$INFO_OUTPUT" | sed 's/^Size:[[:space:]]*//')"
if [[ ! "$INFO_SIZE" =~ ^[0-9]+$ ]]; then
echo "[FAIL] info Size is not numeric: $INFO_SIZE" >&2
exit 1
fi
const content = fs.readFileSync(process.env.INFO_OUTPUT, "utf-8");
let data;
try {
data = JSON.parse(content);
} catch (ex) {
console.error("invalid-json");
process.exit(1);
}
# Chunks count must be numeric and ≥1
INFO_CHUNKS="$(grep '^Chunks:' "$INFO_OUTPUT" | sed 's/^Chunks:[[:space:]]*//')"
if [[ ! "$INFO_CHUNKS" =~ ^[0-9]+$ ]] || [[ "$INFO_CHUNKS" -lt 1 ]]; then
echo "[FAIL] info Chunks is not a positive integer: $INFO_CHUNKS" >&2
exit 1
fi
# Conflicts should be N/A (no live CouchDB)
INFO_CONFLICTS="$(grep '^Conflicts:' "$INFO_OUTPUT" | sed 's/^Conflicts:[[:space:]]*//')"
if [[ "$INFO_CONFLICTS" != "N/A" ]]; then
echo "[FAIL] info Conflicts expected N/A, got: $INFO_CONFLICTS" >&2
if (!data || typeof data !== "object") {
console.error("invalid-payload");
process.exit(1);
}
if (data.path !== process.env.REMOTE_PATH) {
console.error(`path-mismatch:${String(data.path)}`);
process.exit(1);
}
if (data.filename !== process.env.EXPECTED_FILENAME) {
console.error(`filename-mismatch:${String(data.filename)}`);
process.exit(1);
}
if (!Number.isInteger(data.size) || data.size < 0) {
console.error(`size-invalid:${String(data.size)}`);
process.exit(1);
}
if (!Number.isInteger(data.chunks) || data.chunks < 1) {
console.error(`chunks-invalid:${String(data.chunks)}`);
process.exit(1);
}
if (data.conflicts !== "N/A") {
console.error(`conflicts-invalid:${String(data.conflicts)}`);
process.exit(1);
}
NODE
)"
INFO_JSON_EXIT=$?
set -e
if [[ "$INFO_JSON_EXIT" -ne 0 ]]; then
echo "[FAIL] info JSON output validation failed: $INFO_JSON_CHECK" >&2
cat "$INFO_OUTPUT" >&2
exit 1
fi
@@ -292,8 +294,30 @@ echo "[INFO] info $REV_PATH (past revisions)"
REV_INFO_OUTPUT="$WORK_DIR/rev-info-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REV_PATH" > "$REV_INFO_OUTPUT"
PAST_REV="$(grep '^ rev: ' "$REV_INFO_OUTPUT" | head -n 1 | sed 's/^ rev: //')"
if [[ -z "$PAST_REV" ]]; then
set +e
PAST_REV="$(
REV_INFO_OUTPUT="$REV_INFO_OUTPUT" node - <<'NODE'
const fs = require("node:fs");
const content = fs.readFileSync(process.env.REV_INFO_OUTPUT, "utf-8");
let data;
try {
data = JSON.parse(content);
} catch {
process.exit(1);
}
const revisions = Array.isArray(data?.revisions) ? data.revisions : [];
const revision = revisions.find((rev) => typeof rev === "string" && rev !== "N/A");
if (!revision) {
process.exit(1);
}
process.stdout.write(revision);
NODE
)"
PAST_REV_EXIT=$?
set -e
if [[ "$PAST_REV_EXIT" -ne 0 ]] || [[ -z "$PAST_REV" ]]; then
echo "[FAIL] info output did not include any past revision" >&2
cat "$REV_INFO_OUTPUT" >&2
exit 1

View File

@@ -1,39 +1,66 @@
#!/usr/bin/env bash
## TODO: test this script. I would love to go to my bed today (3a.m.) However, I am so excited about the new CLI that I want to at least get this skeleton in place. Delightful days!
set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"
source "$SCRIPT_DIR/test-helpers.sh"
display_test_info
CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
COUCHDB_URI="${COUCHDB_URI:-}"
COUCHDB_USER="${COUCHDB_USER:-}"
COUCHDB_PASSWORD="${COUCHDB_PASSWORD:-}"
COUCHDB_DBNAME_BASE="${COUCHDB_DBNAME:-livesync-cli-e2e}"
TEST_ENV_FILE="${TEST_ENV_FILE:-$CLI_DIR/.test.env}"
cli_test_init_cli_cmd
if [[ ! -f "$TEST_ENV_FILE" ]]; then
echo "[ERROR] test env file not found: $TEST_ENV_FILE" >&2
exit 1
fi
set -a
source "$TEST_ENV_FILE"
set +a
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-two-db-test.XXXXXX")"
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_URI="${hostname%/}"
COUCHDB_DBNAME="${dbname}-${DB_SUFFIX}"
COUCHDB_USER="${username:-}"
COUCHDB_PASSWORD="${password:-}"
if [[ -z "$COUCHDB_URI" || -z "$COUCHDB_USER" || -z "$COUCHDB_PASSWORD" ]]; then
echo "[ERROR] COUCHDB_URI, COUCHDB_USER, COUCHDB_PASSWORD are required" >&2
exit 1
fi
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-two-db-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT
if [[ "$RUN_BUILD" == "1" ]]; then
echo "[INFO] building CLI..."
npm run build
fi
cleanup() {
local exit_code=$?
cli_test_stop_couchdb
run_cli() {
"${CLI_CMD[@]}" "$@"
rm -rf "$WORK_DIR"
# Note: we do not attempt to delete the test database, as it may cause issues if the test failed in a way that leaves the database in an inconsistent state. The test database is named with a unique suffix, so it should not interfere with other tests.
echo "[INFO] test completed with exit code $exit_code. Test database '$COUCHDB_DBNAME' is not deleted for debugging purposes."
exit "$exit_code"
}
trap cleanup EXIT
start_remote() {
cli_test_start_couchdb "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME"
}
DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_DBNAME="${COUCHDB_DBNAME_BASE}-${DB_SUFFIX}"
echo "[INFO] using CouchDB database: $COUCHDB_DBNAME"
start_remote
VAULT_A="$WORK_DIR/vault-a"
VAULT_B="$WORK_DIR/vault-b"
@@ -41,31 +68,12 @@ SETTINGS_A="$WORK_DIR/a-settings.json"
SETTINGS_B="$WORK_DIR/b-settings.json"
mkdir -p "$VAULT_A" "$VAULT_B"
run_cli init-settings --force "$SETTINGS_A" >/dev/null
run_cli init-settings --force "$SETTINGS_B" >/dev/null
cli_test_init_settings_file "$SETTINGS_A"
cli_test_init_settings_file "$SETTINGS_B"
apply_settings() {
local settings_file="$1"
SETTINGS_FILE="$settings_file" \
COUCHDB_URI="$COUCHDB_URI" \
COUCHDB_USER="$COUCHDB_USER" \
COUCHDB_PASSWORD="$COUCHDB_PASSWORD" \
COUCHDB_DBNAME="$COUCHDB_DBNAME" \
node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
cli_test_apply_couchdb_settings "$settings_file" "$COUCHDB_URI" "$COUCHDB_USER" "$COUCHDB_PASSWORD" "$COUCHDB_DBNAME" 1
}
apply_settings "$SETTINGS_A"
@@ -95,24 +103,12 @@ cat_b() {
run_cli_b cat "$1"
}
assert_equal() {
local expected="$1"
local actual="$2"
local message="$3"
if [[ "$expected" != "$actual" ]]; then
echo "[FAIL] $message" >&2
echo "expected: $expected" >&2
echo "actual: $actual" >&2
exit 1
fi
}
echo "[INFO] case1: A creates file, B can read after sync"
printf 'from-a\n' | run_cli_a put shared/from-a.txt >/dev/null
sync_a
sync_b
VALUE_FROM_B="$(cat_b shared/from-a.txt)"
assert_equal "from-a" "$VALUE_FROM_B" "B could not read file created on A"
cli_test_assert_equal "from-a" "$VALUE_FROM_B" "B could not read file created on A"
echo "[PASS] case1 passed"
echo "[INFO] case2: B creates file, A can read after sync"
@@ -120,7 +116,7 @@ printf 'from-b\n' | run_cli_b put shared/from-b.txt >/dev/null
sync_b
sync_a
VALUE_FROM_A="$(cat_a shared/from-b.txt)"
assert_equal "from-b" "$VALUE_FROM_A" "A could not read file created on B"
cli_test_assert_equal "from-b" "$VALUE_FROM_A" "A could not read file created on B"
echo "[PASS] case2 passed"
echo "[INFO] case3: concurrent edits create conflict"
@@ -131,15 +127,25 @@ sync_b
printf 'edit-from-a\n' | run_cli_a put shared/conflicted.txt >/dev/null
printf 'edit-from-b\n' | run_cli_b put shared/conflicted.txt >/dev/null
sync_a
sync_b
INFO_A="$WORK_DIR/info-a.txt"
INFO_B="$WORK_DIR/info-b.txt"
run_cli_a info shared/conflicted.txt > "$INFO_A"
run_cli_b info shared/conflicted.txt > "$INFO_B"
CONFLICT_DETECTED=0
for side in a b; do
if [[ "$side" == "a" ]]; then
sync_a
else
sync_b
fi
if grep -q '^Conflicts: N/A$' "$INFO_A" && grep -q '^Conflicts: N/A$' "$INFO_B"; then
run_cli_a info shared/conflicted.txt > "$INFO_A"
run_cli_b info shared/conflicted.txt > "$INFO_B"
if ! cli_test_json_field_is_na "$INFO_A" conflicts || ! cli_test_json_field_is_na "$INFO_B" conflicts; then
CONFLICT_DETECTED=1
break
fi
done
if [[ "$CONFLICT_DETECTED" != "1" ]]; then
echo "[FAIL] expected conflict after concurrent edits, but both sides show N/A" >&2
echo "--- A info ---" >&2
cat "$INFO_A" >&2
@@ -150,21 +156,60 @@ fi
echo "[PASS] case3 conflict detected"
echo "[INFO] case4: resolve on A, sync, and verify B has no conflict"
KEEP_REV="$(sed -n 's/^Revision:[[:space:]]*//p' "$INFO_A" | head -n 1)"
INFO_A_AFTER="$WORK_DIR/info-a-after-resolve.txt"
INFO_B_AFTER="$WORK_DIR/info-b-after-resolve.txt"
# Ensure A sees the conflict before resolving; otherwise resolve may be a no-op.
for _ in 1 2 3 4 5; do
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
if ! cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
break
fi
sync_b
sync_a
done
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
if cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
echo "[FAIL] A does not see conflict, cannot resolve from A only" >&2
cat "$INFO_A_AFTER" >&2
exit 1
fi
KEEP_REV="$(cli_test_json_string_field_from_file "$INFO_A_AFTER" revision)"
if [[ -z "$KEEP_REV" ]]; then
echo "[FAIL] could not read Revision from A info output" >&2
cat "$INFO_A" >&2
echo "[FAIL] could not read revision from A info output" >&2
cat "$INFO_A_AFTER" >&2
exit 1
fi
run_cli_a resolve shared/conflicted.txt "$KEEP_REV" >/dev/null
sync_a
sync_b
INFO_B_AFTER="$WORK_DIR/info-b-after-resolve.txt"
run_cli_b info shared/conflicted.txt > "$INFO_B_AFTER"
if ! grep -q '^Conflicts: N/A$' "$INFO_B_AFTER"; then
echo "[FAIL] B still has conflicts after resolving on A and syncing" >&2
RESOLVE_PROPAGATED=0
for _ in 1 2 3 4 5 6; do
sync_a
sync_b
run_cli_a info shared/conflicted.txt > "$INFO_A_AFTER"
run_cli_b info shared/conflicted.txt > "$INFO_B_AFTER"
if cli_test_json_field_is_na "$INFO_A_AFTER" conflicts && cli_test_json_field_is_na "$INFO_B_AFTER" conflicts; then
RESOLVE_PROPAGATED=1
break
fi
# Retry resolve from A only when conflict remains due to eventual consistency.
if ! cli_test_json_field_is_na "$INFO_A_AFTER" conflicts; then
KEEP_REV_A="$(cli_test_json_string_field_from_file "$INFO_A_AFTER" revision)"
if [[ -n "$KEEP_REV_A" ]]; then
run_cli_a resolve shared/conflicted.txt "$KEEP_REV_A" >/dev/null || true
fi
fi
done
if [[ "$RESOLVE_PROPAGATED" != "1" ]]; then
echo "[FAIL] conflicts should be resolved on both A and B" >&2
echo "--- A info after resolve ---" >&2
cat "$INFO_A_AFTER" >&2
echo "--- B info after resolve ---" >&2
cat "$INFO_B_AFTER" >&2
exit 1
fi