Mirror of https://github.com/vrtmrz/obsidian-livesync.git
synced 2026-03-12 04:48:48 +00:00
Compare commits
342 Commits
@@ -1,11 +0,0 @@
node_modules
build
.eslintrc.js.bak
src/lib/src/patches/pouchdb-utils
esbuild.config.mjs
rollup.config.js
src/lib/test
src/lib/src/cli
main.js
src/lib/apps/webpeer/dist
src/lib/apps/webpeer/svelte.config.js
32 .eslintrc
@@ -1,13 +1,35 @@
{
    "root": true,
    "parser": "@typescript-eslint/parser",
    "plugins": ["@typescript-eslint"],
    "extends": ["eslint:recommended", "plugin:@typescript-eslint/eslint-recommended", "plugin:@typescript-eslint/recommended"],
    "plugins": [
        "@typescript-eslint",
        "eslint-plugin-svelte",
        "eslint-plugin-import"
    ],
    "extends": [
        "eslint:recommended",
        "plugin:@typescript-eslint/eslint-recommended",
        "plugin:@typescript-eslint/recommended"
    ],
    "parserOptions": {
        "sourceType": "module",
        "project": ["tsconfig.json"]
        "project": [
            "tsconfig.json"
        ]
    },
    "ignorePatterns": [],
    "ignorePatterns": [
        "**/node_modules/*",
        "**/jest.config.js",
        "src/lib/coverage",
        "src/lib/browsertest",
        "**/test.ts",
        "**/tests.ts",
        "**/**test.ts",
        "**/**.test.ts",
        "src/apps/**",
        "esbuild.*.mjs",
        "terser.*.mjs"
    ],
    "rules": {
        "no-unused-vars": "off",
        "@typescript-eslint/no-unused-vars": [
@@ -34,4 +56,4 @@
            }
        ]
    }
}
}
}
68 .github/workflows/harness-ci.yml vendored Normal file
@@ -0,0 +1,68 @@
# Run tests by Harnessed CI
name: harness-ci

on:
    workflow_dispatch:
        inputs:
            testsuite:
                description: 'Run specific test suite (leave empty to run all)'
                type: choice
                options:
                    - ''
                    - 'suite/'
                    - 'suitep2p/'
                default: ''

permissions:
    contents: read

jobs:
    test:
        runs-on: ubuntu-latest
        timeout-minutes: 30
        steps:
            - name: Checkout
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Node.js
              uses: actions/setup-node@v4
              with:
                  node-version: '24.x'
                  cache: 'npm'

            - name: Install dependencies
              run: npm ci

            - name: Install test dependencies (Playwright Chromium)
              run: npm run test:install-dependencies

            - name: Start test services (CouchDB)
              run: npm run test:docker-couchdb:start
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
            - name: Start test services (MinIO)
              run: npm run test:docker-s3:start
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
            - name: Start test services (Nostr Relay + WebPeer)
              run: npm run test:docker-p2p:start
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suitep2p/' }}
            - name: Run tests suite
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
              env:
                  CI: true
              run: npm run test suite/
            - name: Run P2P tests suite
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suitep2p/' }}
              env:
                  CI: true
              run: npm run test suitep2p/
            - name: Stop test services (CouchDB)
              run: npm run test:docker-couchdb:stop
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
            - name: Stop test services (MinIO)
              run: npm run test:docker-s3:stop
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
            - name: Stop test services (Nostr Relay + WebPeer)
              run: npm run test:docker-p2p:stop
              if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suitep2p/' }}
128 .github/workflows/release.yml vendored
@@ -10,19 +10,19 @@ jobs:
    build:
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v2
            - uses: actions/checkout@v4
              with:
                  fetch-depth: 0 # otherwise, you will failed to push refs to dest repo
                  submodules: recursive
            - name: Use Node.js
              uses: actions/setup-node@v1
              uses: actions/setup-node@v4
              with:
                  node-version: '22.x' # You might need to adjust this value to your own version
                  node-version: '24.x' # You might need to adjust this value to your own version
            # Get the version number and put it in a variable
            - name: Get Version
              id: version
              run: |
                  echo "::set-output name=tag::$(git describe --abbrev=0 --tags)"
                  echo "tag=$(git describe --abbrev=0 --tags)" >> $GITHUB_OUTPUT
            # Build the plugin
            - name: Build
              id: build
@@ -36,59 +36,69 @@ jobs:
                  cp main.js manifest.json styles.css README.md ${{ github.event.repository.name }}
                  zip -r ${{ github.event.repository.name }}.zip ${{ github.event.repository.name }}
            # Create the release on github
            - name: Create Release
              id: create_release
              uses: actions/create-release@v1
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
                  VERSION: ${{ github.ref }}
            # - name: Create Release
            #   id: create_release
            #   uses: actions/create-release@v1
            #   env:
            #       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
            #       VERSION: ${{ steps.version.outputs.tag }}
            #   with:
            #       tag_name: ${{ steps.version.outputs.tag }}
            #       release_name: ${{ steps.version.outputs.tag }}
            #       draft: true
            #       prerelease: false
            # # Upload the packaged release file
            # - name: Upload zip file
            #   id: upload-zip
            #   uses: actions/upload-release-asset@v1
            #   env:
            #       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
            #   with:
            #       upload_url: ${{ steps.create_release.outputs.upload_url }}
            #       asset_path: ./${{ github.event.repository.name }}.zip
            #       asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
            #       asset_content_type: application/zip
            # # Upload the main.js
            # - name: Upload main.js
            #   id: upload-main
            #   uses: actions/upload-release-asset@v1
            #   env:
            #       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
            #   with:
            #       upload_url: ${{ steps.create_release.outputs.upload_url }}
            #       asset_path: ./main.js
            #       asset_name: main.js
            #       asset_content_type: text/javascript
            # # Upload the manifest.json
            # - name: Upload manifest.json
            #   id: upload-manifest
            #   uses: actions/upload-release-asset@v1
            #   env:
            #       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
            #   with:
            #       upload_url: ${{ steps.create_release.outputs.upload_url }}
            #       asset_path: ./manifest.json
            #       asset_name: manifest.json
            #       asset_content_type: application/json
            # # Upload the style.css
            # - name: Upload styles.css
            #   id: upload-css
            #   uses: actions/upload-release-asset@v1
            #   env:
            #       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
            #   with:
            #       upload_url: ${{ steps.create_release.outputs.upload_url }}
            #       asset_path: ./styles.css
            #       asset_name: styles.css
            #       asset_content_type: text/css
            - name: Create Release and Upload Assets
              uses: softprops/action-gh-release@v2
              with:
                  tag_name: ${{ github.ref }}
                  release_name: ${{ github.ref }}
                  draft: true
                  prerelease: false
            # Upload the packaged release file
            - name: Upload zip file
              id: upload-zip
              uses: actions/upload-release-asset@v1
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
              with:
                  upload_url: ${{ steps.create_release.outputs.upload_url }}
                  asset_path: ./${{ github.event.repository.name }}.zip
                  asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
                  asset_content_type: application/zip
            # Upload the main.js
            - name: Upload main.js
              id: upload-main
              uses: actions/upload-release-asset@v1
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
              with:
                  upload_url: ${{ steps.create_release.outputs.upload_url }}
                  asset_path: ./main.js
                  asset_name: main.js
                  asset_content_type: text/javascript
            # Upload the manifest.json
            - name: Upload manifest.json
              id: upload-manifest
              uses: actions/upload-release-asset@v1
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
              with:
                  upload_url: ${{ steps.create_release.outputs.upload_url }}
                  asset_path: ./manifest.json
                  asset_name: manifest.json
                  asset_content_type: application/json
            # Upload the style.css
            - name: Upload styles.css
              id: upload-css
              uses: actions/upload-release-asset@v1
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
              with:
                  upload_url: ${{ steps.create_release.outputs.upload_url }}
                  asset_path: ./styles.css
                  asset_name: styles.css
                  asset_content_type: text/css
              # TODO: release notes???
                  files: |
                      ${{ github.event.repository.name }}.zip
                      main.js
                      manifest.json
                      styles.css
                  name: ${{ steps.version.outputs.tag }}
                  tag_name: ${{ steps.version.outputs.tag }}
                  draft: true
45 .github/workflows/unit-ci.yml vendored Normal file
@@ -0,0 +1,45 @@
# Run Unit test without Harnesses
name: unit-ci

on:
    workflow_dispatch:
    push:
        branches:
            - main
            - beta

permissions:
    contents: read

jobs:
    test:
        runs-on: ubuntu-latest
        timeout-minutes: 30
        steps:
            - name: Checkout
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Node.js
              uses: actions/setup-node@v4
              with:
                  node-version: '24.x'
                  cache: 'npm'

            - name: Install dependencies
              run: npm ci

            # unit tests do not require Playwright, so we can skip installing its dependencies to save time
            # - name: Install test dependencies (Playwright Chromium)
            #   run: npm run test:install-dependencies

            - name: Run unit tests suite with coverage
              run: npm run test:unit:coverage

            - name: Upload coverage report
              if: always()
              uses: actions/upload-artifact@v4
              with:
                  name: coverage-report
                  path: coverage/**
12 .gitignore vendored
@@ -9,6 +9,7 @@ package-lock.json
# build
main.js
main_org.js
main_org_*.js
*.js.map
meta.json
meta-*.json
@@ -17,3 +18,14 @@ meta-*.json
# obsidian
data.json
.vscode

# environment variables
.env

# local config files
*.local

cov_profile/**

coverage
src/apps/cli/dist/*
@@ -1,7 +0,0 @@
{
    "trailingComma": "es5",
    "tabWidth": 4,
    "printWidth": 120,
    "semi": true,
    "endOfLine": "lf"
}
20 .prettierrc.mjs Normal file
@@ -0,0 +1,20 @@
import { readFileSync } from "fs";
let localPrettierConfig = {};

try {
    const localConfig = readFileSync(".prettierrc.local", "utf-8");
    localPrettierConfig = JSON.parse(localConfig);
    console.log("Using local Prettier config from .prettierrc.local");
} catch (e) {
    // no local config
}
const prettierConfig = {
    trailingComma: "es5",
    tabWidth: 4,
    printWidth: 120,
    semi: true,
    endOfLine: "cr",
    ...localPrettierConfig,
};

export default prettierConfig;
11 .test.env Normal file
@@ -0,0 +1,11 @@
hostname=http://localhost:5989/
dbname=livesync-test-db2
minioEndpoint=http://127.0.0.1:9000
username=admin
password=testpassword
accessKey=minioadmin
secretKey=minioadmin
bucketName=livesync-test-bucket
# ENABLE_DEBUGGER=true
# PRINT_LIVESYNC_LOGS=true
# ENABLE_UI=true
67 README.md
@@ -1,34 +1,39 @@
<!-- For translation: 20240227r0 -->
# Self-hosted LiveSync
[Japanese docs](./README_ja.md) - [Chinese docs](./README_cn.md).

Self-hosted LiveSync is a community-implemented synchronization plugin, available on every obsidian-compatible platform and using CouchDB or Object Storage (e.g., MinIO, S3, R2, etc.) as the server.

Self-hosted LiveSync is a community-developed synchronisation plug-in available on all Obsidian-compatible platforms. It leverages robust server solutions such as CouchDB or object storage systems (e.g., MinIO, S3, R2, etc.) to ensure reliable data synchronisation.

Additionally, it supports peer-to-peer synchronisation using WebRTC now (experimental), enabling you to synchronise your notes directly between devices without relying on a server.



Note: This plugin cannot synchronise with the official "Obsidian Sync".
>[!IMPORTANT]
> This plug-in is not compatible with the official "Obsidian Sync" and cannot synchronise with it.

## Features
- Synchronise vaults efficiently with minimal traffic.
- Handle conflicting modifications effectively.
    - Automatically merge simple conflicts.
- Use open-source solutions for the server.
    - Compatible solutions are supported.
- Support end-to-end encryption.
- Synchronise settings, snippets, themes, and plug-ins via [Customisation Sync (Beta)](docs/settings.md#6-customization-sync-advanced) or [Hidden File Sync](docs/settings.md#7-hidden-files-advanced).
- Enable WebRTC peer-to-peer synchronisation without requiring a `host` (Experimental).
    - This feature is still in the experimental stage. Please exercise caution when using it.
    - WebRTC is a peer-to-peer synchronisation method, so **at least one device must be online to synchronise**.
    - Instead of keeping your device online as a stable peer, you can use two pseudo-peers:
        - [livesync-serverpeer](https://github.com/vrtmrz/livesync-serverpeer): A pseudo-client running on the server for receiving and sending data between devices.
        - [webpeer](https://github.com/vrtmrz/livesync-commonlib/tree/main/apps/webpeer): A pseudo-client for receiving and sending data between devices.
        - A pre-built instance is available at [fancy-syncing.vrtmrz.net/webpeer](https://fancy-syncing.vrtmrz.net/webpeer/) (hosted on the vrtmrz blog site). This is also peer-to-peer. Feel free to use it.
    - For more information, refer to the [English explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync-en.html) or the [Japanese explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync).

- Synchronize vaults very efficiently with less traffic.
- Good at conflicted modification.
    - Automatic merging for simple conflicts.
- Using OSS solution for the server.
    - Compatible solutions can be used.
- Supporting End-to-end encryption.
- Synchronisation of settings, snippets, themes, and plug-ins, via [Customization sync(Beta)](#customization-sync) or [Hidden File Sync](#hiddenfilesync)
- WebClip from [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
- WebRTC peer-to-peer synchronisation without the need any `host` is now possible. (Experimental)
    - This feature is still in the experimental stage. Please be careful when using it.
    - Instead of using server, you can use [webpeer](https://github.com/vrtmrz/livesync-commonlib/tree/main/apps/webpeer) the pseudo client for receiving and sending between devices.


This plug-in might be useful for researchers, engineers, and developers with a need to keep their notes fully self-hosted for security reasons. Or just anyone who would like the peace of mind of knowing that their notes are fully private.
This plug-in may be particularly useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons. It is also suitable for anyone seeking the peace of mind that comes with knowing their notes remain entirely private.

>[!IMPORTANT]
> - Before installing or upgrading this plug-in, please back your vault up.
> - Do not enable this plugin with another synchronization solution at the same time (including iCloud and Obsidian Sync).
> - This is a synchronization plugin. Not a backup solution. Do not rely on this for backup.
> - Before installing or upgrading this plug-in, please back up your vault.
> - Do not enable this plug-in alongside another synchronisation solution at the same time (including iCloud and Obsidian Sync).
> - For backups, we also provide a plug-in called [Differential ZIP Backup](https://github.com/vrtmrz/diffzip).

## How to use

@@ -47,9 +52,11 @@ This plug-in might be useful for researchers, engineers, and developers with a n
1. [Setup CouchDB on fly.io](docs/setup_flyio.md)
2. [Setup your CouchDB](docs/setup_own_server.md)
2. Configure plug-in in [Quick Setup](docs/quick_setup.md)

> [!TIP]
> Now, fly.io has become not free. Fortunately, even though there are some issues, we are still able to use IBM Cloudant. Here is [Setup IBM Cloudant](docs/setup_cloudant.md). It will be updated soon!
> Fly.io is no longer free. Fortunately, despite some issues, we can still use IBM Cloudant. Refer to [Setup IBM Cloudant](docs/setup_cloudant.md).
> And also, we can use peer-to-peer synchronisation without a server. Or very cheap Object Storage -- Cloudflare R2 can be used for free.
> HOWEVER, most importantly, we can use the server that we trust. Therefore, please set up your own server.
> CouchDB can be run on a Raspberry Pi. (But please be careful about the security of your server).


## Information in StatusBar

@@ -79,20 +86,20 @@ Synchronization status is shown in the status bar with the following icons.

To prevent file and database corruption, please wait to stop Obsidian until all progress indicators have disappeared as possible (The plugin will also try to resume, though). Especially in case of if you have deleted or renamed files.


## Tips and Troubleshooting
If you are having problems getting the plugin working see: [Tips and Troubleshooting](docs/troubleshooting.md)
If you are having problems getting the plugin working see: [Tips and Troubleshooting](docs/troubleshooting.md).

## Acknowledgements

The project has been in continual progress and harmony because of
- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors)
- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors)
- JetBrains Community Programs / Support for Open-Source Projects <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo." height="24">
The project has been in continual progress and harmony thanks to:
- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors).
- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors).
- JetBrains Community Programs / Support for Open-Source Projects. <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo" height="24">

May those who have contributed be honoured and remembered for their kindness and generosity.

## Development Guide
Please refer to [Development Guide](devs.md) for development setup, testing infrastructure, code conventions, and more.

## License

Licensed under the MIT License.
172 README_cn.md
@@ -1,9 +1,12 @@
# Self-hosted LiveSync

Self-hosted LiveSync (自搭建在线同步) 是一个社区实现的在线同步插件。
使用一个自搭建的或者购买的 CouchDB 作为中转服务器。兼容所有支持 Obsidian 的平台。
它利用诸如CouchDB或对象存储系统(例如MinIO、S3、R2等)等强大的服务器解决方案,以确保数据同步的可靠性。兼容所有支持 Obsidian 的平台。

注意: 本插件与官方的 "Obsidian Sync" 服务不兼容。
此外,它现在支持使用WebRTC进行点对点同步(实验性功能),使您无需依赖服务器即可直接在设备之间同步笔记。

>[!IMPORTANT]
>本插件与官方的 "Obsidian Sync" 服务不兼容。



@@ -11,119 +14,94 @@ Self-hosted LiveSync (自搭建在线同步) 是一个社区实现的在线同

## 功能

- 可视化的冲突解决器
- 接近实时的多设备双向同步
- 可使用 CouchDB 以及兼容的服务,如 IBM Cloudant
- 支持端到端加密
- 插件同步 (Beta)
- 从 [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf) 接收 WebClip (本功能不适用端到端加密)
- 以最少流量高效同步vault
- 有效处理冲突的修改。
    - 自动合并简单冲突。
- 服务端使用开源的解决方案
    - 支持兼容的解决方案。
- 支持端到端加密。
- 同步设置、代码片段、主题和插件,通过 [Customisation Sync (Beta)](docs/settings.md#6-customization-sync-advanced) 或者 [Hidden File Sync](docs/settings.md#7-hidden-files-advanced).
- 启用 WebRTC 点对点同步,无需指定 `host`(实验性)。
    - 此功能仍处于试验阶段。请在使用时务必谨慎。
    - WebRTC 是一种点对点同步方法,因此**至少有一台设备必须在线才能进行同步**。
    - 与其让您的设备作为稳定的对等节点保持在线,您可以使用两个 pseudo-peers:
        - [livesync-serverpeer](https://github.com/vrtmrz/livesync-serverpeer): 在服务器上运行的 pseudo-client,用于在设备之间接收和发送数据。
        - [webpeer](https://github.com/vrtmrz/livesync-commonlib/tree/main/apps/webpeer): 用于在设备之间接收和发送数据的 pseudo-client。
        - 一个预构建的实例现已上线,地址为 [fancy-syncing.vrtmrz.net/webpeer](https://fancy-syncing.vrtmrz.net/webpeer/)(托管于vrtmrz博客网站)。这也是一个点对点的实例,可自由使用。
    - 欲了解更多信息,请参阅[英文说明文章](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync-en.html)或[日文说明文章](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync)。

适用于出于安全原因需要将笔记完全自托管的研究人员、工程师或开发人员,以及任何喜欢笔记完全私密所带来的安全感的人。
此插件适用于出于安全原因需要将笔记完全自托管的研究人员、工程师或开发人员,以及任何喜欢笔记完全私密所带来的安全感的人。

## 重要提醒

- 请勿与其他同步解决方案(包括 iCloud、Obsidian Sync)一起使用。在启用此插件之前,请确保禁用所有其他同步方法以避免内容损坏或重复。如果要同步到多个服务,请一一进行,切勿同时启用两种同步方法。
  这包括不能将您的保管库放在云同步文件夹中(例如 iCloud 文件夹或 Dropbox 文件夹)
- 这是一个同步插件,不是备份解决方案。不要依赖它进行备份。
- 如果设备的存储空间耗尽,可能会发生数据库损坏。
- 隐藏文件或任何其他不可见文件不会保存在数据库中,因此不会被同步。(**并且可能会被删除**)
>[!IMPORTANT]
> - 在安装或升级此插件之前,请务必备份您的保险库。
> - 请勿同时启用此插件与其它同步方案(包括iCloud和Obsidian Sync)。
> - 对于备份,我们还提供了一款名为[Differential ZIP Backup](https://github.com/vrtmrz/diffzip)的插件。

## 如何使用

### 准备好你的数据库
### 3分钟搞定——在fly.io上部署CouchDB

首先,准备好你的数据库。IBM Cloudant 是用于测试的首选。或者,您也可以在自己的服务器上安装 CouchDB。有关更多信息,请参阅以下内容:
1. [Setup IBM Cloudant](docs/setup_cloudant.md)
2. [Setup your CouchDB](docs/setup_own_server_cn.md)
**推荐初学者第一次使用此方法**
[](https://www.youtube.com/watch?v=7sa_I1832Xc)

Note: 正在征集更多搭建方法!目前在讨论的有 [使用 fly.io](https://github.com/vrtmrz/obsidian-livesync/discussions/85)。
1. [Setup CouchDB on fly.io](docs/setup_flyio.md)
2. 在 [Quick Setup](docs/quick_setup.md) 中配置插件。

### 第一个设备
### 手动设置

1. 在您的设备上安装插件。
2. 配置远程数据库信息。
    1. 将您的服务器信息填写到 `Remote Database configuration`(远程数据库配置)设置页中。
    2. 建议启用 `End to End Encryption`(端到端加密)。输入密码后,单击"应用"。
    3. 点击 `Test Database Connection` 并确保插件显示 `Connected to (你的数据库名称)`。
    4. 单击 `Check database configuration`(检查数据库配置)并确保所有测试均已通过。
3. 在 `Sync Settings`(同步设置)选项卡中配置何时进行同步。(您也可以稍后再设置)
    1. 如果要实时同步,请启用 `LiveSync`。
    2. 或者,根据您的需要设置同步方式。默认情况下,不会启用任何自动同步,这意味着您需要手动触发同步过程。
    3. 其他配置也在这里。建议启用 `Use Trash for deleted files`(删除文件到回收站),但您也可以保持所有配置不变。
4. 配置杂项功能。
    1. 启用 `Show staus inside editor` 会在编辑器右上角显示状态。(推荐开启)
5. 回到编辑器。等待初始扫描完成。
6. 当状态不再变化并显示 ⏹️ 图标表示 COMPLETED(没有 ⏳ 和 🧩 图标)时,您就可以与服务器同步了。
7. 按功能区上的复制图标或从命令面板运行 `Replicate now`(立刻复制)。这会将您的所有数据发送到服务器。
8. 打开命令面板,运行 `Copy setup URI`(复制设置链接),并设置密码。这会将您的配置导出到剪贴板,作为您导入其他设备的链接。
1. 配置服务器
    1. [在fly.io上快速搭建CouchDB](docs/setup_flyio.md)
    2. [自行搭建CouchDB](docs/setup_own_server.md)
2. 在[快速设置](docs/quick_setup.md)中配置插件

> [!提示]
> Fly.io现已不再免费。不过,尽管存在一些问题,我们仍可使用IBM Cloudant。请参考[搭建IBM Cloudant](docs/setup_cloudant.md)。
> 此外,我们还可以采用点对点同步方式,无需搭建服务器;或者选用价格极低的对象存储——Cloudflare R2可免费使用。
> 但最重要的是,我们可以选择自己信任的服务器。因此,建议您搭建自有服务器
> CouchDB可在树莓派上运行。(但请务必注意服务器的安全性)。

**重要: 不要公开本链接,这个链接包含了你的所有认证信息!** (即使没有密码别人读不了)

### 后续设备

注意:如果要与非空的 vault 进行同步,文件的修改日期和时间必须互相匹配。否则,可能会发生额外的传输或文件可能会损坏。
为简单起见,我们强烈建议同步到一个全空的 vault。
## 状态栏中的信息

1. 安装插件。
2. 打开您从第一台设备导出的链接。
3. 插件会询问您是否确定应用配置。回答 `Yes`,然后按照以下说明进行操作:
    1. 对 `Keep local DB?` 回答 `Yes`。
       *注意:如果您希望保留本地现有 vault,则必须对此问题回答 `No`,并对 `Rebuild the database?` 回答 `No`。*
    2. 对 `Keep remote DB?` 回答 `Yes`。
    3. 对 `Replicate once?` 回答 `Yes`。
   完成后,您的所有设置将会从第一台设备成功导入。
4. 你的笔记应该很快就会同步。

## 文件看起来有损坏...

请再次打开配置链接并回答如下:
- 如果您的本地数据库看起来已损坏(当你的本地 Obsidian 文件看起来很奇怪)
    - 对 `Keep local DB?` 回答 `No`
- 如果您的远程数据库看起来已损坏(当复制时发生中断)
    - 对 `Keep remote DB?` 回答 `No`

如果您对两者都回答"否",您的数据库将根据您设备上的内容重建。并且远程数据库将锁定其他设备,您必须再次同步所有设备。(此时,几乎所有文件都会与时间戳同步。因此您可以安全地使用现有的 vault)。

## 测试服务器

设置 Cloudant 或本地 CouchDB 实例有点复杂,所以我搭建了一个 [self-hosted-livesync 尝鲜服务器](https://olstaste.vrtmrz.net/)。欢迎免费尝试!
注意:请仔细阅读"限制"条目。不要发送您的私人 vault。

## 状态栏信息

同步状态将显示在状态栏。
同步状态显示在状态栏中,采用以下图标。

- 活动指示器
    - 📲 网络请求
- 状态
    - ⏹️ 就绪
    - 💤 LiveSync 已启用,正在等待更改。
    - ⚡️ 同步中。
    - ⚠ 一个错误出现了。
- ↑ 上传的 chunk 和元数据数量
- ↓ 下载的 chunk 和元数据数量
- ⏳ 等待的过程的数量
- 🧩 正在等待 chunk 的文件数量
  如果你删除或更名了文件,请等待 ⏳ 图标消失。
    - ⏹️ 已停止
    - 💤 LiveSync已启用,正在等待更改
    - ⚡️ 同步中
    - ⚠ 发生了错误
- 统计指标
    - ↑ 上传的分块与元数据
    - ↓ 下载的分块与元数据
- 进度指示器
    - 📥 未处理的传输项
    - 📄 正在进行的数据库操作
    - 💾 正在进行的写入存储进程
    - ⏳ 正在进行的读取存储进程
    - 🛫 待处理的读取存储进程
    - 📬 批量处理的读取存储进程
    - ⚙️ 正在进行或待处理的隐藏文件存储进程
    - 🧩 等待中的分块
    - 🔌 正在进行的自定义项(配置、代码片段和插件)

为避免文件和数据库损坏,请等待所有进度指示器尽可能消失后再关闭 Obsidian(插件也会尝试恢复同步进度)。特别是在您已删除或重命名文件的情况下,请务必遵守此操作。


## 提示

- 如果文件夹在复制后变为空,则默认情况下该文件夹会被删除。您可以关闭此行为。检查 [设置](docs/settings.md)。
- LiveSync 模式在移动设备上可能导致耗电量增加。建议使用定期同步 + 条件自动同步。
- 移动平台上的 Obsidian 无法连接到非安全 (HTTP) 或本地签名的服务器,即使设备上安装了根证书。
- 没有类似"exclude_folders"的配置。
- 同步时,文件按修改时间进行比较,较旧的将被较新的文件覆盖。然后插件检查冲突,如果需要合并,将打开一个对话框。
- 数据库中的文件在罕见情况下可能会损坏。当接收到的文件看起来已损坏时,插件不会将其写入本地存储。如果您的设备上有文件的本地版本,则可以通过编辑本地文件并进行同步来覆盖损坏的版本。但是,如果您的任何设备上都不存在该文件,则无法挽救该文件。在这种情况下,您可以从设置对话框中删除这些损坏的文件。
- 要阻止插件的启动流程(例如,为了修复数据库问题),您可以在 vault 的根目录创建一个 "redflag.md" 文件。
- 问:数据库在增长,我该如何缩小它?
  答:每个文档都保存了过去 100 次修订,用于检测和解决冲突。想象一台设备已经离线一段时间,然后再次上线。设备必须将其笔记与远程保存的笔记进行比较。如果存在曾经相同的历史修订,则可以安全地直接更新这个文件(和 git 的快进原理一样)。即使文件不在修订历史中,我们也只需检查两个设备上该文件的公有修订版本之后的差异。这就像 git 的冲突解决方法。所以,如果想从根本上解决数据库太大的问题,我们像构建一个扩大版的 git repo 一样去重新设计数据库。
- 更多技术信息在 [技术信息](docs/tech_info.md)
- 如果你想在没有黑曜石的情况下同步文件,你可以使用[filesystem-livesync](https://github.com/vrtmrz/filesystem-livesync)。
- WebClipper 也可在 Chrome Web Store 上使用:[obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
## 使用技巧与故障排除
如果您在配置插件时遇到问题,请参阅:[Tips and Troubleshooting](docs/troubleshooting.md).


仓库地址:[obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip) (文档施工中)
## 致谢
本项目得以持续顺利推进,离不开以下各方的贡献:
- 众多[贡献者](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors)。
- 许多[GitHub 赞助人](https://github.com/sponsors/vrtmrz#sponsors)。
- JetBrains 社区计划/对开源项目的支持。<img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo" height="24">

## License
愿所有作出贡献的人士因其善良与慷慨而受到尊敬与铭记。

The source code is licensed under the MIT License.
本源代码使用 MIT 协议授权。
## 许可协议

本项目采用 MIT 许可协议授权。
178 devs.md Normal file
@@ -0,0 +1,178 @@
# Self-hosted LiveSync Development Guide
## Project Overview

Self-hosted LiveSync is an Obsidian plugin for synchronising vaults across devices using CouchDB, MinIO/S3, or peer-to-peer WebRTC. The codebase uses a modular architecture with TypeScript, Svelte, and PouchDB.

## Architecture

### Module System

The plugin uses a dynamic module system to reduce coupling and improve maintainability:

- **Service Hub**: Central registry for services using dependency injection
    - Services are registered and accessed via `this.services` (in most modules)
- **Module Loading**: All modules extend `AbstractModule` or `AbstractObsidianModule` (which extends `AbstractModule`). These modules are loaded in main.ts and in some other modules.
- **Module Categories** (by directory):
    - `core/` - Platform-independent core functionality
    - `coreObsidian/` - Obsidian-specific core (e.g., `ModuleFileAccessObsidian`)
    - `essential/` - Required modules (e.g., `ModuleMigration`, `ModuleKeyValueDB`)
    - `features/` - Optional features (e.g., `ModuleLog`, `ModuleObsidianSettings`)
    - `extras/` - Development/testing tools (e.g., `ModuleDev`, `ModuleIntegratedTest`)
- **Services**: Core services (e.g., `database`, `replicator`, `storageAccess`) are registered in `ServiceHub` and accessed by modules. They provide an extension point for adding new behaviour without modifying existing code.
    - For example, checks before replication can be added via the `replication.onBeforeReplicate` handler, and a handler can return `false` to prevent replication from starting. `vault.isTargetFile` can also be used to prevent processing specific files (see the sketch after this list).
- **ServiceModule**: A new type of module that directly depends on services.
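As an illustrative sketch only — the handler-registration call below (`addHandler`) and the omitted imports are assumptions, not the actual API; only `replication.onBeforeReplicate` and the `onBindFunction` shape are taken from this guide (see `src/modules/services/` for the real signatures):

```typescript
// Hypothetical sketch of a pre-replication check. `addHandler` is an assumed
// registration method; imports of AbstractModule, LiveSyncCore, and
// LOG_LEVEL_INFO from the codebase are omitted here.
export class ModuleReplicationGuard extends AbstractModule {
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replication.onBeforeReplicate.addHandler(async () => {
            // Returning false prevents replication from starting.
            if (!navigator.onLine) {
                this._log("Offline: skipping replication", LOG_LEVEL_INFO);
                return false;
            }
            return true;
        });
    }
}
```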
#### Note on Module vs Service

After the v0.25.44 refactoring, services will henceforth, as a rule, cease to use `setHandler` (that is to say, simple lazy binding):

- Handlers will be implemented directly in the service.
- However, not everything will be middlewarised. Modules that maintain state or make decisions based on the results of multiple handlers are permitted.

Hence, new features should be implemented as follows:

- If it is a simple extension point (e.g., adding a check before replication), it should be implemented as a handler in the service (e.g., `replication.onBeforeReplicate`).
- If it requires maintaining state or making decisions based on multiple handlers, it should be implemented as a ServiceModule that explicitly depends on the relevant services.
- If you have to implement a new feature without much modification, you can extend existing modules, but implementing a new module or ServiceModule is recommended for better maintainability.
    - Refactoring existing modules to services is also always welcome!
- Please write tests for new features; you will notice that the simple handler approach is quite testable.

### Key Architectural Components

- **LiveSyncLocalDB** (`src/lib/src/pouchdb/`): Local PouchDB database wrapper
- **Replicators** (`src/lib/src/replication/`): CouchDB, Journal, and MinIO sync engines
- **Service Hub** (`src/modules/services/`): Central service registry using dependency injection
- **Common Library** (`src/lib/`): Platform-independent sync logic, shared with other tools

### File Structure Conventions

- **Platform-specific code**: Use the `.platform.ts` suffix (replaced with `.obsidian.ts` in production builds via esbuild)
- **Development code**: Use the `.dev.ts` suffix (replaced with `.prod.ts` in production)
- **Path aliases**: `@/*` maps to `src/*`, `@lib/*` maps to `src/lib/src/*`

## Build & Development Workflow

### Commands

```bash
npm run test:unit # Run unit tests with vitest (or `npm run test:unit:coverage` for coverage)
npm run check     # TypeScript and svelte type checking
npm run dev       # Development build with auto-rebuild (uses .env for test vault paths)
npm run build     # Production build
npm run buildDev  # Development build (one-time)
npm run bakei18n  # Pre-build step: compile i18n resources (YAML → JSON → TS)
npm test          # Run vitest tests (requires Docker services)
```

### Environment Setup

- Create a `.env` file with `PATHS_TEST_INSTALL` pointing to test vault plug-in directories (`:`-separated on Unix, `;` on Windows)
- Development builds auto-copy to these paths on build

### Testing Infrastructure

- ~~**Deno Tests**: Unit tests for platform-independent code (e.g., `HashManager.test.ts`)~~
    - This is now obsolete; these have been migrated to vitest.
- **Vitest** (`vitest.config.ts`): E2E tests via a browser-based harness using Playwright, plus unit tests.
    - Unit tests should be named `*.unit.spec.ts` and placed alongside the implementation file (e.g., `ChunkFetcher.unit.spec.ts`); a minimal example follows this list.
- **Docker Services**: Tests require CouchDB, MinIO (S3), and P2P services:
    ```bash
    npm run test:docker-all:start # Start all test services
    npm run test:full             # Run tests with coverage
    npm run test:docker-all:stop  # Stop services
    ```
    If some services are not needed, start only the required ones (e.g., `test:docker-couchdb:start`).
    Note that if services are already running, the start script will fail. Please stop them first.
- **Test Structure**:
    - `test/suite/` - Integration tests for sync operations
    - `test/unit/` - Unit tests (via vitest, as the harness is browser-based)
    - `test/harness/` - Mock implementations (e.g., `obsidian-mock.ts`)
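A minimal example of a unit test in this convention (the function under test is a stand-in defined inline, not a project API):

```typescript
// ChunkSplitter.unit.spec.ts — minimal vitest unit test following the
// *.unit.spec.ts convention. splitIntoChunks is a stand-in, defined inline.
import { describe, expect, it } from "vitest";

function splitIntoChunks(text: string, size: number): string[] {
    const chunks: string[] = [];
    for (let i = 0; i < text.length; i += size) {
        chunks.push(text.slice(i, i + size));
    }
    return chunks;
}

describe("splitIntoChunks", () => {
    it("splits text into fixed-size pieces", () => {
        expect(splitIntoChunks("abcdef", 2)).toEqual(["ab", "cd", "ef"]);
    });
    it("keeps the trailing remainder", () => {
        expect(splitIntoChunks("abcde", 2)).toEqual(["ab", "cd", "e"]);
    });
});
```

Run it with `npm run test:unit`.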
## Code Conventions

### Internationalisation (i18n)

- **Translation Workflow**:
    1. Edit YAML files in `src/lib/src/common/messagesYAML/` (human-editable)
    2. Run `npm run bakei18n` to compile: YAML → JSON → TypeScript constants
    3. Use the `$t()` and `$msg()` functions for translations
       You can also use `$f` for formatted messages with Tagged Template Literals.
- **Usage**:
    ```typescript
    $msg("dialog.someKey"); // Typed key with autocomplete
    $t("Some message"); // Direct translation
    $f`Hello, ${userName}`; // Formatted message
    ```
- **Supported languages**: `def` (English), `de`, `es`, `ja`, `ko`, `ru`, `zh`, `zh-tw`

### File Path Handling

- Use tagged types from `types.ts`: `FilePath`, `FilePathWithPrefix`, `DocumentID`
- Prefix constants: `CHeader` (chunks), `ICHeader`/`ICHeaderEnd` (internal data)
- Path utilities in `src/lib/src/string_and_binary/path.ts`: `addPrefix()`, `stripAllPrefixes()`, `shouldBeIgnored()`

### Logging & Debugging

- Use `this._log(msg, LOG_LEVEL_INFO)` in modules (automatically prefixes the message with the module name)
- Log levels: `LOG_LEVEL_DEBUG`, `LOG_LEVEL_VERBOSE`, `LOG_LEVEL_INFO`, `LOG_LEVEL_NOTICE`, `LOG_LEVEL_URGENT`
    - LOG_LEVEL_NOTICE and above are reported to the user via Obsidian notices
    - LOG_LEVEL_DEBUG is for debugging only and is not shown in default builds
- Dev mode creates an `ls-debug/` folder in `.obsidian/` for debug outputs (e.g., missing translations)
    - This causes fairly significant performance overhead.

## Common Patterns

### Module Implementation

```typescript
export class ModuleExample extends AbstractObsidianModule {
    async _everyOnloadStart(): Promise<boolean> {
        /* ... */
    }

    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnInitialise(this._everyOnloadStart.bind(this));
    }
}
```

### Settings Management

- Settings are defined in `src/lib/src/common/types.ts` (`ObsidianLiveSyncSettings`)
- Configuration metadata lives in `src/lib/src/common/settingConstants.ts`
- Use `this.services.setting.saveSettingData()` instead of using plugin methods directly

### Database Operations

- Local database operations go through `LiveSyncLocalDB` (which wraps PouchDB)
- Document types: `EntryDoc` (files), `EntryLeaf` (chunks), `PluginDataEntry` (plugin sync)

## Important Files

- [main.ts](src/main.ts) - Plugin entry point, module registration
- [esbuild.config.mjs](esbuild.config.mjs) - Build configuration with platform/dev file replacement
- [package.json](package.json) - Scripts reference and dependencies

## Beta Policy

- Beta versions are denoted by appending `-patched-N` to the base version number.
    - The base version mostly corresponds to the stable release version.
    - e.g., v0.25.41-patched-1 is equivalent to v0.25.42-beta1.
    - This notation is due to the SemVer incompatibility of Obsidian's plugin system.
    - Hence, this release is `0.25.41-patched-1`.
- Each beta version may include larger changes, but bug fixes will often not be included.
    - I think that in most cases, bug fixes will result in stable releases.
    - They will not be released per branch or backported; they will simply be released.
    - Bug fixes for previous versions will be applied to the latest beta version.
      This means that if xx.yy.02-patched-1 exists and there is a defect in xx.yy.01, a fix is applied to xx.yy.02-patched-1 and yields xx.yy.02-patched-2.
      If the fix is required immediately, it is released as xx.yy.02 (with xx.yy.01-patched-1).
        - This procedure remains unchanged from the current one.
- At the very least, I am using the latest beta.
    - However, I will not be using a beta continuously for a week after it has been released. It is probably closer to an RC in nature.

In short, the situation remains unchanged for me, but it means you all become a little safer. Thank you for your understanding!

## Contribution Guidelines

- Follow existing code style and conventions
- Please bump dependencies with care: check artifacts after updates with diff tools, and confirm that only expected changes appear in the build output (to avoid unexpected vulnerabilities).
- When adding new features, please ensure they have an OSS implementation, and avoid using proprietary services or APIs that may limit usage.
    - For example, any functionality to connect to a new type of server is expected to either have an OSS implementation available for that server, or to be managed under some responsibilities and/or limitations without disrupting existing functionality, with the scope for surveillance reduced by some means (e.g., client-side encryption, or auditing the server ourselves).
BIN docs/all_toggles.png Normal file
Binary file not shown. (After: 7.3 KiB)
168 docs/datastructure.md Normal file
@@ -0,0 +1,168 @@
# Data Structures of Self-Hosted LiveSync
## Overview

Self-hosted LiveSync uses the following types of documents:

- Metadata
    - Legacy Metadata
    - Binary Metadata
    - Plain Metadata
- Chunk
- Versioning
- Synchronise Information
- Synchronise Parameters
- Milestone Information

## Description of Each Data Structure

All documents inherit from the `DatabaseEntry` interface. This is necessary for conflict resolution and deletion flags.

```ts
export interface DatabaseEntry {
    _id: DocumentID;
    _rev?: string;
    _deleted?: boolean;
}
```

### Versioning Document

This document stores version information for Self-hosted LiveSync.
The ID is fixed as `obsydian_livesync_version` [VERSIONING_DOCID]. Yes, the typo has become a curse.
When Self-hosted LiveSync detects changes to this document via replication, it reads the version information and checks compatibility.
In that case, if there are major changes, synchronisation may be stopped.
Please refer to negotiation.ts.

### Synchronise Information Document

This document stores information that should be verified in synchronisation settings.
The ID is fixed as `syncinfo` [SYNCINFO_ID].
The information stored in this document is only the conditions necessary for synchronisation to succeed, and as of v0.25.43, only a random string is stored.
This document is only used during rebuilds from the settings screen for CouchDB-based synchronisation, making it something of an appendix. It may be removed in the future.

### Synchronise Parameters Document

This document stores synchronisation parameters.
Synchronisation parameters include the protocol version and the salt used for encryption, but do not include chunking settings.

The ID is fixed as `_local/obsidian_livesync_sync_parameters` [DOCID_SYNC_PARAMETERS] or `_obsidian_livesync_journal_sync_parameters.json` [DOCID_JOURNAL_SYNC_PARAMETERS].

This document exists only on the remote and not locally.
This document stores the following information.
It is read each time before connecting and is used to verify that E2EE settings match.
A mismatch cannot be ignored, and synchronisation will be stopped.

```ts
export interface SyncParameters extends DatabaseEntry {
    _id: typeof DOCID_SYNC_PARAMETERS;
    type: (typeof EntryTypes)["SYNC_PARAMETERS"];
    protocolVersion: ProtocolVersion;
    pbkdf2salt: string;
}
```

#### protocolVersion

This field indicates the protocol version used by the remote. Mostly, this value should be `2` (ProtocolVersions.ADVANCED_E2EE), which indicates safer E2EE support.

#### pbkdf2salt

This field stores the salt used for PBKDF2 key derivation on the remote. This salt and the passphrase together provide the E2EE encryption keys (a sketch follows).
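As a minimal sketch of such a derivation — the iteration count, hash, and derived-key parameters here are illustrative assumptions, not the plugin's actual values; the real flow lives in encryption.ts:

```ts
// Illustrative PBKDF2 derivation via WebCrypto. Iterations, hash, and AES-GCM
// parameters are assumptions, not the plugin's actual values.
async function deriveE2EEKey(passphrase: string, pbkdf2salt: Uint8Array): Promise<CryptoKey> {
    const baseKey = await crypto.subtle.importKey(
        "raw",
        new TextEncoder().encode(passphrase),
        "PBKDF2",
        false,
        ["deriveKey"]
    );
    return await crypto.subtle.deriveKey(
        { name: "PBKDF2", salt: pbkdf2salt, iterations: 100_000, hash: "SHA-256" },
        baseKey,
        { name: "AES-GCM", length: 256 },
        false,
        ["encrypt", "decrypt"]
    );
}
```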
### Milestone Information Document

This document stores information about how the remote accepts and recognises clients.
The ID is fixed as `_local/obsidian_livesync_milestone` [MILESTONE_DOCID].
This document exists only on the remote and not locally.
It is used to indicate synchronisation progress, and includes the version range of accepted chunks and the adjustment values for each node.
"Tweak mismatched" is determined based on the information in this document.

For details, please refer to LiveSyncReplicator.ts, LiveSyncJournalReplicator.ts, and LiveSyncDBFunctions.ts.

```ts
export interface EntryMilestoneInfo extends DatabaseEntry {
    _id: typeof MILESTONE_DOCID;
    type: EntryTypes["MILESTONE_INFO"];
    created: number;
    accepted_nodes: string[];
    node_info: { [key: NodeKey]: NodeData };
    locked: boolean;
    cleaned?: boolean;
    node_chunk_info: { [key: NodeKey]: ChunkVersionRange };
    tweak_values: { [key: NodeKey]: TweakValues };
}
```

### locked

If the remote has been requested to be locked from any client, this is set to true.
When set to true, clients will stop synchronisation unless they are included in accepted_nodes.

### cleaned

If the remote has been cleaned up from any client, this is set to true.
In this case, clients will stop synchronisation, as they need to rebuild again.

### Metadata Document

Metadata documents store metadata for Obsidian notes.

```ts
export interface MetadataDocument extends DatabaseEntry {
    _id: DocumentID;
    ctime: number;
    mtime: number;
    size: number;
    deleted?: boolean;
    eden: Record<string, EdenChunk>; // Obsolete
    path: FilePathWithPrefix;
    children: string[];
    type: EntryTypes["NOTE_LEGACY" | "NOTE_BINARY" | "NOTE_PLAIN"];
}
```

### type

This field indicates the type of a Metadata document.
By convention, Self-hosted LiveSync does not save the MIME type of the file, but distinguishes file kinds with this field. Please note this.
Possible values are as follows:

- NOTE_LEGACY: Legacy metadata document
    - Please do not use
- NOTE_BINARY: Binary metadata document (newnote)
- NOTE_PLAIN: Plain metadata document (plain)

#### children

This field stores an array of Chunk Document IDs.

#### \_id, path

\_id is generated based on the path of the Obsidian note (a sketch follows this subsection):

- If the path starts with `_`, it is converted to `/_` for convenience.
- If Case Sensitive is disabled, it is converted to lowercase.

When Obfuscation is enabled, the \_id contains `f:{obfuscated path}`.
The path field stores the path as is. However, when Obfuscation is enabled, the obfuscated path is stored.

When Property Encryption is enabled, the path field stores all properties including children, mtime, ctime, and size in an encrypted state. Please refer to encryption.ts.
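A minimal sketch of the two conversion rules above (obfuscation and property encryption are out of scope here, and the helper name is illustrative):

```ts
// Illustrative only: derives a document _id from a note path using the two
// documented rules. Obfuscation and Property Encryption are not handled here.
function pathToId(path: string, caseSensitive: boolean): string {
    // Rule 1: a leading "_" is escaped as "/_" (ids beginning with "_" are
    // reserved in CouchDB/PouchDB).
    const escaped = path.startsWith("_") ? `/${path}` : path;
    // Rule 2: when case sensitivity is disabled, the id is lowercased.
    return caseSensitive ? escaped : escaped.toLowerCase();
}
```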
### Chunk Document

```ts
export type EntryLeaf = DatabaseEntry & {
    _id: DocumentID;
    type: EntryTypes["CHUNK"];
    data: string;
};
```

Chunk documents store parts of note content.

- The type field is always `leaf` (the `[CHUNK]` constant).
- The data field stores the chunk content.
- The \_id field is generated based on a hash of the content and the passphrase.

Hash functions used include xxHash and SHA-1, depending on settings.
Chunking methods used include Contextual Chunking and Rabin-Karp Chunking, depending on settings (a simplified sketch follows).
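A minimal sketch of the content-addressed id (SHA-1 variant via WebCrypto; how the passphrase is mixed in and the exact id format are assumptions, simplified from the actual hashing code):

```ts
// Illustrative chunk id: hex digest of (content + passphrase). The real code
// selects xxHash or SHA-1 by settings and formats ids differently.
async function chunkId(data: string, passphrase: string): Promise<string> {
    const bytes = new TextEncoder().encode(data + passphrase);
    const digest = await crypto.subtle.digest("SHA-1", bytes);
    return [...new Uint8Array(digest)]
        .map((b) => b.toString(16).padStart(2, "0"))
        .join("");
}
```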
122 docs/design_docs/chunk_aggregation_by_prefix.md Normal file
@@ -0,0 +1,122 @@
|
||||
# [WITHDRAWN] Chunk Aggregation by Prefix
|
||||
|
||||
## Goal
|
||||
|
||||
To address the "document explosion" and storage bloat issues caused by the current chunking mechanism, while preserving the benefits of content-addressable storage and efficient delta synchronisation. This design aims to significantly reduce the number of documents in the database and simplify Garbage Collection (GC).
|
||||
|
||||
## Motivation
|
||||
|
||||
Our current synchronisation solution splits files into content-defined chunks, with each chunk stored as a separate document in CouchDB, identified by its hash. This architecture effectively leverages CouchDB's replication for automatic deduplication and efficient transfer.
|
||||
|
||||
However, this approach faces significant challenges as the number of files and edits increases:
|
||||
1. **Document Explosion:** A large vault can generate millions of chunk documents, severely degrading CouchDB's performance, particularly during view building and replication.
|
||||
2. **Storage Bloat & GC Difficulty:** Obsolete chunks generated during edits are difficult to identify and remove. Since CouchDB's deletion (`_deleted: true`) is a soft delete, and compaction is a heavy, space-intensive operation, unused chunks perpetually consume storage, making GC impractical for many users.
|
||||
3. **The "Eden" Problem:** A previous attempt, "Keep newborn chunks in Eden", aimed to mitigate this by embedding volatile chunks within the parent document. While it reduced the number of standalone chunks, it introduced a new issue: the parent document's history (`_revs_info`) became excessively large, causing its own form of database bloat and making compaction equally necessary but difficult to manage.
|
||||
|
||||
This new design addresses the root cause—the sheer number of documents—by aggregating chunks into sets.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- The new implementation must maintain the core benefit of deduplication to ensure efficient synchronisation.
|
||||
- The solution must not introduce a single point of bottleneck and should handle concurrent writes from multiple clients gracefully.
|
||||
- The system must provide a clear and feasible strategy for Garbage Collection.
|
||||
- The design should be forward-compatible, allowing for a smooth migration path for existing users.
|
||||
|
||||
## Outlined Methods and Implementation Plans
|
||||
|
||||
### Abstract
|
||||
|
||||
This design introduces a two-tiered document structure to manage chunks: **Index Documents** and **Data Documents**. Chunks are no longer stored as individual documents. Instead, they are grouped into `Data Documents` based on a common hash prefix. The existence and location of each chunk are tracked by `Index Documents`, which are also grouped by the same prefix. This approach dramatically reduces the total document count.
|
||||
|
||||
### Detailed Implementation
|
||||
|
||||
**1. Document Structure:**
|
||||
|
||||
- **Index Document:** Maps chunk hashes to their corresponding Data Document ID. Identified by a prefix of the chunk hash.
|
||||
- `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
|
||||
- Content:
|
||||
```json
|
||||
{
|
||||
"_id": "idx:a9f1b",
|
||||
"_rev": "...",
|
||||
"chunks": {
|
||||
"a9f1b12...": "dat:a9f1b-001",
|
||||
"a9f1b34...": "dat:a9f1b-001",
|
||||
"a9f1b56...": "dat:a9f1b-002"
|
||||
}
|
||||
}
|
||||
```
|
||||
- **Data Document:** Contains the actual chunk data as base64-encoded strings. Identified by a prefix and a sequential number.
|
||||
- `_id`: `dat:{prefix}-{sequence}` (e.g., `dat:a9f1b-001`)
|
||||
- Content:
|
||||
```json
|
||||
{
|
||||
"_id": "dat:a9f1b-001",
|
||||
"_rev": "...",
|
||||
"chunks": {
|
||||
"a9f1b12...": "...", // base64 data
|
||||
"a9f1b34...": "..." // base64 data
|
||||
}
|
||||
}
|
||||
```
|
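To make the lookup concrete, here is a minimal read-path sketch under this structure (PouchDB-style API; the helper name and the prefix length of 5 are assumptions for illustration):

```ts
import PouchDB from "pouchdb";

type IndexLikeDoc = { chunks: Record<string, string> };

const db = new PouchDB("chunks");
const PREFIX_LENGTH = 5; // see chunk_prefix_length below

// Resolve a chunk hash to its data: Index Document first, then Data Document.
async function readChunk(hash: string): Promise<string | undefined> {
    const prefix = hash.slice(0, PREFIX_LENGTH);
    const idx = await db.get<IndexLikeDoc>(`idx:${prefix}`);
    const dataDocId = idx.chunks[hash];
    if (!dataDocId) return undefined; // unknown chunk
    const dat = await db.get<IndexLikeDoc>(dataDocId);
    return dat.chunks[hash]; // base64-encoded chunk data
}
```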
||||
|
||||
**2. Configuration:**
|
||||
|
||||
- `chunk_prefix_length`: The number of characters from the start of a chunk hash to use as a prefix (e.g., `5`). This determines the granularity of aggregation.
|
||||
- `data_doc_size_limit`: The maximum size for a single Data Document to prevent it from becoming too large (e.g., 1MB). When this limit is reached, a new Data Document with an incremented sequence number is created.
|
||||
|
||||
**3. Write/Save Operation Flow:**
|
||||
|
||||
When a client creates new chunks:
|
||||
1. For each new chunk, determine its hash prefix.
|
||||
2. Read the corresponding `Index Document` (e.g., `idx:a9f1b`).
|
||||
3. From the index, determine which of the new chunks already exist in the database.
|
||||
4. For the **truly new chunks only**:
|
||||
a. Read the last `Data Document` for that prefix (e.g., `dat:a9f1b-005`).
|
||||
b. If it is nearing its size limit, create a new one (`dat:a9f1b-006`).
|
||||
c. Add the new chunk data to the Data Document and save it.
|
||||
5. Update the `Index Document` with the locations of the newly added chunks.
|
||||
|
||||
**4. Handling Write Conflicts:**
|
||||
|
||||
Concurrent writes to the same `Index Document` or `Data Document` from multiple clients will cause conflicts (409 Conflict). This is expected and must be handled gracefully. Since additions are incremental, the client application must implement a **retry-and-merge loop**:
|
||||
1. Attempt to save the document.
|
||||
2. On a conflict, re-fetch the latest version of the document from the server.
|
||||
3. Merge its own changes into the latest version.
|
||||
4. Attempt to save again.
|
||||
5. Repeat until successful or a retry limit is reached.
|
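A minimal sketch of this loop, assuming a PouchDB-style API (the `merge` callback and the retry limit are illustrative):

```ts
// Retry-and-merge loop: save, and on a 409 conflict re-fetch, merge, retry.
async function saveWithMerge<T extends { _id: string; _rev?: string }>(
    db: PouchDB.Database,
    doc: T,
    merge: (latest: T, mine: T) => T,
    maxRetries = 10
): Promise<void> {
    let current = doc;
    for (let attempt = 0; attempt < maxRetries; attempt++) {
        try {
            await db.put(current);
            return; // saved successfully
        } catch (e: any) {
            if (e?.status !== 409) throw e; // not a conflict: re-raise
            const latest = (await db.get(current._id)) as unknown as T;
            current = merge(latest, current); // incremental additions merge cleanly
            current._rev = (latest as any)._rev; // retry against the newest revision
        }
    }
    throw new Error(`Retry limit reached while saving ${doc._id}`);
}
```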
||||
|
||||
**5. Garbage Collection (GC):**
|
||||
|
||||
GC becomes a manageable, periodic batch process:
|
||||
1. Scan all file metadata documents to build a master set of all *currently referenced* chunk hashes.
|
||||
2. Iterate through all `Index Documents`. For each chunk listed:
|
||||
a. If the chunk hash is not in the master reference set, it is garbage.
|
||||
b. Remove the garbage entry from the `Index Document`.
|
||||
c. Remove the corresponding data from its `Data Document`.
|
||||
3. If a `Data Document` becomes empty after this process, it can be deleted.
|
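A simplified sketch of one GC pass over a single prefix (real code would batch updates and reuse the retry-and-merge loop above; `referenced` is the master set from step 1):

```ts
// Sweep one prefix: drop unreferenced chunks from its Index Document and
// from the Data Documents it points to; delete Data Documents that empty out.
async function sweepPrefix(db: PouchDB.Database, prefix: string, referenced: Set<string>) {
    const idx: any = await db.get(`idx:${prefix}`);
    const garbageByDataDoc = new Map<string, string[]>();
    for (const [hash, dataDocId] of Object.entries(idx.chunks as Record<string, string>)) {
        if (referenced.has(hash)) continue;
        delete idx.chunks[hash]; // remove the garbage entry from the index
        garbageByDataDoc.set(dataDocId, [...(garbageByDataDoc.get(dataDocId) ?? []), hash]);
    }
    await db.put(idx);
    for (const [dataDocId, hashes] of garbageByDataDoc) {
        const dat: any = await db.get(dataDocId);
        for (const h of hashes) delete dat.chunks[h];
        if (Object.keys(dat.chunks).length === 0) await db.remove(dat); // empty: delete
        else await db.put(dat);
    }
}
```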
||||
|
||||
## Test Strategy
|
||||
|
||||
1. **Unit Tests:** Implement tests for the conflict resolution logic (retry-and-merge loop) to ensure robustness.
|
||||
2. **Integration Tests:**
|
||||
- Verify that concurrent writes from multiple simulated clients result in a consistent, merged state without data loss.
|
||||
- Run a full synchronisation scenario and confirm the resulting database has a significantly lower document count compared to the previous implementation.
|
||||
3. **GC Test:** Simulate a scenario where files are deleted, run the GC process, and verify that orphaned chunks are correctly removed from both Index and Data documents, and that storage is reclaimed after compaction.
|
||||
4. **Migration Test:** Develop and test a "rebuild" process for existing users, which migrates their chunk data into the new aggregated structure.
|
||||
|
||||
## Documentation Strategy
|
||||
|
||||
- This design document will be published to explain the new architecture.
|
||||
- The configuration options (`chunk_prefix_length`, etc.) will be documented for advanced users.
|
||||
- A guide for the migration/rebuild process will be provided.
|
||||
|
||||
## Future Work
|
||||
|
||||
The separation of index and data opens up a powerful possibility. While this design initially implements both within CouchDB, the `Data Documents` could be offloaded to a dedicated object storage service such as **S3, MinIO, or Cloudflare R2**.
|
||||
|
||||
In such a hybrid model, CouchDB would handle only the lightweight `Index Documents` and file metadata, serving as a high-speed synchronisation and coordination layer. The bulky chunk data would reside in a more cost-effective and scalable blob store. This would represent the ultimate evolution of this architecture, combining the best of both worlds.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
This design directly addresses the scalability limitations of the original chunk-per-document model. By aggregating chunks into sets, it significantly reduces the document count, which in turn improves database performance and makes maintenance feasible. The explicit handling of write conflicts and a clear strategy for garbage collection make this a robust and sustainable long-term solution. It effectively resolves the problems identified in previous approaches, including the "Eden" experiment, by tackling the root cause of database bloat. This architecture provides a solid foundation for future growth and scalability.
|
||||
127
docs/design_docs/intention_of_chunks.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# [WIP] The design intent of using metadata and chunks
|
||||
|
||||
## Abstract
|
||||
|
||||
## Goal
|
||||
|
||||
- To explain the following:
|
||||
- What metadata and chunks are
|
||||
- The design intent of using metadata and chunks
|
||||
|
||||
## Background and Motivation
|
||||
|
||||
We are using PouchDB and CouchDB for storing files and synchronising them. PouchDB is a JavaScript database that stores data on the device (browser, and of course, Obsidian), while CouchDB is a NoSQL database that stores data on the server. The two databases can be synchronised to keep data consistent across devices via the CouchDB replication protocol. This is a powerful and flexible way to store and synchronise data, including conflict management, but it is not well suited for files. Therefore, we needed to manage how to store files and synchronise them.
|
||||
|
||||
## Terminology
|
||||
|
||||
- Password:
|
||||
- A string used to authenticate the user.
|
||||
|
||||
- Passphrase:
|
||||
- A string used to encrypt and decrypt data.
|
||||
- This is not a password.
|
||||
|
||||
- Encrypt:
|
||||
- To convert data into a format that is unreadable to anyone.
|
||||
- Can be decrypted by the user who has the passphrase.
|
||||
- Should be 1:n, containing random data to ensure that even the same data, when encrypted, results in different outputs.
|
||||
|
||||
- Obfuscate:
|
||||
- To convert data into a format that is not easily readable.
|
||||
- Can be decrypted by the user who has the passphrase.
|
||||
- Should be 1:1, containing no random data; the same data is always obfuscated to the same result. It is nonetheless unreadable.
|
||||
|
||||
- Hash:
|
||||
- To convert data into a fixed-length string that is not easily readable.
|
||||
- Cannot be decrypted.
|
||||
- Should be 1:1, containing no random data, and the same data is always hashed to the same result.
|
||||
|
||||
## Designs
|
||||
|
||||
### Principles
|
||||
|
||||
- To synchronise and handle conflicts, we should keep the history of modifications.
|
||||
- No data should be lost. Even though some extra data may be stored, it should be removed later, safely.
|
||||
- Each stored data item should be as small as possible to transfer efficiently, but not so small as to be inefficient.
|
||||
- Any type of file should be supported, including binary files.
|
||||
- Encryption should be supported efficiently.
|
||||
- This method should not depart too far from the PouchDB/CouchDB philosophy. It needs to leave room for other `remote`s, to benefit from custom replicators.
|
||||
|
||||
As a result, we have adopted the following design.
|
||||
|
||||
- Files are stored as one metadata entry and multiple chunks.
|
||||
- Chunks are content-addressable, and the metadata contains the ids of the chunks.
|
||||
- Chunks may be referenced from multiple metadata entries. They should be efficiently managed to avoid redundancy.
|
||||
|
||||
### Metadata Design
|
||||
|
||||
The metadata contains the following information:
|
||||
|
||||
| Field | Type | Description | Note |
|
||||
| -------- | -------------------- | ---------------------------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| _id | string | The id of the metadata | It is created from the file path |
|
||||
| _rev | string | The revision of the metadata | It is created by PouchDB |
|
||||
| children | [string] | The ids of the chunks | |
|
||||
| path     | string               | The path of the file         | If `Obfuscate path` is enabled, this field is stored encrypted                                          |
|
||||
| size     | number               | The size of the file         | Not respected; for troubleshooting                                                                      |
|
||||
| ctime    | string               | The creation timestamp       | Not used to compare files, but applied when writing to storage                                          |
|
||||
| mtime | string | The modification timestamp | This will be used to compare files, and will be written to storage |
|
||||
| type | `plain` \| `newnote` | The type of the file | Children of type `plain` will not be base64 encoded, while `newnote` will be |
|
||||
| e_ | boolean | The file is encrypted | Encryption is processed during transfer to the remote. In local storage, this property does not exist |
|
||||
|
||||
#### Decision Rule for `_id` of Metadata
|
||||
|
||||
```ts
|
||||
// Note: This is pseudo code.
|
||||
function decideMetadataId(PATH: string): string {
|
||||
    let _id = PATH;
|
||||
    if (!HANDLE_FILES_AS_CASE_SENSITIVE) {
|
||||
        _id = _id.toLowerCase();
|
||||
    }
|
||||
    if (_id.startsWith("_")) {
|
||||
        _id = "/" + _id; // ids starting with "_" are reserved in CouchDB
|
||||
    }
|
||||
    if (OBFUSCATE_PATH_ENABLED) {
|
||||
        // obfuscatePath: deterministic (1:1) obfuscation keyed by the E2EE passphrase
|
||||
        _id = `f:${obfuscatePath(_id, E2EE_PASSPHRASE)}`;
|
||||
    }
|
||||
    return _id;
|
||||
}
|
||||
```
|
||||
|
||||
#### Expected Questions
|
||||
|
||||
- Why do we need to handle files as case-sensitive?
|
||||
- Some filesystems are case-sensitive, while others are not. For example, Windows is not case-sensitive, while Linux is. Therefore, we need to handle files as case-sensitive to manage conflicts.
|
||||
- The trade-off is that you will not be able to manage files with different cases, so this can be disabled if you only have case-sensitive terminals.
|
||||
- Why obfuscate the path?
|
||||
- E2EE only encrypts the content of the file, not metadata. Hence, E2EE alone is not enough to protect the vault completely. The path is also part of the metadata, so it should be obfuscated. This is a trade-off between security and performance. However, if you title a note with sensitive information, you should obfuscate the path.
|
||||
- What is `f:`?
|
||||
- It is a prefix indicating that the path is obfuscated, used to distinguish obfuscated paths from normal ones. During file enumeration, Self-hosted LiveSync must scan the documents to find the metadata while excluding chunks and other records, and this prefix makes that distinction possible.
|
||||
- Why does an unobfuscated path not start with `f:`?
|
||||
- For compatibility. Self-hosted LiveSync, by its nature, must also be able to handle files created with newer versions as far as possible.
|
||||
|
||||
### Chunk Design
|
||||
|
||||
#### Chunk Structure
|
||||
|
||||
The chunk contains the following information:
|
||||
|
||||
| Field | Type | Description | Note |
|
||||
| ----- | ------------ | ------------------------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| _id | `h:{string}` | The id of the chunk | It is created from the hash of the chunk content |
|
||||
| _rev | string | The revision of the chunk | It is created by PouchDB |
|
||||
| data | string | The content of the chunk | |
|
||||
| type | `leaf` | Fixed | |
|
||||
| e_ | boolean | The chunk is encrypted | Encryption is processed during transfer to the remote. In local storage, this property does not exist |
|
||||
|
||||
**SORRY, TO BE WRITTEN, BUT WE HAVE IMPLEMENTED `v2`, WHICH REQUIRES MORE INFORMATION.**
|
||||
|
||||
### How they are unified
|
||||
|
||||
## Deduplication and Optimisation
|
||||
|
||||
## Synchronisation Strategy
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
## Security and Privacy
|
||||
|
||||
## Edge Cases
|
||||
117
docs/design_docs/tired_chunk_pack.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# [IN DESIGN] Tiered Chunk Storage with Live Compaction
|
||||
|
||||
**VERY IMPORTANT NOTE: This design must be used with the new journal synchronisation method. Otherwise, we risk introducing the bloat of changes from the hot-pack into the Bucket. (CouchDB/PouchDB can synchronise only the most recent changes, or resolve conflicts; the previous Journal Sync CANNOT.) Please proceed with caution.**
|
||||
|
||||
## Goal
|
||||
|
||||
To establish a highly efficient, robust, and scalable synchronisation architecture by introducing a tiered storage system inspired by Log-Structured Merge-Trees (LSM-Trees). This design aims to address the challenges of real-time synchronisation, specifically the massive generation of transient data, while minimising storage bloat and ensuring high performance.
|
||||
|
||||
## Motivation
|
||||
|
||||
Our previous designs, including "Chunk Aggregation by Prefix", successfully addressed the "document explosion" problem. However, the introduction of real-time editor synchronisation exposed a new, critical challenge: the constant generation of short-lived "garbage" chunks during user input. This "garbage storm" places immense pressure on storage, I/O, and the Garbage Collection (GC) process.
|
||||
|
||||
A simple aggregation strategy is insufficient because it treats all data equally, mixing valuable, stable chunks with transient, garbage chunks in permanent storage. This leads to storage bloat and inefficient compaction. We require a system that can intelligently distinguish between "hot" (volatile) and "cold" (stable) data, processing them in the most efficient manner possible.
|
||||
|
||||
## Outlined Methods and Implementation Plans
|
||||
|
||||
### Abstract
|
||||
|
||||
This design implements a two-tiered storage system within CouchDB.
|
||||
1. **Level 0 – Hot Storage:** A set of "Hot-Packs", one for each active client. These act as fast, append-only logs for all newly created chunks. They serve as a temporary staging area, absorbing the "garbage storm" of real-time editing.
|
||||
2. **Level 1 – Cold Storage:** The permanent, immutable storage for stable chunks, consisting of **Index Documents** for fast lookups and **Data Documents (Cold-Packs)** for storing chunk data.
|
||||
|
||||
A background "Compaction" process continuously promotes stable chunks from Hot Storage to Cold Storage, while automatically discarding garbage. This keeps the permanent storage clean and highly optimised.
|
||||
|
||||
### Detailed Implementation
|
||||
|
||||
**1. Document Structure:**
|
||||
|
||||
- **Hot-Pack Document (Level 0):** A per-client, append-only log.
|
||||
- `_id`: `hotpack:{client_id}` (`client_id` could be the same as the `deviceNodeID` used in the `accepted_nodes` in MILESTONE_DOC; enables database 'lockout' for safe synchronisation)
|
||||
- Content: A log of chunk creation events.
|
||||
```json
|
||||
{
|
||||
"_id": "hotpack:a9f1b12...",
|
||||
"_rev": "...",
|
||||
"log": [
|
||||
{ "hash": "abc...", "data": "...", "ts": ..., "file_id": "file1" },
|
||||
{ "hash": "def...", "data": "...", "ts": ..., "file_id": "file2" }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
- **Index Document (Level 1):** A fast, prefix-based lookup table for stable chunks.
|
||||
- `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
|
||||
- Content: Maps a chunk hash to the ID of the Cold-Pack it resides in.
|
||||
```json
|
||||
{
|
||||
"_id": "idx:a9f1b",
|
||||
"chunks": { "a9f1b12...": "dat:1678886400" }
|
||||
}
|
||||
```
|
||||
|
||||
- **Cold-Pack Document (Level 1):** An immutable data block created by the compaction process.
|
||||
- `_id`: `dat:{timestamp_or_uuid}` (e.g., `dat:1678886400123`)
|
||||
- Content: A collection of stable chunks.
|
||||
```json
|
||||
{
|
||||
"_id": "dat:1678886400123",
|
||||
"chunks": { "a9f1b12...": "...", "c3d4e5f...": "..." }
|
||||
}
|
||||
```
|
||||
|
||||
- **Hot-Pack List Document:** A central registry of all active Hot-Packs. This might be a computed document that clients maintain in memory on startup.
|
||||
- `_id`: `hotpack_list`
|
||||
- Content: `{"active_clients": ["hotpack:a9f1b12...", "hotpack:c3d4e5f..."]}`
|
||||
|
||||
**2. Write/Save Operation Flow (Real-time Editing):**
|
||||
|
||||
1. A client generates a new chunk.
|
||||
2. It **immediately appends** the chunk object (`{hash, data, ts, file_id}`) to its **own** Hot-Pack document's `log` array within its local PouchDB. This operation is extremely fast.
|
||||
3. The PouchDB synchronisation process replicates this change to the remote CouchDB and other clients in the background. No other Hot-Packs are consulted during this write operation.
|
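A sketch of the append (local PouchDB write only; replication then carries it in the background, and the entry shape follows the Hot-Pack structure above):

```ts
type HotLogEntry = { hash: string; data: string; ts: number; file_id: string };

// Append a new chunk to this client's own Hot-Pack. Only this client writes
// to this document, so conflicts are not expected on the local write.
async function appendToHotPack(db: PouchDB.Database, clientId: string, entry: HotLogEntry) {
    const id = `hotpack:${clientId}`;
    let doc: any;
    try {
        doc = await db.get(id);
    } catch (e: any) {
        if (e?.status !== 404) throw e;
        doc = { _id: id, log: [] }; // first write from this client
    }
    doc.log.push(entry);
    await db.put(doc);
}
```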
||||
|
||||
**3. Read/Load Operation Flow:**
|
||||
|
||||
To find a chunk's data:
|
||||
1. The client first consults its in-memory list of active Hot-Pack IDs (see section 5).
|
||||
2. It searches for the chunk hash in all **Hot-Pack documents**, starting from its own, then others. It reads them in reverse log order (newest first).
|
||||
3. If not found, it consults the appropriate **Index Document (`idx:...`)** to get the ID of the Cold-Pack.
|
||||
4. It then reads the chunk data from the corresponding **Cold-Pack document (`dat:...`)**.
|
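These steps could be sketched as follows (helper names and the prefix length are assumptions):

```ts
// Tiered read: Level 0 (Hot-Packs, newest entries first), then Level 1
// (Index Document -> Cold-Pack).
async function findChunk(
    db: PouchDB.Database,
    hash: string,
    hotPackIds: string[], // own Hot-Pack first, then the others
    prefixLength = 5
): Promise<string | undefined> {
    for (const id of hotPackIds) {
        const hot: any = await db.get(id).catch(() => undefined);
        if (!hot) continue;
        for (let i = hot.log.length - 1; i >= 0; i--) { // reverse log order
            if (hot.log[i].hash === hash) return hot.log[i].data;
        }
    }
    const idx: any = await db.get(`idx:${hash.slice(0, prefixLength)}`);
    const cold: any = await db.get(idx.chunks[hash]);
    return cold.chunks[hash];
}
```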
||||
|
||||
**4. Compaction & Promotion Process (The "GC"):**
|
||||
|
||||
This is a background task run periodically by clients, or triggered when the number of unprocessed log entries exceeds a threshold (to maintain the ability to synchronise with the remote database, which has a limited document size).
|
||||
1. The client takes its own Hot-Pack (`hotpack:{client_id}`) and scans its `log` array from the beginning (oldest first).
|
||||
2. For each chunk in the log, it checks if the chunk is still referenced in the latest revision of any file.
|
||||
- **If not referenced (Garbage):** The log entry is simply discarded.
|
||||
- **If referenced (Stable):** The chunk is added to a "promotion batch".
|
||||
3. After scanning a certain number of log entries, the client takes the "promotion batch".
|
||||
4. It creates one or more new, immutable **Cold-Pack (`dat:...`)** documents to store the chunk data from the batch.
|
||||
5. It updates the corresponding **Index (`idx:...`)** documents to point to the new Cold-Pack(s).
|
||||
6. Once the promotion is successfully saved to the database, it **removes the processed entries from its Hot-Pack's `log` array**. This is a critical step to prevent reprocessing and keep the Hot-Pack small.
|
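One compaction pass might look like this (a sketch; `isReferenced` is a hypothetical predicate over the latest file metadata, and the batch limit is illustrative):

```ts
// Promote stable chunks from the Hot-Pack into a new immutable Cold-Pack,
// discard garbage, then trim the processed entries from the log.
async function compactHotPack(
    db: PouchDB.Database,
    clientId: string,
    isReferenced: (hash: string) => Promise<boolean>,
    batchLimit = 100
) {
    const hot: any = await db.get(`hotpack:${clientId}`);
    const batch = hot.log.slice(0, batchLimit); // oldest first
    const promote: any[] = [];
    for (const entry of batch) {
        if (await isReferenced(entry.hash)) promote.push(entry); // stable
        // unreferenced (garbage) entries are simply dropped
    }
    if (promote.length > 0) {
        const coldId = `dat:${Date.now()}`;
        const chunks = Object.fromEntries(promote.map((e) => [e.hash, e.data] as [string, string]));
        await db.put({ _id: coldId, chunks }); // step 4: new Cold-Pack
        // step 5 would update the idx:{prefix} documents to point at coldId
    }
    hot.log = hot.log.slice(batch.length); // step 6: drop processed entries
    await db.put(hot);
}
```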
||||
|
||||
**5. Hot-Pack List Management:**
|
||||
|
||||
To know which Hot-Packs to read, clients will:
|
||||
1. On startup, load the `hotpack_list` document into memory.
|
||||
2. Use PouchDB's live `changes` feed to monitor the creation of new `hotpack:*` documents.
|
||||
3. Upon detecting an unknown Hot-Pack, the client updates its in-memory list and attempts to update the central `hotpack_list` document (on a best-effort basis, with conflict resolution).
|
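A sketch of steps 2 and 3, assuming `db` is the local PouchDB instance and `updateHotpackList` is a hypothetical best-effort helper:

```ts
declare const db: PouchDB.Database; // the local PouchDB instance
declare function updateHotpackList(db: PouchDB.Database, ids: string[]): Promise<void>; // hypothetical

const knownHotPacks = new Set<string>(); // seeded from `hotpack_list` on startup

// Watch the live changes feed for Hot-Packs we have not seen before.
db.changes({ live: true, since: "now" }).on("change", (change) => {
    if (change.id.startsWith("hotpack:") && !knownHotPacks.has(change.id)) {
        knownHotPacks.add(change.id);
        // Best-effort update of the central registry, with conflict resolution.
        void updateHotpackList(db, [...knownHotPacks]);
    }
});
```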
||||
|
||||
## Planned Test Strategy
|
||||
|
||||
1. **Unit Tests:** Test the Compaction/Promotion logic extensively. Ensure garbage is correctly identified and stable chunks are promoted correctly.
|
||||
2. **Integration Tests:** Simulate a multi-client real-time editing session.
|
||||
- Verify that writes are fast and responsive.
|
||||
- Confirm that transient garbage chunks do not pollute the Cold Storage.
|
||||
- Confirm that after a period of inactivity, compaction runs and the Hot-Packs shrink.
|
||||
3. **Stress Tests:** Simulate many clients joining and leaving to test the robustness of the `hotpack_list` management.
|
||||
|
||||
## Documentation Strategy
|
||||
|
||||
- This design document will serve as the core architectural reference.
|
||||
- The roles of each document type (Hot-Pack, Index, Cold-Pack, List) will be clearly explained for future developers.
|
||||
- The logic of the Compaction/Promotion process will be detailed.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
This tiered storage design is a direct evolution, born from the lessons of previous architectures. It embraces the ephemeral nature of data in real-time applications. By creating a "staging area" (Hot-Packs) for volatile data, it protects the integrity and performance of the permanent "cold" storage. The Compaction process acts as a self-cleaning mechanism, ensuring that only valuable, stable data is retained long-term. This is not just an optimisation; it is a fundamental shift that enables robust, high-performance, and scalable real-time synchronisation on top of CouchDB.
|
||||
97
docs/design_docs/tired_chunk_pack_bucket.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# [IN DESIGN] Tiered Chunk Storage for Bucket Sync
|
||||
|
||||
## Goal
|
||||
|
||||
To evolve the "Journal Sync" mechanism by integrating the Tiered Storage architecture. This design aims to drastically reduce the size and number of sync packs, minimise storage consumption on the backend bucket, and establish a clear, efficient process for Garbage Collection, all while remaining protocol-agnostic.
|
||||
|
||||
## Motivation
|
||||
|
||||
The original "Journal Sync" liberates us from CouchDB's protocol, but it still packages and transfers entire document changes, including bulky and often transient chunk data. In a real-time or frequent-editing scenario, this results in:
|
||||
1. **Bloated Sync Packs:** Packs become large with redundant or short-lived chunk data, increasing upload and download times.
|
||||
2. **Inefficient Storage:** The backend bucket stores numerous packs containing overlapping and obsolete chunk data, wasting space.
|
||||
3. **Impractical Garbage Collection:** Identifying and purging obsolete *chunk data* from within the pack-based journal history is extremely difficult.
|
||||
|
||||
This new design addresses these problems by fundamentally changing *what* is synchronised in the journal packs. We will synchronise lightweight metadata and logs, while handling bulk data separately.
|
||||
|
||||
## Outlined methods and implementation plans
|
||||
|
||||
### Abstract
|
||||
|
||||
This design adapts the Tiered Storage model for a bucket-based backend. The backend bucket is partitioned into distinct areas for different data types. The "Journal Sync" process is now responsible for synchronising only the "hot" volatile data and lightweight metadata. A separate, asynchronous "Compaction" process, which can be run by any client, is responsible for migrating stable data into permanent, deduplicated "cold" storage.
|
||||
|
||||
### Detailed Implementation
|
||||
|
||||
**1. Bucket Structure:**
|
||||
|
||||
The backend bucket will have four distinct logical areas (prefixes):
|
||||
- `packs/`: For "Journal Sync" packs, containing the journal of metadata and Hot-Log changes.
|
||||
- `hot_logs/`: A dedicated area for each client's "Hot-Log," containing newly created, volatile chunks.
|
||||
- `indices/`: For prefix-based Index files, mapping chunk hashes to their permanent location in Cold Storage.
|
||||
- `cold_chunks/`: For deduplicated, stable chunk data, stored by content hash.
|
||||
|
||||
**2. Data Structures (Client-side PouchDB & Backend Bucket):**
|
||||
|
||||
- **Client Metadata:** Standard file metadata documents, kept in the client's PouchDB.
|
||||
- **Hot-Log (in `hot_logs/`):** A per-client, append-only log file on the bucket.
|
||||
- Path: `hot_logs/{client_id}.jsonlog`
|
||||
- Content: A sequence of JSON objects, one per line, representing chunk creation events. `{"hash": "...", "data": "...", "ts": ..., "file_id": "..."}`
|
||||
|
||||
- **Index File (in `indices/`):** A JSON file for a given hash prefix.
|
||||
- Path: `indices/{prefix}.json`
|
||||
- Content: A set of chunk hashes known to exist in Cold Storage (each hash is also its key in `cold_chunks/`). `{"hash_abc...": true, "hash_def...": true}`
|
||||
|
||||
- **Cold Chunk (in `cold_chunks/`):** The raw, immutable, deduplicated chunk data.
|
||||
- Path: `cold_chunks/{chunk_hash}`
|
||||
|
||||
**3. "Journal Sync" - Send/Receive Operation (Not Live):**
|
||||
|
||||
This process is now extremely lightweight.
|
||||
1. **Send:**
|
||||
a. The client takes all newly generated chunks and **appends them to its own Hot-Log file (`hot_logs/{client_id}.jsonlog`)** on the bucket.
|
||||
b. The client updates its local file metadata in PouchDB.
|
||||
c. It then creates a "Journal Sync" pack containing **only the PouchDB journal of the file metadata changes.** This pack is very small as it contains no chunk data.
|
||||
d. The pack is uploaded to `packs/`.
|
||||
|
||||
2. **Receive:**
|
||||
a. The client downloads new packs from `packs/` and applies the metadata journal to its local PouchDB.
|
||||
b. It downloads the latest versions of all **other clients' Hot-Log files** from `hot_logs/`.
|
||||
c. Now the client has a complete, up-to-date view of all metadata and all "hot" chunks.
|
||||
|
||||
**4. Read/Load Operation Flow:**
|
||||
|
||||
To find a chunk's data:
|
||||
1. The client searches for the chunk hash in its local copy of all **Hot-Logs**.
|
||||
2. If not found, it downloads and consults the appropriate **Index file (`indices/{prefix}.json`)**.
|
||||
3. If the index confirms existence, it downloads the data from **`cold_chunks/{chunk_hash}`**.
|
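A sketch of this read path, assuming a minimal S3-like `Bucket` interface (the interface and helper are illustrative, not a real client API):

```ts
interface Bucket {
    get(key: string): Promise<string | undefined>; // returns the object body, if any
}

// Hot-Logs first (local copies, newest line first), then Index -> cold chunk.
async function readChunkFromBucket(
    bucket: Bucket,
    hash: string,
    hotLogs: Map<string, string>, // client_id -> raw .jsonlog body
    prefixLength = 5
): Promise<string | undefined> {
    for (const body of hotLogs.values()) {
        for (const line of body.trim().split("\n").reverse()) {
            if (!line) continue;
            const entry = JSON.parse(line); // one JSON object per line
            if (entry.hash === hash) return entry.data;
        }
    }
    const idxBody = await bucket.get(`indices/${hash.slice(0, prefixLength)}.json`);
    if (!idxBody || !JSON.parse(idxBody)[hash]) return undefined;
    return bucket.get(`cold_chunks/${hash}`);
}
```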
||||
|
||||
**5. Compaction & Promotion Process (Asynchronous "GC"):**
|
||||
|
||||
This is a deliberate, offline-capable process that any client can choose to run.
|
||||
1. The client "leases" its own Hot-Log for compaction.
|
||||
2. It reads its entire `hot_logs/{client_id}.jsonlog`.
|
||||
3. For each chunk in the log, it checks if the chunk is referenced in the *current, latest state* of the file metadata.
|
||||
- **If not referenced (Garbage):** The log entry is discarded.
|
||||
- **If referenced (Stable):** The chunk is added to a "promotion batch."
|
||||
4. For each chunk in the promotion batch:
|
||||
a. It checks the corresponding `indices/{prefix}.json` to see if the chunk already exists in Cold Storage.
|
||||
b. If it does not exist, it **uploads the chunk data to `cold_chunks/{chunk_hash}`** and updates the `indices/{prefix}.json` file.
|
||||
5. Once the entire Hot-Log has been processed, the client **deletes its `hot_logs/{client_id}.jsonlog` file** (or truncates it to empty), effectively completing the cycle.
|
||||
|
||||
## Test strategy
|
||||
|
||||
1. **Component Tests:** Test the Compaction process independently. Ensure it correctly identifies stable versus garbage chunks and populates the `cold_chunks/` and `indices/` areas correctly.
|
||||
2. **Integration Tests:**
|
||||
- Simulate a multi-client sync cycle. Verify that sync packs in `packs/` are small.
|
||||
- Confirm that `hot_logs/` are correctly created and updated.
|
||||
- Run the Compaction process and verify that data migrates correctly to cold storage and the hot log is cleared.
|
||||
3. **Conflict Tests:** Simulate two clients trying to compact the same index file simultaneously and ensure the outcome is consistent (for example, via a locking mechanism or last-write-wins).
|
||||
|
||||
## Documentation strategy
|
||||
|
||||
- This design document will be the primary reference for the bucket-based architecture.
|
||||
- The structure of the backend bucket (`packs/`, `hot_logs/`, etc.) will be clearly defined.
|
||||
- A detailed description of how to run the Compaction process will be provided to users.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
By applying the Tiered Storage model to "Journal Sync", we transform it into a remarkably efficient system. The synchronisation of everyday changes becomes extremely fast and lightweight, as only metadata journals are exchanged. The heavy lifting of data deduplication and permanent storage is offloaded to a separate, asynchronous Compaction process. This clear separation of concerns makes the system highly scalable, minimises storage costs, and finally provides a practical, robust solution for Garbage Collection in a protocol-agnostic, bucket-based environment.
|
||||
@@ -1,6 +1,6 @@
|
||||
# Keep newborn chunks in Eden.
|
||||
# Keep newborn chunks in Eden
|
||||
|
||||
NOTE: This is the planned feature design document. This is planned, but not yet implemented (as of v0.23.3). This has not reached the design freeze and will be added to from time to time.
|
||||
Notice: deprecated. please refer to the result section of this document.
|
||||
|
||||
## Goal
|
||||
|
||||
@@ -19,15 +19,18 @@ Reduce the number of chunks which in volatile, and reduce the usage of storage o
|
||||
- The problem is that this unnecessary chunking slows down both local and remote operations.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- The implementation must be able to control the size of the document appropriately so that it does not become non-transferable (1).
|
||||
- The implementation must be such that data corruption can be avoided even if forward compatibility is not maintained; due to the nature of Self-hosted LiveSync, backward version connexions are expected.
|
||||
- Viewed as a feature:
|
||||
- This feature should be disabled for migration users.
|
||||
- This feature should be enabled for new users and after rebuilds of migrated users.
|
||||
- Therefore, back in the implementation view: ideally, the implementation should be such that data recovery can be achieved by upgrading immediately after replication.
|
||||
|
||||
## Outlined methods and implementation plans
|
||||
|
||||
### Abstract
|
||||
|
||||
To store and transfer only stable chunks independently, and to share them across multiple documents once they have stabilised, new chunks (i.e. chunks that are considered non-stable) are stored in the document and transferred with it. In this case, care should be taken not to exceed prerequisite (1).
|
||||
|
||||
If this is achieved, the non-leaf document will not be transferred; even if it is, the chunk is stored within the document, so the size can be reduced by compaction.
|
||||
@@ -40,11 +43,11 @@ Details are given below.
|
||||
type EntryWithEden = {
|
||||
    eden: {
|
||||
        [key: DocumentID]: {
|
||||
            data: string,
|
||||
            epoch: number, // The document revision in which this chunk was born.
|
||||
        }
|
||||
    }
|
||||
}
|
||||
            data: string;
|
||||
            epoch: number; // The document revision in which this chunk was born.
|
||||
        };
|
||||
    };
|
||||
};
|
||||
```
|
||||
2. The following configuration items are added:
|
||||
Note: These configurations should be shared as `Tweaks value` between clients.
|
||||
@@ -63,6 +66,7 @@ Details are given below.
|
||||
5. In End-to-End Encryption, property `eden` of documents will also be encrypted.
|
||||
|
||||
### Note
|
||||
|
||||
- When this feature is enabled, forward compatibility is temporarily lost. However, the situation is detected as missing chunks, and the data is simply not reflected in storage on the old version. Therefore, no data loss will occur.
|
||||
|
||||
## Test strategy
|
||||
@@ -77,5 +81,26 @@ Details are given below.
|
||||
- Indeed, we lack a complete configuration table. Efforts will be made, and if one can be produced, this document will reference it. However, it is not required while the feature remains experimental or in beta.
|
||||
- However, this might be an essential feature. Further efforts are desired.
|
||||
|
||||
## Results from actual operation
|
||||
|
||||
After implementing this feature, we have been using it for a while. The following results were obtained.
|
||||
|
||||
- The drawbacks were thought not to be a problem, but they actually were:
|
||||
- A document with `Eden` has a considerably larger history than a document without `Eden`.
|
||||
- Self-hosted LiveSync does not perform compaction aggressively, which results in the remote database becoming partially bloated.
|
||||
- Compaction of the remote database (CouchDB) requires the same amount of free space as the size of the database. Therefore, it is not possible to compact a remote database once it has reached its maximum size. This means that by the time we detect the problem, it is too late.
|
||||
- We have mentioned that `We need compaction` in previous sections. However, it was very hard to determine whether compaction was required until the database was already bloated. (Of course, compacting the database takes time, and, literally, some documents lose their history. It is not a good idea to perform it frequently and meaninglessly. A manual decision is needed, but that is genuinely difficult for normal users.)
|
||||
|
||||
### Consideration and Conclusion
|
||||
To be described after implemented, tested, and, released.
|
||||
|
||||
This feature results in two aspects:
|
||||
|
||||
- For users who are familiar with CouchDB, this feature is somewhat useful. They can watch and manage the database by themselves.
|
||||
- For users who are not familiar with CouchDB, i.e. normal users, this feature is not so useful. They are not familiar with the database and do not know how to handle it; therefore, they cannot decide whether compaction is required.
|
||||
|
||||
Hence, this feature will be kept as an experimental feature, but it is not enabled by default. In addition, it is marked as deprecated. A detailed notice would be noisy for users who are not familiar with CouchDB, so the details are kept in this document for the future.
|
||||
Using this feature is not recommended unless you are familiar with CouchDB and database management.
|
||||
|
||||
Vorotamoroz has written this document. Bias: I am the first author of this plug-in and familiar with CouchDB.
|
||||
|
||||
Research and development was frozen on 2025-04-11. However, bugs will be fixed if they are found. Please feel free to report them.
|
||||
|
||||
@@ -5,10 +5,15 @@
|
||||
- [Setup a CouchDB server](#setup-a-couchdb-server)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [1. Prepare CouchDB](#1-prepare-couchdb)
|
||||
- [A. Using Docker container](#a-using-docker-container)
|
||||
- [A. Using Docker](#a-using-docker)
|
||||
- [1. Prepare](#1-prepare)
|
||||
- [2. Run docker container](#2-run-docker-container)
|
||||
- [B. Install CouchDB directly](#b-install-couchdb-directly)
|
||||
- [B. Using Docker Compose](#b-using-docker-compose)
|
||||
- [1. Prepare](#1-prepare-1)
|
||||
- [2. Creating Compose file](#2-create-a-docker-composeyml-file-with-the-following-added-to-it)
|
||||
- [3. Boot check](#3-run-the-docker-compose-file-to-boot-check)
|
||||
- [4. Starting Docker Compose in background](#4-run-the-docker-compose-file-in-the-background)
|
||||
- [C. Install CouchDB directly](#c-install-couchdb-directly)
|
||||
- [2. Run couchdb-init.sh for initialise](#2-run-couchdb-initsh-for-initialise)
|
||||
- [3. Expose CouchDB to the Internet](#3-expose-couchdb-to-the-internet)
|
||||
- [4. Client Setup](#4-client-setup)
|
||||
@@ -21,43 +26,95 @@
|
||||
---
|
||||
|
||||
## 1. Prepare CouchDB
|
||||
### A. Using Docker container
|
||||
### A. Using Docker
|
||||
|
||||
#### 1. Prepare
|
||||
```bash
|
||||
|
||||
# Prepare environment variables.
|
||||
# Adding environment variables.
|
||||
export hostname=localhost:5984
|
||||
export username=goojdasjdas #Please change as you like.
|
||||
export password=kpkdasdosakpdsa #Please change as you like
|
||||
|
||||
# Prepare directories which saving data and configurations.
|
||||
# Creating the save data & configuration directories.
|
||||
mkdir couchdb-data
|
||||
mkdir couchdb-etc
|
||||
```
|
||||
|
||||
#### 2. Run docker container
|
||||
|
||||
1. Boot Check.
|
||||
```
|
||||
$ docker run --name couchdb-for-ols --rm -it -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
|
||||
```
|
||||
If your container has been exited, please check the permission of couchdb-data, and couchdb-etc.
|
||||
Once CouchDB run, these directories will be owned by uid:`5984`. Please chown it for you again.
|
||||
> [!WARNING]
|
||||
> If your container threw an error or exited unexpectedly, please check the permission of couchdb-data, and couchdb-etc.
|
||||
> Once CouchDB starts, these directories will be owned by uid:`5984`. Please chown it for that uid again.
|
||||
|
||||
2. Enable it in background
|
||||
2. Enable it in the background
|
||||
```
|
||||
$ docker run --name couchdb-for-ols -d --restart always -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
|
||||
```
|
||||
### B. Install CouchDB directly
|
||||
Please refer the [official document](https://docs.couchdb.org/en/stable/install/index.html). However, we do not have to configure it fully. Just administrator needs to be configured.
|
||||
|
||||
Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise)
|
||||
### B. Using Docker Compose
|
||||
|
||||
#### 1. Prepare
|
||||
|
||||
```
|
||||
# Creating the save data & configuration directories.
|
||||
mkdir couchdb-data
|
||||
mkdir couchdb-etc
|
||||
```
|
||||
|
||||
#### 2. Create a `docker-compose.yml` file with the following added to it
|
||||
```
|
||||
services:
|
||||
couchdb:
|
||||
image: couchdb:latest
|
||||
container_name: couchdb-for-ols
|
||||
user: 5984:5984
|
||||
environment:
|
||||
- COUCHDB_USER=<INSERT USERNAME HERE> #Please change as you like.
|
||||
- COUCHDB_PASSWORD=<INSERT PASSWORD HERE> #Please change as you like.
|
||||
volumes:
|
||||
- ./couchdb-data:/opt/couchdb/data
|
||||
- ./couchdb-etc:/opt/couchdb/etc/local.d
|
||||
ports:
|
||||
- 5984:5984
|
||||
restart: unless-stopped
|
||||
```
|
||||
|
||||
#### 3. Run the Docker Compose file to boot check
|
||||
|
||||
```
|
||||
docker compose up
|
||||
# Or if using the old version
|
||||
docker-compose up
|
||||
```
|
||||
> [!WARNING]
|
||||
> If your container threw an error or exited unexpectedly, please check the permission of couchdb-data, and couchdb-etc.
|
||||
> Once CouchDB starts, these directories will be owned by uid:`5984`. Please chown it for that uid again.
|
||||
|
||||
#### 4. Run the Docker Compose file in the background
|
||||
If all went well and didn't throw any errors, `CTRL+C` out of it, and then run this command
|
||||
```
|
||||
docker compose up -d
|
||||
# Or if using the old version
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise)
|
||||
|
||||
|
||||
### C. Install CouchDB directly
|
||||
Please refer to the [official document](https://docs.couchdb.org/en/stable/install/index.html). However, we do not have to configure it fully. Just the administrator needs to be configured.
|
||||
|
||||
## 2. Run couchdb-init.sh for initialise
|
||||
```
|
||||
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | bash
|
||||
```
|
||||
|
||||
If it results like following:
|
||||
If it results like the following:
|
||||
```
|
||||
-- Configuring CouchDB by REST APIs... -->
|
||||
{"ok":true}
|
||||
@@ -75,12 +132,17 @@ If it results like following:
|
||||
|
||||
Your CouchDB has been initialised successfully. If you want to do this manually, please read the script.
|
||||
|
||||
If you are using Docker Compose and the above command does not work or displays `ERROR: Hostname missing`, you can try running the following command, replacing the placeholders with your own values:
|
||||
```
|
||||
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | hostname=http://<YOUR SERVER IP>:5984 username=<INSERT USERNAME HERE> password=<INSERT PASSWORD HERE> bash
|
||||
```
|
||||
|
||||
## 3. Expose CouchDB to the Internet
|
||||
|
||||
- You can skip this step if you use CouchDB only on an intranet and only with desktop devices.
|
||||
- For mobile devices, Obsidian requires a valid SSL certificate. Usually, this means exposing the server to the internet.
|
||||
|
||||
Whatever solutions we can use. For the simplicity, following sample uses Cloudflare Zero Trust for testing.
|
||||
Whatever solutions we can use. For simplicity, the following sample uses Cloudflare Zero Trust for testing.
|
||||
|
||||
```
|
||||
cloudflared tunnel --url http://localhost:5984
|
||||
@@ -99,12 +161,12 @@ You will then get the following output:
|
||||
:
|
||||
:
|
||||
```
|
||||
Now `https://tiles-photograph-routine-groundwater.trycloudflare.com` is our server. Make it into background once please.
|
||||
Now `https://tiles-photograph-routine-groundwater.trycloudflare.com` is our server. Please put it into the background for now.
|
||||
|
||||
|
||||
## 4. Client Setup
|
||||
> [!TIP]
|
||||
> Now manually configuration is not recommended for some reasons. However, if you want to do so, please use `Setup wizard`. The recommended extra configurations will be also set.
|
||||
> Manual configuration is now not recommended, for several reasons. However, if you want to do so, please use the `Setup wizard`. The recommended extra configurations will also be set.
|
||||
|
||||
### 1. Generate the setup URI on a desktop device or server
|
||||
```bash
|
||||
@@ -116,6 +178,13 @@ export password=abc123
|
||||
deno run -A https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/flyio/generate_setupuri.ts
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> What is the `passphrase`? Is it different from `uri_passphrase`?
|
||||
> Yes, the `passphrase` we have exported now is the End-to-End Encryption passphrase.
|
||||
> And `uri_passphrase`, used in `generate_setupuri.ts`, is a different one; it is for decrypting the Setup-URI when using it.
|
||||
> Why: I (vorotamoroz) think that the passphrase of the Setup-URI should be different from the E2EE passphrase, to prevent exposure caused by operational errors or the possibility of evil in our environment. On top of that, I believe that it is desirable for the Setup-URI to be random. The Setup-URI is inevitably long, so it goes through the clipboard. I think that its passphrase should not go through the same path, so it should essentially be typed manually.
|
||||
> Hence, if we leave `uri_passphrase` empty, `generate_setupuri.ts` generates an adjective-noun-random-number passphrase so that we can remember it without going through the clipboard.
|
||||
|
||||
You will then get the following output:
|
||||
|
||||
```bash
|
||||
|
||||
16
docs/tech_info_cn.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Architecture Design
|
||||
|
||||
## How this plug-in synchronises
|
||||
|
||||

|
||||
|
||||
1. When a note is created or modified, Obsidian fires events. Self-hosted LiveSync catches these events and reflects the changes into the local PouchDB
|
||||
2. PouchDB synchronises the changes to the remote CouchDB, either automatically or manually
|
||||
3. Other devices watch the remote CouchDB for changes and receive the latest updates
|
||||
4. Self-hosted LiveSync reflects the synchronised change sets into the Obsidian vault.
|
||||
|
||||
Note: The diagram is a simplified illustration showing only one-way synchronisation between two devices. In reality, bidirectional synchronisation takes place between multiple devices simultaneously.
|
||||
|
||||
## Techniques for reducing bandwidth consumption
|
||||
|
||||

|
||||
@@ -1,10 +1,24 @@
|
||||
# Terms used in this project
|
||||
# Notes on Terminology, Spelling, and Vocabulary Conventions
|
||||
|
||||
## Terms
|
||||
## Spelling and Vocabulary conventions
|
||||
|
||||
### Chunks
|
||||
<!-- TBW, sorry for the draft! -->
|
||||
1. Almost all of the English words are written in British English. For example, "organisation" instead of "organization", "synchronisation" instead of "synchronization", etc. This convention originated from the author's personal preference but is now maintained for consistency.
|
||||
|
||||
2. Idiomatic terms, such as those used in HTML, CSS, and JavaScript, are usually aligned with the language used in the technology. For example, "color" instead of "colour", "program" instead of "programme", etc. Terms used for attributes, properties, and methods are especially notable.
|
||||
|
||||
<!-- Please feel free to write any terms that should be mentioned. And please make pull request. I would love to fill the rest. -->
|
||||
<!-- ### Chunks -->
|
||||
3. We use `dialogue` in documentation for consistency. While `dialog` may appear in source code, particularly in class names, method names, and attributes (following technical conventions in No. 2), we consistently use `dialogue` for user-facing messages and general documentation text. This approach balances No. 1 with No. 2.
|
||||
|
||||
4. Contractions are not used. For example, "do not" instead of "don't", "cannot" instead of "can't", etc., especially `'d`.
|
||||
- We may encounter difficulties with tenses.
|
||||
|
||||
5. However, try to use affirmative forms: `Discard` instead of `Do not keep`, `Continue` instead of `Do not stop`, etc.
|
||||
- Some languages, such as Japanese, have a different meaning for `yes` and `no` between affirmative and negative questions.
|
||||
|
||||
## Terminology
|
||||
|
||||
- Self-hosted LiveSync
|
||||
- This plug-in name. `Self-hosted` is one word.
|
||||
- LiveSync
|
||||
- Very confusing term.
|
||||
- As shorten-form of `Self-hosted LiveSync`.
|
||||
- As the name of a synchronisation mode. This should be changed to `Continuous`, in contrast to `Periodic`.
|
||||
|
||||
81
docs/tips/jwt-on-couchdb.md
Normal file
@@ -0,0 +1,81 @@
|
||||
---
|
||||
title: "JWT Authentication on CouchDB"
|
||||
livesync-version: 0.25.24
|
||||
tags:
|
||||
- tips
|
||||
- CouchDB
|
||||
- JWT
|
||||
authors:
|
||||
- vorotamoroz
|
||||
---
|
||||
|
||||
# JWT Authentication on CouchDB
|
||||
|
||||
When using CouchDB as a backend for Self-hosted LiveSync, it is possible to enhance security by employing JWT (JSON Web Token) Authentication. In particular, using asymmetric keys (ES256 and ES512) provides greater security against token interception.
|
||||
|
||||
## Setting up JWT Authentication (Asymmetrical Key Example)
|
||||
|
||||
### 1. Generate a key pair
|
||||
|
||||
We can use `openssl` to generate an EC key pair as follows:
|
||||
|
||||
```bash
|
||||
# Generate private key
|
||||
# ES512 for secp521r1 curve, we can also use ES256 for prime256v1 curve
|
||||
openssl ecparam -name secp521r1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
|
||||
# openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
|
||||
# Generate public key in SPKI format
|
||||
openssl ec -in private_key.pem -pubout -outform PEM -out public_key.pem
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> A key generator will be provided again in a future version of the user interface.
|
||||
|
||||
### 2. Configure CouchDB to accept JWT tokens
|
||||
|
||||
The following configuration is required:
|
||||
|
||||
| Key | Value | Note |
|
||||
| ------------------------------ | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| chttpd/authentication_handlers | {chttpd_auth, jwt_authentication_handler} | In total, it may be `{chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}`, or something similar. |
|
||||
| jwt_auth/required_claims | "exp" | |
|
||||
| jwt_keys/ec:your_key_id | Your public key in PEM (SPKI) format | Replace `your_key_id` with your actual key ID. You can decide as you like. Note that you can add multiple keys if needed. If you want to use HSxxx, you should set `jwt_keys/hmac:your_key_id` with your HMAC secret. |
|
||||
|
||||
|
||||
Note: When configuring CouchDB via the web interface (Fauxton), the new-lines of the public key should be replaced with `\n` on the header and footer lines (so weird, but true; I have tested it), as follows:
|
||||
```
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
\nMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBq0irb/+K0Qzo7ayIHj0Xtthcntjz
|
||||
r665J5UYdEQMiTtku5rnp95RuN97uA2pPOJOacMBAoiVUnZ1pqEBz9xH9yoAixji
|
||||
Ju...........................................................gTt
|
||||
/xtqrJRwrEy986oRZRQ=
|
||||
\n-----END PUBLIC KEY-----
|
||||
```
|
||||
|
||||
For detailed information, please refer to the [CouchDB JWT Authentication Documentation](https://docs.couchdb.org/en/stable/api/server/authn.html#jwt-authentication).
|
||||
|
||||
### 3. Configure Self-hosted LiveSync to use JWT Authentication
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Use JWT Authentication | Enable this option to use JWT Authentication. |
|
||||
| JWT Algorithm | Select the JWT signing algorithm (e.g., ES256, ES512) that matches your key pair. |
|
||||
| JWT Key | Paste your private key in PEM (pkcs8) format. |
|
||||
| JWT Expiration Duration | Set the token expiration time in minutes. Locally cached tokens are also invalidated after this duration. |
|
||||
| JWT Key ID (kid) | Enter the key ID that you used when configuring CouchDB, i.e., the one that replaced `your_key_id`. |
|
||||
| JWT Subject (sub)       | Set your user ID; this overrides the original `Username` setting. If you detect access under `Username`, JWT authorisation has failed. |
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Self-hosted LiveSync requests to CouchDB treat the user as `_admin`. If you want to restrict access, configure `jwt_auth/roles_claim_name` to a custom claim name. (Self-hosted LiveSync always sets `_couchdb.roles` with the value `["_admin"]`).
|
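For reference, the token the plug-in sends would look roughly like the following sketch, here built with the `jose` library (an assumption for illustration; the plug-in's internal implementation may differ):

```ts
import { SignJWT, importPKCS8 } from "jose";

// privateKeyPem: the PEM (pkcs8) private key pasted into the settings.
async function makeCouchDbJwt(privateKeyPem: string, keyId: string, subject: string) {
    const key = await importPKCS8(privateKeyPem, "ES512");
    return await new SignJWT({ "_couchdb.roles": ["_admin"] }) // roles claim
        .setProtectedHeader({ alg: "ES512", kid: keyId }) // must match jwt_keys/ec:{kid}
        .setSubject(subject) // "sub" claim: the user ID
        .setExpirationTime("15m") // "exp" is a required claim in this setup
        .sign(key);
}
```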
||||
|
||||
### 4. Test the configuration
|
||||
|
||||
Just try to `Test Settings and Continue` in the remote setup dialogue. If you have successfully authenticated, you are all set.
|
||||
|
||||
## Additional Notes
|
||||
|
||||
This feature is still experimental. Please be sure to test it thoroughly in your environment before deploying to production.
|
||||
|
||||
However, we think that this is a great step towards enhancing security when using CouchDB with Self-hosted LiveSync. We shall enable this setting by default in future releases.
|
||||
|
||||
We would love to hear your feedback and any issues you encounter.
|
||||
29
docs/tips/p2p-sync-tips.md
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
title: "Peer-to-Peer Synchronisation Tips"
|
||||
livesync-version: 0.25.24
|
||||
tags:
|
||||
- tips
|
||||
- p2p
|
||||
authors:
|
||||
- vorotamoroz
|
||||
---
|
||||
|
||||
# Peer-to-Peer Synchronisation Tips
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Peer-to-peer synchronisation is still an experimental feature. Although we have made every effort to ensure its reliability, it may not function correctly in all environments.
|
||||
|
||||
## Difficulties with Peer-to-Peer Synchronisation
|
||||
|
||||
It is often the case that peer-to-peer connections do not function correctly, for instance, when using mobile data services.
|
||||
In such circumstances, we recommend connecting all devices to a single Virtual Private Network (VPN). It is advisable to select a service, such as Tailscale, which facilitates direct communication between peers wherever possible.
|
||||
Should one be in an environment where even Tailscale is unable to connect, or where it cannot be lawfully installed, please continue reading.
|
||||
|
||||
## A More Detailed Explanation
|
||||
|
||||
The failure of a Peer-to-Peer connection via WebRTC can be attributed to several factors. These may include an unsuccessful UDP hole-punching attempt, or an intermediary gateway intentionally terminating the connection. Troubleshooting this matter is not a simple undertaking. Furthermore, and rather unfortunately, gateway administrators are typically aware of this type of network behaviour. Whilst a legitimate purpose for such traffic can be cited, such as for web conferencing, this is often insufficient to prevent it from being blocked.
|
||||
|
||||
This situation, however, is the primary reason that our project does not provide a TURN server. Although it is said that a TURN server within WebRTC does not decrypt communications, the project holds the view that the risk of a malicious party impersonating a TURN server must be avoided. Consequently, configuring a TURN server for relay communication is not currently possible through the user interface. Furthermore, there is no official project TURN server, which is to say, one that could be monitored by a third party.
|
||||
|
||||
We request that you provide your own server, using your own Fully Qualified Domain Name (FQDN), and subsequently enter its details into the advanced settings.
|
||||
For testing purposes, Cloudflare's Real-Time TURN Service is exceedingly convenient and offers a generous amount of free data. However, it must be noted that because it is a well-known destination, such traffic is highly conspicuous. There is also a significant possibility that it may be blocked by default. We advise proceeding with caution.
|
||||
@@ -1,12 +1,24 @@
|
||||
<!-- 2024-02-15 -->
|
||||
# Tips and Troubleshooting
|
||||
|
||||
|
||||
- [Tips and Troubleshooting](#tips-and-troubleshooting)
  - [Tips](#tips)
    - [CORS avoidance](#cors-avoidance)
    - [CORS configuration with reverse proxy](#cors-configuration-with-reverse-proxy)
      - [Nginx](#nginx)
      - [Nginx and subdirectory](#nginx-and-subdirectory)
      - [Caddy](#caddy)
      - [Caddy and subdirectory](#caddy-and-subdirectory)
      - [Apache](#apache)
    - [Show all setting panes](#show-all-setting-panes)
    - [How to resolve `Tweaks Mismatched of Changed`](#how-to-resolve-tweaks-mismatched-of-changed)
  - [Notable bugs and fixes](#notable-bugs-and-fixes)
    - [Binary files get bigger on iOS](#binary-files-get-bigger-on-ios)
    - [Some setting name has been changed](#some-setting-name-has-been-changed)
  - [FAQ](#faq)
  - [Questions and Answers](#questions-and-answers)
    - [How should I share the settings between multiple devices?](#how-should-i-share-the-settings-between-multiple-devices)
    - [What should I enter for the passphrase of Setup-URI?](#what-should-i-enter-for-the-passphrase-of-setup-uri)
    - [Why the settings of Self-hosted LiveSync itself is disabled in default?](#why-the-settings-of-self-hosted-livesync-itself-is-disabled-in-default)
    - [The plug-in says `something went wrong`.](#the-plug-in-says-something-went-wrong)
    - [A large number of files were deleted, and were synchronised!](#a-large-number-of-files-were-deleted-and-were-synchronised)
    - [Why `Use an old adapter for compatibility` is somehow enabled in my vault?](#why-use-an-old-adapter-for-compatibility-is-somehow-enabled-in-my-vault)
    - [ZIP (or any extensions) files were not synchronised. Why?](#zip-or-any-extensions-files-were-not-synchronised-why)
    - [I hope to report the issue, but you said you needs `Report`. How to make it?](#i-hope-to-report-the-issue-but-you-said-you-needs-report-how-to-make-it)

@@ -14,25 +26,148 @@

    - [Why are the logs volatile and ephemeral?](#why-are-the-logs-volatile-and-ephemeral)
    - [Some network logs are not written into the file.](#some-network-logs-are-not-written-into-the-file)
    - [If a file were deleted or trimmed, the capacity of the database should be reduced, right?](#if-a-file-were-deleted-or-trimmed-the-capacity-of-the-database-should-be-reduced-right)
    - [How to launch the DevTools](#how-to-launch-the-devtools)
      - [On Desktop Devices](#on-desktop-devices)
      - [On Android](#on-android)
      - [On iOS, iPadOS devices](#on-ios-ipados-devices)
    - [How can I use the DevTools?](#how-can-i-use-the-devtools)
      - [Checking the network log](#checking-the-network-log)
  - [Troubleshooting](#troubleshooting)
    - [While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.](#while-using-cloudflare-tunnels-often-obsidian-api-fallback-and-524-error-occurs)
    - [On the mobile device, cannot synchronise on the local network!](#on-the-mobile-device-cannot-synchronise-on-the-local-network)
    - [I think that something bad happening on the vault...](#i-think-that-something-bad-happening-on-the-vault)
  - [Tips](#tips)
    - [How to resolve `Tweaks Mismatched of Changed`](#how-to-resolve-tweaks-mismatched-of-changed)
    - [Flag Files](#flag-files)
    - [Old tips](#old-tips)

<!-- - -->
## Tips

### CORS avoidance

If we are unable to configure CORS properly for any reason (for example, if we cannot configure network devices that we do not administer), we may choose to ignore CORS.
To bypass CORS using the Obsidian API (also known as the Non-Native API), enable the toggle ``Use Request API to avoid `inevitable` CORS problem``.
<!-- Add **Long explanation of CORS** here for integrity -->

### CORS configuration with reverse proxy

- IMPORTANT: CouchDB handles CORS by itself. Do not process CORS on the reverse proxy (a CouchDB-side sketch follows below).
- Do not process `OPTIONS` requests on the reverse proxy!
- Make sure the `Host` and `X-Forwarded-For` headers are forwarded to the CouchDB.
- If you are using a subdirectory, make sure to handle it properly. More detailed information is in the [CouchDB documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html).
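For reference, a minimal sketch of the CouchDB-side CORS settings (in `local.ini`). The origin values below are assumptions for a typical Obsidian setup; adjust them to your own environment rather than treating this as a definitive configuration:

```ini
; Hypothetical local.ini excerpt: CouchDB answers CORS itself,
; so the reverse proxy only forwards requests untouched.
[chttpd]
enable_cors = true

[cors]
; app://obsidian.md covers desktop; capacitor://localhost covers mobile (assumed origins).
origins = app://obsidian.md, capacitor://localhost, http://localhost
credentials = true
methods = GET, PUT, POST, HEAD, DELETE
headers = accept, authorization, content-type, origin, referer
```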
Minimal configurations are as follows:

#### Nginx

```nginx
location / {
    proxy_pass http://localhost:5984;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```
#### Nginx and subdirectory

```nginx
location /couchdb {
    rewrite ^ $request_uri;
    rewrite ^/couchdb/(.*) /$1 break;
    proxy_pass http://localhost:5984$uri;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}

location /_session {
    proxy_pass http://localhost:5984/_session;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```
#### Caddy

```caddyfile
domain.com {
    reverse_proxy localhost:5984
}
```

#### Caddy and subdirectory

```caddyfile
domain.com {
    reverse_proxy /couchdb/* localhost:5984
    reverse_proxy /_session/* localhost:5984/_session
}
```
#### Apache

Apache is not recommended for use with CouchDB, so its configuration is omitted here. Please refer to the [official documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html#reverse-proxying-with-apache-http-server).
### Show all setting panes

Not all panes are shown by default. To show all of them, please toggle everything in `🧙‍♂️ Wizard` -> `Enable extra and advanced features`.

For your information, all of the panes are as follows:
### How to resolve `Tweaks Mismatched of Changed`

(Since v0.23.17)

If you have changed some configurations or tweaks which should be unified between the devices, you will be asked at the next synchronisation how to reflect them (or not) on other devices. This also occurs on the device where the changes were made, to prevent unexpected configuration changes from propagating unwantedly.\
(We may be thankful for this behaviour if we have synchronised, or backed up and restored, Self-hosted LiveSync. At least, it has been so for me.)

The following dialogue will be shown:

- If we want to propagate the settings of this device, we should choose `Update with mine`.
- On other devices, we should choose `Use configured` to accept and use the configured settings.
- `Dismiss` can postpone the decision. However, we cannot synchronise until we have decided.

Rest assured that in most cases we can choose `Use configured` (unless you are certain that you have not changed the configuration).

If we see this dialogue for the first time, it reflects the settings of the device that was first synchronised with the remote since the upgrade. Probably, we can accept that.

<!-- Add here -->
## Notable bugs and fixes

### Binary files get bigger on iOS

- Reported at: v0.20.x
- Fixed at: v0.21.2 (fixed but not reviewed)
- Required action: larger files will not be fixed automatically; please perform `Verify and repair all files`. If our local database and storage do not match, we will be asked which one to apply.

### Some setting name has been changed

- Fixed at: v0.22.6

| Previous name | New name |
| ------------- | -------- |

@@ -42,107 +177,253 @@

| Setup Wizard | Minimal Setup |
| Check database configuration | Check and Fix database configuration |
## FAQ

## Questions and Answers

### How should I share the settings between multiple devices?

- Device setup:
  - Using `Setup URI` is the most straightforward way.
- Setting changes during use:
  - Use `Sync settings via Markdown files` on the `🔄️ Sync settings` pane.
### What should I enter for the passphrase of Setup-URI?

- Anything you like is OK. However, the recommendation is as follows:
  - Include the vault (group) information.
  - Include the date of operation.
  - Add something random for your security.
  - For example, `MyVault-20240901-r4nd0mStr1ng`.
- Why?
  - The Setup-URI is encoded, which means the actual settings cannot be identified from it. Hence, if you use the same passphrase for multiple vaults, you may accidentally mix the vaults up.
### Why the settings of Self-hosted LiveSync itself is disabled in default?

Basically, if we configure `additionalSuffixOfDatabaseName` the same everywhere, we can synchronise this file between multiple devices (`additionalSuffixOfDatabaseName` should be unique per device, not per synchronised vault).
However, if we synchronise the settings of Self-hosted LiveSync itself, we may encounter some unexpected behaviours.
For example, if a setting that 'lets the Self-hosted LiveSync settings be excluded' is synced, it is very unlikely that things will recover automatically afterwards, and there is little chance we will even notice it, even if we change our minds and change the settings back on other devices. It could get even worse if incompatible changes are automatically reflected; everything would break.
### The plug-in says `something went wrong`.

There are many cases where the cause is really unclear. One possibility is that a chunk fetch did not go well.

1. Restarting Obsidian sometimes helps (a fetch-order problem).
2. If there actually are no chunks, please perform `Recreate missing chunks for all files` on the `🧰 Hatch` pane on the other devices, and synchronise again (restarting Obsidian may also have an effect).
3. If the problem persists, please perform `Verify and repair all files` on the `🧰 Hatch` pane. If our local database and storage do not match, we will be asked which one to apply.
### A large number of files were deleted, and were synchronised!

1. Back up everything important.
   - Your local vault.
   - Your CouchDB database (this can be done by replicating to another database).
2. Prepare an empty vault.
3. Place `redflag.md` at the top of the vault.
4. Apply the settings, **BUT DO NOT PROCEED TO RESTORE YET**.
   - You can use `Setup URI`, a QR code, or apply the settings manually.
5. Set `Maximum file modification time for reflected file events` in `Remediation` on the `🩹 Patches` pane.
   - If you know when the files were deleted, set the time to a bit before that.
   - If not, bisecting may help us.
6. Delete `redflag.md`.
7. Perform `Reset synchronisation on This Device` on the `🎛️ Maintenance` pane.

This mode is very fragile. Please be careful.
### Why `Use an old adapter for compatibility` is somehow enabled in my vault?

Because you are a compassionate and experienced user. Before v0.17.16, we used an old adapter for the local database; at that time, the current default adapter was not yet stable. The new adapter has better performance and new features such as purging. Therefore, we should use the new adapter, and it is now the default.

However, switching from the old adapter to the new one requires some conversion or a local database rebuild, and that takes some time. It was a long time ago now, but we once inconvenienced everyone in a hurry when we changed the format of our database. For these reasons, this toggle is automatically enabled if we have upgraded from a vault which was using the old adapter.

When you rebuild everything or fetch from the remote again, you will be asked to switch this.

Therefore, experienced users (especially those stable enough not to have had to rebuild the database) may have this toggle enabled in their vault. Please disable it when you have enough time.
### ZIP (or any extensions) files were not synchronised. Why?

It depends on what Obsidian detects. Toggling `Detect all extensions` in `Files and links` (a setting of Obsidian) may help us.
### I hope to report the issue, but you said you needs `Report`. How to make it?

We can copy the report to the clipboard by pressing the `Make report` button on the `🧰 Hatch` pane.
### Where can I check the log?

We can launch the log pane with `Show log` from the command palette. And if you have run into trouble, please enable `Verbose Log` on the `General Settings` pane.

However, the logs are not kept for long and are cleared on restart. If you want to check the logs later, please enable `Write logs into the file` temporarily.
> [!IMPORTANT]
>
> - Writing logs into the file will impact performance.
> - Please make sure that you have erased all of your confidential information before reporting an issue.
### Why are the logs volatile and ephemeral?

To avoid unexpectedly exposing our confidential information.
### Some network logs are not written into the file.

In particular, CORS errors are reported to the plug-in as a general error for security reasons, so we cannot detect and log them. We are only able to investigate them by [checking the network log](#checking-the-network-log).
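As a quick check outside Obsidian, here is a hedged example of inspecting the CORS response headers of a CouchDB endpoint with `curl` (the URL is a placeholder for your own server):

```bash
# Hypothetical endpoint; replace with your own CouchDB URL.
# -D - dumps the response headers so we can look for Access-Control-Allow-* fields.
curl -s -D - -o /dev/null \
  -H "Origin: app://obsidian.md" \
  https://couchdb.example.com/
```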
### If a file were deleted or trimmed, the capacity of the database should be reduced, right?

No; even if files are deleted, chunks are not deleted. Self-hosted LiveSync splits files into multiple chunks and transfers only newly created ones. This behaviour lets us use less traffic, and chunks are shared between files to reduce the total usage of the database.

And one more thing: we can handle conflicts on any device, even when they happened on other devices. This means that conflicts may surface from the past, after the point we have already synchronised. Hence we cannot collect and delete unused chunks, even if they are not currently referenced.

To shrink the database size, only `Rebuild everything` works reliably and effectively. But do not worry: if we have synchronised well, we still have the actual, real files. It only takes a bit of time and traffic.
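To illustrate why deleting a file does not free space, here is a minimal TypeScript sketch of content-addressed chunk sharing. The names and the hashing helper are hypothetical, not the plug-in's actual API; the real plug-in splits content more cleverly than fixed-size slicing:

```typescript
import { createHash } from "node:crypto";

// Hypothetical store: chunk id -> chunk body. Chunks are addressed by content,
// so two files containing the same passage share the same chunk entry.
const chunkStore = new Map<string, string>();

function chunkId(piece: string): string {
    return "h:" + createHash("sha256").update(piece).digest("hex");
}

// Split a note into fixed-size pieces and register each piece,
// returning the list of chunk ids that makes up the file entry.
function storeFile(content: string, size = 100): string[] {
    const ids: string[] = [];
    for (let i = 0; i < content.length; i += size) {
        const piece = content.slice(i, i + size);
        const id = chunkId(piece);
        chunkStore.set(id, piece); // already-known chunks are simply reused
        ids.push(id);
    }
    return ids;
}

// Deleting a file only forgets its id list; the chunks stay in the store,
// because an old revision or another file may still reference them.
const fileIndex = new Map<string, string[]>();
fileIndex.set("note.md", storeFile("# Hello\nSome shared passage.\n".repeat(8)));
fileIndex.delete("note.md"); // chunkStore is intentionally left untouched
console.log(`chunks kept after deletion: ${chunkStore.size}`);
```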
### How to launch the DevTools

#### On Desktop Devices

We can launch the DevTools by pressing `ctrl`+`shift`+`i` (`Command`+`shift`+`i` on Mac).

#### On Android

Please refer to [Remote debug Android devices](https://developer.chrome.com/docs/devtools/remote-debugging/).
Once the DevTools have been launched, everything operates the same as on a PC.

#### On iOS, iPadOS devices

If we have a Mac, we can inspect from Safari on the Mac. Please refer to [Inspecting iOS and iPadOS](https://developer.apple.com/documentation/safari-developer-tools/inspecting-ios).
### How can I use the DevTools?

#### Checking the network log

1. Open the network pane.
2. Find the requests marked in red.
3. Capture the `Headers`, `Payload`, and `Response`. **Please be sure to keep important information confidential**. If the `Response` contains secrets, you can omit it. Note: the headers contain some credentials. **The path of the request URL, Remote Address, authority, and authorization must be concealed.**
## Troubleshooting

<!-- Add here -->
### While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.

A `524` error occurs when the request to the server is not completed within a `specified time`. This is a timeout error from Cloudflare; from the reported issue, it seems to be 100 seconds (#627).

Therefore, this error is returned by Cloudflare, not by the server. Hence, the result contains no CORS field, and this response makes the plug-in fall back to the Obsidian API.

However, even with the Obsidian API fallback, the request is still not completed within the `specified time` of 100 seconds.

To solve this issue, we need to configure the timeout settings.

Please enable the toggle in `💪 Power users` -> `CouchDB Connection Tweak` -> `Use timeouts instead of heartbeats`.
### On the mobile device, cannot synchronise on the local network!

Obsidian mobile cannot connect to a non-secure endpoint, i.e., one whose URI starts with `http://`. Check the URI of your CouchDB. A self-signed certificate cannot be used either.
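If your CouchDB only speaks plain HTTP on the LAN, one workable approach is to terminate TLS on a reverse proxy with a certificate the mobile device trusts, as in the Caddy example earlier. A minimal sketch, assuming a hypothetical hostname you own with DNS reachable for certificate issuance:

```caddyfile
# Hypothetical hostname; Caddy obtains a publicly trusted certificate
# automatically and forwards decrypted traffic to the local CouchDB.
couch.example.com {
    reverse_proxy localhost:5984
}
```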
### I think that something bad happening on the vault...

Place the [flag file](#flag-files) on top of the vault, and restart Obsidian. The simplest way is to create a new note and rename it to `redflag`. Of course, we can also put the file there without Obsidian.

For example, if there is `redflag.md`, Self-hosted LiveSync suspends all database and storage processes.

### Flag Files

The flag file is a simple Markdown file designed to prevent storage events and database events in Self-hosted LiveSync.
Its very existence is significant; it may be left blank, or it may contain text; either is acceptable.

This file is in Markdown format so that it can be placed in the vault externally, even if Obsidian fails to launch.

There are some variants of `redflag.md` to use:

| Filename      | Human-Friendly Name | Description                                                                              |
| ------------- | ------------------- | ---------------------------------------------------------------------------------------- |
| `redflag.md`  | -                   | Suspends all processes.                                                                  |
| `redflag2.md` | `flag_rebuild.md`   | Suspends all processes, and rebuilds both local and remote databases from local files.   |
| `redflag3.md` | `flag_fetch.md`     | Suspends all processes, discards the local database, and fetches from the remote again.  |

When fetching everything remotely or performing a rebuild, Obsidian is restarted once for safety reasons. At that time, Self-hosted LiveSync uses these files to determine whether the process should be carried out. (The use of normal Markdown files is a trick to allow externally forcing a cancellation in the event of faults in the rebuild or fetch function itself, especially on mobile devices.) This mechanism is also used for set-up. And just for information, these files are also not subject to synchronisation.

However, occasionally the deletion of these files may fail. This should generally work normally after restarting Obsidian (as far as I can observe).
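As a simple example, a flag file can be created or removed from a terminal on desktop (on iOS, use the Files application as noted in the old tips below); the vault path is a placeholder:

```bash
# Create the flag before launching Obsidian to suspend all processing...
touch /path/to/vault/redflag.md
# ...and remove it (then restart Obsidian) to resume normal operation.
rm /path/to/vault/redflag.md
```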
### Old tips

- Rarely, a file in the database could be corrupted. The plug-in will not write a file to local storage when it looks corrupted. If a local version of the file is on your device, the corruption could be fixed by editing the local file and synchronising it. But if the file does not exist on any of your devices, then it cannot be rescued. In this case, you can delete these items from the settings dialogue.
- To stop the boot-up sequence (e.g., for fixing problems in the databases), you can put a `redflag.md` file (or directory) at the root of your vault. Tip for iOS: a redflag directory can be created at the root of the vault using the Files application.
- Also, with `redflag2.md` placed, we can automatically rebuild both the local and the remote databases during the boot-up sequence. With `redflag3.md`, we can discard only the local database and fetch from the remote again.
- Q: The database is growing; how can I shrink it down?
  A: Each of the docs is saved with its past 100 revisions for detecting and resolving conflicts. Picture one device that has been offline for a while and then comes online again. The device has to compare its notes with the remotely saved ones. If there is a historic revision in which the note used to be identical, it can be updated safely (like a git fast-forward). Even if there is no such revision in the histories, we only have to check the differences after the revision that both devices have in common. This is like git's conflict-resolving method. So, we have to make the database again, like an enlarged git repo, if you want to solve the root of the problem.
- More technical information is in the [Technical Information](tech_info.md).
- If you want to synchronise files without Obsidian, you can use [filesystem-livesync](https://github.com/vrtmrz/filesystem-livesync).
- A WebClipper is also available on the Chrome Web Store: [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)

  Repo is here: [obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip). (Docs are a work in progress.)
@@ -4,7 +4,7 @@ import esbuild from "esbuild";
 import process from "process";
 import builtins from "builtin-modules";
 import sveltePlugin from "esbuild-svelte";
-import sveltePreprocess from "svelte-preprocess";
+import { sveltePreprocess } from "svelte-preprocess";
 import fs from "node:fs";
 // import terser from "terser";
 import { minify } from "terser";
@@ -12,13 +12,21 @@ import inlineWorkerPlugin from "esbuild-plugin-inline-worker";
 import { terserOption } from "./terser.config.mjs";
 import path from "node:path";

-const prod = process.argv[2] === "production";
+const prod = process.argv[2] === "production" || process.env?.BUILD_MODE === "production";
 const keepTest = true; //!prod;

 const manifestJson = JSON.parse(fs.readFileSync("./manifest.json") + "");
 const packageJson = JSON.parse(fs.readFileSync("./package.json") + "");
 const updateInfo = JSON.stringify(fs.readFileSync("./updates.md") + "");

+const PATHS_TEST_INSTALL = process.env?.PATHS_TEST_INSTALL || "";
+const PATH_TEST_INSTALL = PATHS_TEST_INSTALL.split(path.delimiter).map(p => p.trim()).filter(p => p.length);
+if (PATH_TEST_INSTALL) {
+    console.log(`Built files will be copied to ${PATH_TEST_INSTALL}`);
+} else {
+    console.log("Development build: You can install the plug-in to Obsidian for testing by exporting the PATHS_TEST_INSTALL environment variable with the paths to your vault plugins directories separated by your system path delimiter (':' on Unix, ';' on Windows).");
+}
+
 const moduleAliasPlugin = {
     name: "module-alias",
     setup(build) {
@@ -95,6 +103,21 @@ const plugins = [
             } else {
                 fs.copyFileSync("./main_org.js", "./main.js");
             }
+            if (PATH_TEST_INSTALL) {
+                for (const installPath of PATH_TEST_INSTALL) {
+                    const realPath = path.resolve(installPath);
+                    console.log(`Copying built files to ${realPath}`);
+                    if (!fs.existsSync(realPath)) {
+                        console.warn(`Test install path ${installPath} does not exist`);
+                        continue;
+                    }
+                    const manifestX = JSON.parse(fs.readFileSync("./manifest.json") + "");
+                    manifestX.version = manifestJson.version + "." + Date.now();
+                    fs.writeFileSync(path.join(installPath, "manifest.json"), JSON.stringify(manifestX, null, 2));
+                    fs.copyFileSync("./main.js", path.join(installPath, "main.js"));
+                    fs.copyFileSync("./styles.css", path.join(installPath, "styles.css"));
+                }
+            }
         });
     },
 },
eslint.config.mjs (new file, 102 lines)
@@ -0,0 +1,102 @@

import typescriptEslint from "@typescript-eslint/eslint-plugin";
import svelte from "eslint-plugin-svelte";
import _import from "eslint-plugin-import";
import { fixupPluginRules } from "@eslint/compat";
import tsParser from "@typescript-eslint/parser";
import path from "node:path";
import { fileURLToPath } from "node:url";
import js from "@eslint/js";
import { FlatCompat } from "@eslint/eslintrc";

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
    baseDirectory: __dirname,
    recommendedConfig: js.configs.recommended,
    allConfig: js.configs.all,
});

export default [
    {
        ignores: [
            "**/node_modules/*",
            "**/jest.config.js",
            "src/lib/coverage",
            "src/lib/browsertest",
            "**/test.ts",
            "**/tests.ts",
            "**/**test.ts",
            "**/**.test.ts",
            "**/esbuild.*.mjs",
            "**/terser.*.mjs",
            "**/node_modules",
            "**/build",
            "**/.eslintrc.js.bak",
            "src/lib/src/patches/pouchdb-utils",
            "**/esbuild.config.mjs",
            "**/rollup.config.js",
            "modules/octagonal-wheels/rollup.config.js",
            "modules/octagonal-wheels/dist/**/*",
            "src/lib/test",
            "src/lib/src/cli",
            "**/main.js",
            "src/apps/**/*",
            ".prettierrc.*.mjs",
            ".prettierrc.mjs",
            "*.config.mjs"
        ],
    },
    ...compat.extends(
        "eslint:recommended",
        "plugin:@typescript-eslint/eslint-recommended",
        "plugin:@typescript-eslint/recommended"
    ),
    {
        plugins: {
            "@typescript-eslint": typescriptEslint,
            svelte,
            import: fixupPluginRules(_import),
        },

        languageOptions: {
            parser: tsParser,
            ecmaVersion: 5,
            sourceType: "module",

            parserOptions: {
                project: ["tsconfig.json"],
            },
        },

        rules: {
            "no-unused-vars": "off",

            "@typescript-eslint/no-unused-vars": [
                "error",
                {
                    args: "none",
                },
            ],

            "no-unused-labels": "off",
            "@typescript-eslint/ban-ts-comment": "off",
            "no-prototype-builtins": "off",
            "@typescript-eslint/no-empty-function": "off",
            "require-await": "error",
            "@typescript-eslint/require-await": "warn",
            "@typescript-eslint/no-misused-promises": "warn",
            "@typescript-eslint/no-floating-promises": "warn",
            "no-async-promise-executor": "warn",
            "@typescript-eslint/no-explicit-any": "off",
            "@typescript-eslint/no-unnecessary-type-assertion": "error",

            "no-constant-condition": [
                "error",
                {
                    checkLoops: false,
                },
            ],
        },
    },
];
example.env (new file, 1 line)
@@ -0,0 +1 @@

PATHS_TEST_INSTALL=your-vault-plugin-path:and-another-path
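A hedged usage example for this variable with the dev build described in `esbuild.config.mjs` above; the vault path is a placeholder:

```bash
# .env is read by `npm run dev` (node --env-file=.env); the delimiter is ':' on
# Unix and ';' on Windows, matching path.delimiter in the build script.
echo "PATHS_TEST_INSTALL=$HOME/vaults/main/.obsidian/plugins/obsidian-livesync" > .env
npm run dev
```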
@@ -1,10 +0,0 @@

{
    "id": "obsidian-livesync",
    "name": "Self-hosted LiveSync",
    "version": "0.24.0",
    "minAppVersion": "0.9.12",
    "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
    "author": "vorotamoroz",
    "authorUrl": "https://github.com/vrtmrz",
    "isDesktopOnly": false
}
@@ -1,7 +1,7 @@
 {
     "id": "obsidian-livesync",
     "name": "Self-hosted LiveSync",
-    "version": "0.24.12",
+    "version": "0.25.52",
     "minAppVersion": "0.9.12",
     "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "author": "vorotamoroz",
package-lock.json (generated, 23566 lines): file diff suppressed because it is too large.

package.json (129 lines)
@@ -1,30 +1,73 @@
 {
     "name": "obsidian-livesync",
-    "version": "0.24.12",
+    "version": "0.25.52",
     "description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "main": "main.js",
     "type": "module",
     "scripts": {
-        "bakei18n": "npx tsx ./src/lib/_tools/bakei18n.ts",
-        "dev": "node esbuild.config.mjs",
-        "build": "npm run bakei18n && node esbuild.config.mjs production",
+        "bakei18n": "npm run i18n:yaml2json && npm run i18n:bakejson",
+        "i18n:bakejson": "npx tsx ./src/lib/_tools/bakei18n.ts",
+        "i18n:yaml2json": "npx tsx ./src/lib/_tools/yaml2json.ts",
+        "i18n:json2yaml": "npx tsx ./src/lib/_tools/json2yaml.ts",
+        "prettyjson": "prettier --config ./.prettierrc.mjs ./src/lib/src/common/messagesJson/*.json --write --log-level error",
+        "postbakei18n": "prettier --config ./.prettierrc.mjs ./src/lib/src/common/messages/*.ts --write --log-level error",
+        "posti18n:yaml2json": "npm run prettyjson",
+        "predev": "npm run bakei18n",
+        "dev": "node --env-file=.env esbuild.config.mjs",
+        "prebuild": "npm run bakei18n",
+        "build": "node esbuild.config.mjs production",
+        "buildVite": "npx dotenv-cli -e .env -- vite build --mode production",
+        "buildViteOriginal": "npx dotenv-cli -e .env -- vite build --mode original",
+        "buildDev": "node esbuild.config.mjs dev",
         "lint": "eslint src",
         "svelte-check": "svelte-check --tsconfig ./tsconfig.json",
         "tsc-check": "tsc --noEmit",
         "pretty": "npm run prettyNoWrite -- --write --log-level error",
         "prettyCheck": "npm run prettyNoWrite -- --check",
-        "prettyNoWrite": "prettier --config ./.prettierrc \"**/*.js\" \"**/*.ts\" \"**/*.json\" ",
-        "check": "npm run lint && npm run svelte-check && npm run tsc-check"
+        "prettyNoWrite": "prettier --config ./.prettierrc.mjs \"**/*.js\" \"**/*.ts\" \"**/*.json\" ",
+        "check": "npm run lint && npm run svelte-check",
+        "unittest": "deno test -A --no-check --coverage=cov_profile --v8-flags=--expose-gc --trace-leaks ./src/",
+        "test": "vitest run",
+        "test:unit": "vitest run --config vitest.config.unit.ts",
+        "test:unit:coverage": "vitest run --config vitest.config.unit.ts --coverage",
+        "test:install-playwright": "npx playwright install chromium",
+        "test:install-dependencies": "npm run test:install-playwright",
+        "test:coverage": "vitest run --coverage",
+        "test:docker-couchdb:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-start.sh",
+        "test:docker-couchdb:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-init.sh",
+        "test:docker-couchdb:start": "npm run test:docker-couchdb:up && sleep 5 && npm run test:docker-couchdb:init",
+        "test:docker-couchdb:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-stop.sh",
+        "test:docker-couchdb:stop": "npm run test:docker-couchdb:down",
+        "test:docker-s3:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-start.sh",
+        "test:docker-s3:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-init.sh",
+        "test:docker-s3:start": "npm run test:docker-s3:up && sleep 3 && npm run test:docker-s3:init",
+        "test:docker-s3:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-stop.sh",
+        "test:docker-s3:stop": "npm run test:docker-s3:down",
+        "test:docker-p2p:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-start.sh",
+        "test:docker-p2p:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-init.sh",
+        "test:docker-p2p:start": "npm run test:docker-p2p:up && sleep 3 && npm run test:docker-p2p:init",
+        "test:docker-p2p:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-stop.sh",
+        "test:docker-p2p:stop": "npm run test:docker-p2p:down",
+        "test:docker-all:up": "npm run test:docker-couchdb:up ; npm run test:docker-s3:up ; npm run test:docker-p2p:up",
+        "test:docker-all:init": "npm run test:docker-couchdb:init ; npm run test:docker-s3:init ; npm run test:docker-p2p:init",
+        "test:docker-all:down": "npm run test:docker-couchdb:down ; npm run test:docker-s3:down ; npm run test:docker-p2p:down",
+        "test:docker-all:start": "npm run test:docker-all:up && sleep 5 && npm run test:docker-all:init",
+        "test:docker-all:stop": "npm run test:docker-all:down",
+        "test:full": "npm run test:docker-all:start && vitest run --coverage && npm run test:docker-all:stop"
     },
     "keywords": [],
     "author": "vorotamoroz",
     "license": "MIT",
     "devDependencies": {
-        "@chialab/esbuild-plugin-worker": "^0.18.1",
-        "@tsconfig/svelte": "^5.0.4",
+        "@chialab/esbuild-plugin-worker": "^0.19.0",
+        "@eslint/compat": "^2.0.2",
+        "@eslint/eslintrc": "^3.3.4",
+        "@eslint/js": "^9.39.3",
+        "@sveltejs/vite-plugin-svelte": "^6.2.4",
+        "@tsconfig/svelte": "^5.0.8",
+        "@types/deno": "^2.5.0",
         "@types/diff-match-patch": "^1.0.36",
-        "@types/node": "^22.5.4",
+        "@types/node": "^24.10.13",
         "@types/pouchdb": "^6.4.2",
         "@types/pouchdb-adapter-http": "^6.1.6",
         "@types/pouchdb-adapter-idb": "^6.1.7",
@@ -33,21 +76,30 @@
         "@types/pouchdb-mapreduce": "^6.1.10",
         "@types/pouchdb-replication": "^6.4.7",
         "@types/transform-pouch": "^1.0.6",
-        "@typescript-eslint/eslint-plugin": "^8.23.0",
-        "@typescript-eslint/parser": "^8.23.0",
-        "builtin-modules": "^4.0.0",
-        "esbuild": "0.24.2",
-        "esbuild-svelte": "^0.9.0",
-        "eslint": "^8.57.0",
-        "eslint-config-airbnb-base": "^15.0.0",
-        "eslint-plugin-import": "^2.31.0",
+        "@typescript-eslint/eslint-plugin": "8.56.1",
+        "@typescript-eslint/parser": "8.56.1",
+        "@vitest/browser": "^4.0.16",
+        "@vitest/browser-playwright": "^4.0.16",
+        "@vitest/coverage-v8": "^4.0.16",
+        "builtin-modules": "5.0.0",
+        "dotenv": "^17.3.1",
+        "dotenv-cli": "^11.0.0",
+        "esbuild": "0.25.0",
+        "esbuild-plugin-inline-worker": "^0.1.1",
+        "esbuild-svelte": "^0.9.4",
+        "eslint": "^9.39.3",
+        "eslint-plugin-import": "^2.32.0",
+        "eslint-plugin-svelte": "^3.15.0",
         "events": "^3.3.0",
-        "obsidian": "^1.7.2",
-        "postcss": "^8.5.1",
+        "glob": "^13.0.6",
+        "obsidian": "^1.12.3",
+        "playwright": "^1.58.2",
+        "postcss": "^8.5.6",
         "postcss-load-config": "^6.0.1",
         "pouchdb-adapter-http": "^9.0.0",
         "pouchdb-adapter-idb": "^9.0.0",
         "pouchdb-adapter-indexeddb": "^9.0.0",
         "pouchdb-adapter-memory": "^9.0.0",
         "pouchdb-core": "^9.0.0",
         "pouchdb-errors": "^9.0.0",
         "pouchdb-find": "^9.0.0",
@@ -55,28 +107,37 @@
         "pouchdb-merge": "^9.0.0",
         "pouchdb-replication": "^9.0.0",
         "pouchdb-utils": "^9.0.0",
-        "prettier": "^3.4.2",
-        "svelte": "^5.19.7",
+        "prettier": "3.8.1",
+        "rollup-plugin-copy": "^3.5.0",
+        "svelte": "5.41.1",
         "svelte-check": "^4.4.3",
         "svelte-preprocess": "^6.0.3",
-        "terser": "^5.37.0",
+        "terser": "^5.39.0",
         "transform-pouch": "^2.0.0",
         "tslib": "^2.8.1",
-        "tsx": "^4.19.2",
-        "typescript": "^5.7.3"
+        "tsx": "^4.21.0",
+        "typescript": "5.9.3",
+        "vite": "^7.3.1",
+        "vitest": "^4.0.16",
+        "webdriverio": "^9.24.0",
+        "yaml": "^2.8.2"
     },
     "dependencies": {
-        "@aws-sdk/client-s3": "^3.645.0",
-        "@smithy/fetch-http-handler": "^3.2.4",
-        "@smithy/protocol-http": "^4.1.0",
-        "@smithy/querystring-builder": "^3.0.3",
+        "@aws-sdk/client-s3": "^3.808.0",
+        "@smithy/fetch-http-handler": "^5.3.10",
+        "@smithy/md5-js": "^4.2.9",
+        "@smithy/middleware-apply-body-checksum": "^4.3.9",
+        "@smithy/protocol-http": "^5.3.9",
+        "@smithy/querystring-builder": "^4.2.9",
+        "commander": "^14.0.3",
         "diff-match-patch": "^1.0.5",
-        "esbuild-plugin-inline-worker": "^0.1.1",
         "fflate": "^0.8.2",
-        "idb": "^8.0.2",
-        "minimatch": "^10.0.1",
-        "octagonal-wheels": "^0.1.23",
-        "svelte-check": "^4.1.4",
-        "trystero": "^0.20.0",
+        "idb": "^8.0.3",
+        "minimatch": "^10.2.2",
+        "octagonal-wheels": "^0.1.45",
+        "pouchdb-adapter-leveldb": "^9.0.0",
+        "qrcode-generator": "^1.4.4",
+        "trystero": "^0.22.0",
         "xxhash-wasm-102": "npm:xxhash-wasm@^1.0.2"
     }
 }
@@ -66,7 +66,7 @@
 "outputs": [],
 "source": [
     "# see https://fly.io/docs/reference/regions/\n",
-    "region = \"nrt/Tokyo, Japan\" #@param [\"ams/Amsterdam, Netherlands\",\"arn/Stockholm, Sweden\",\"atl/Atlanta, Georgia (US)\",\"bog/Bogotá, Colombia\",\"bos/Boston, Massachusetts (US)\",\"cdg/Paris, France\",\"den/Denver, Colorado (US)\",\"dfw/Dallas, Texas (US)\",\"ewr/Secaucus, NJ (US)\",\"eze/Ezeiza, Argentina\",\"gdl/Guadalajara, Mexico\",\"gig/Rio de Janeiro, Brazil\",\"gru/Sao Paulo, Brazil\",\"hkg/Hong Kong, Hong Kong\",\"iad/Ashburn, Virginia (US)\",\"jnb/Johannesburg, South Africa\",\"lax/Los Angeles, California (US)\",\"lhr/London, United Kingdom\",\"mad/Madrid, Spain\",\"mia/Miami, Florida (US)\",\"nrt/Tokyo, Japan\",\"ord/Chicago, Illinois (US)\",\"otp/Bucharest, Romania\",\"phx/Phoenix, Arizona (US)\",\"qro/Querétaro, Mexico\",\"scl/Santiago, Chile\",\"sea/Seattle, Washington (US)\",\"sin/Singapore, Singapore\",\"sjc/San Jose, California (US)\",\"syd/Sydney, Australia\",\"waw/Warsaw, Poland\",\"yul/Montreal, Canada\",\"yyz/Toronto, Canada\" ] {allow-input: true}\n",
+    "region = \"nrt/Tokyo, Japan\" #@param [\"jnb/Johannesburg, South Africa\",\"bom/Mumbai, India\",\"sin/Singapore, Singapore\",\"syd/Sydney, Australia\",\"nrt/Tokyo, Japan\",\"ams/Amsterdam, Netherlands\",\"fra/Frankfurt, Germany\",\"lhr/London, United Kingdom\",\"cdg/Paris, France\",\"arn/Stockholm, Sweden\",\"iad/Ashburn, Virginia (US)\",\"ord/Chicago, Illinois (US)\",\"dfw/Dallas, Texas (US)\",\"lax/Los Angeles, California (US)\",\"sjc/San Jose, California (US)\",\"ewr/Secaucus, NJ (US)\",\"yyz/Toronto, Canada\",\"gru/Sao Paulo, Brazil\"] {allow-input: true}\n",
 "%env region={region.split(\"/\")[0]}\n",
 "#%env appame=\n",
 "#%env username=\n",
src/LiveSyncBaseCore.ts (new file, 282 lines)
@@ -0,0 +1,282 @@

import { LOG_LEVEL_INFO } from "octagonal-wheels/common/logger";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
import type { HasSettings, ObsidianLiveSyncSettings, EntryDoc } from "./lib/src/common/types";
import { __$checkInstanceBinding } from "./lib/src/dev/checks";
import type { Confirm } from "./lib/src/interfaces/Confirm";
import type { DatabaseFileAccess } from "./lib/src/interfaces/DatabaseFileAccess";
import type { Rebuilder } from "./lib/src/interfaces/DatabaseRebuilder";
import type { IFileHandler } from "./lib/src/interfaces/FileHandler";
import type { StorageAccess } from "./lib/src/interfaces/StorageAccess";
import type { LiveSyncLocalDBEnv } from "./lib/src/pouchdb/LiveSyncLocalDB";
import type { LiveSyncCouchDBReplicatorEnv } from "./lib/src/replication/couchdb/LiveSyncReplicator";
import type { CheckPointInfo } from "./lib/src/replication/journal/JournalSyncTypes";
import type { LiveSyncJournalReplicatorEnv } from "./lib/src/replication/journal/LiveSyncJournalReplicatorEnv";
import type { LiveSyncReplicatorEnv } from "./lib/src/replication/LiveSyncAbstractReplicator";
import { useTargetFilters } from "./lib/src/serviceFeatures/targetFilter";
import type { ServiceContext } from "./lib/src/services/base/ServiceBase";
import type { InjectableServiceHub } from "./lib/src/services/InjectableServices";
import { AbstractModule } from "./modules/AbstractModule";
import { ModulePeriodicProcess } from "./modules/core/ModulePeriodicProcess";
import { ModuleReplicator } from "./modules/core/ModuleReplicator";
import { ModuleReplicatorCouchDB } from "./modules/core/ModuleReplicatorCouchDB";
import { ModuleReplicatorMinIO } from "./modules/core/ModuleReplicatorMinIO";
import { ModuleConflictChecker } from "./modules/coreFeatures/ModuleConflictChecker";
import { ModuleConflictResolver } from "./modules/coreFeatures/ModuleConflictResolver";
import { ModuleResolvingMismatchedTweaks } from "./modules/coreFeatures/ModuleResolveMismatchedTweaks";
import { ModuleLiveSyncMain } from "./modules/main/ModuleLiveSyncMain";
import type { ServiceModules } from "./lib/src/interfaces/ServiceModule";
import { ModuleBasicMenu } from "./modules/essential/ModuleBasicMenu";
import { usePrepareDatabaseForUse } from "./lib/src/serviceFeatures/prepareDatabaseForUse";

export class LiveSyncBaseCore<
        T extends ServiceContext = ServiceContext,
        TCommands extends IMinimumLiveSyncCommands = IMinimumLiveSyncCommands,
    >
    implements
        LiveSyncLocalDBEnv,
        LiveSyncReplicatorEnv,
        LiveSyncJournalReplicatorEnv,
        LiveSyncCouchDBReplicatorEnv,
        HasSettings<ObsidianLiveSyncSettings>
{
    addOns = [] as TCommands[];

    /**
     * Register an add-on to the plug-in.
     * Add-ons are features that are not essential to the core functionality of the plug-in.
     * @param addOn
     */
    private _registerAddOn(addOn: TCommands) {
        this.addOns.push(addOn);
        this.services.appLifecycle.onUnload.addHandler(() => Promise.resolve(addOn.onunload()).then(() => true));
    }

    /**
     * Get an add-on by its class name. Returns undefined if not found.
     * @param cls
     * @returns
     */
    getAddOn<T extends TCommands>(cls: string) {
        for (const addon of this.addOns) {
            if (addon.constructor.name == cls) return addon as T;
        }
        return undefined;
    }

    constructor(
        serviceHub: InjectableServiceHub<T>,
        serviceModuleInitialiser: (
            core: LiveSyncBaseCore<T, TCommands>,
            serviceHub: InjectableServiceHub<T>
        ) => ServiceModules,
        extraModuleInitialiser: (core: LiveSyncBaseCore<T, TCommands>) => AbstractModule[],
        addOnsInitialiser: (core: LiveSyncBaseCore<T, TCommands>) => TCommands[],
        featuresInitialiser: (core: LiveSyncBaseCore<T, TCommands>) => void
    ) {
        this._services = serviceHub;
        this._serviceModules = serviceModuleInitialiser(this, serviceHub);
        const extraModules = extraModuleInitialiser(this);
        this.registerModules(extraModules);
        this.initialiseServiceFeatures();
        featuresInitialiser(this);
        const addOns = addOnsInitialiser(this);
        for (const addOn of addOns) {
            this._registerAddOn(addOn);
        }
        this.bindModuleFunctions();
    }

    /**
     * The service hub for managing all services.
     */
    _services: InjectableServiceHub<T> | undefined = undefined;

    get services() {
        if (!this._services) {
            throw new Error("Services not initialised yet");
        }
        return this._services;
    }

    /**
     * Service Modules
     */
    protected _serviceModules: ServiceModules;

    get serviceModules() {
        return this._serviceModules;
    }

    /**
     * The modules of the plug-in. Modules are responsible for specific features or functionalities of the plug-in, such as file handling, conflict resolution, replication, etc.
     */
    private modules = [
        // Move to registerModules
    ] as AbstractModule[];

    /**
     * Get a module by its class. Throws an error if not found.
     * Mostly used for getting SetupManager.
     * @param constructor
     * @returns
     */
    getModule<T extends AbstractModule>(constructor: new (...args: any[]) => T): T {
        for (const module of this.modules) {
            if (module.constructor === constructor) return module as T;
        }
        throw new Error(`Module ${constructor} not found or not loaded.`);
    }

    /**
     * Register a module to the plug-in.
     * @param module The module to register.
     */
    private _registerModule(module: AbstractModule) {
        this.modules.push(module);
    }

    public registerModules(extraModules: AbstractModule[] = []) {
        this._registerModule(new ModuleLiveSyncMain(this));
        this._registerModule(new ModuleConflictChecker(this));
        this._registerModule(new ModuleReplicatorMinIO(this));
        this._registerModule(new ModuleReplicatorCouchDB(this));
        this._registerModule(new ModuleReplicator(this));
        this._registerModule(new ModuleConflictResolver(this));
        this._registerModule(new ModulePeriodicProcess(this));
        this._registerModule(new ModuleResolvingMismatchedTweaks(this));
        this._registerModule(new ModuleBasicMenu(this));

        for (const module of extraModules) {
            this._registerModule(module);
        }
        // Test and Dev Modules
    }

    /**
     * Bind module functions to services.
     */
    public bindModuleFunctions() {
        for (const module of this.modules) {
            if (module instanceof AbstractModule) {
                module.onBindFunction(this, this.services);
                __$checkInstanceBinding(module); // Check if all functions are properly bound, and log warnings if not.
            } else {
                this.services.API.addLog(
                    `Module ${(module as any)?.constructor?.name ?? "unknown"} does not have onBindFunction, skipping binding.`,
                    LOG_LEVEL_INFO
                );
            }
        }
    }

    /**
     * @obsolete Use services.UI.confirm instead. The confirm function to show a confirmation dialog to the user.
     */
    get confirm(): Confirm {
        return this.services.UI.confirm;
    }

    /**
     * @obsolete Use services.setting.currentSettings instead. The current settings of the plug-in.
     */
    get settings() {
        return this.services.setting.settings;
    }

    /**
     * @obsolete Use services.setting.settings instead. Set the settings of the plug-in.
     */
    set settings(value: ObsidianLiveSyncSettings) {
        this.services.setting.settings = value;
    }

    /**
     * @obsolete Use services.setting.currentSettings instead. Get the settings of the plug-in.
     * @returns The current settings of the plug-in.
     */
    getSettings(): ObsidianLiveSyncSettings {
        return this.settings;
    }

    /**
     * @obsolete Use services.database.localDatabase instead. The local database instance.
     */
    get localDatabase() {
        return this.services.database.localDatabase;
    }

    /**
     * @obsolete Use services.database.localDatabase instead. Get the PouchDB database instance. Note that this is not the same as the local database instance, which is a wrapper around the PouchDB database.
     * @returns The PouchDB database instance.
     */
    getDatabase(): PouchDB.Database<EntryDoc> {
        return this.localDatabase.localDatabase;
    }

    /**
     * @obsolete Use services.keyValueDB.simpleStore instead. A simple key-value store for storing non-file data, such as checkpoints, sync status, etc.
     */
    get simpleStore() {
        return this.services.keyValueDB.simpleStore as SimpleStore<CheckPointInfo>;
    }

    /**
     * @obsolete Use services.replication.getActiveReplicator instead. Get the active replicator instance. Note that there can be multiple replicators, but only one can be active at a time.
     */
    get replicator() {
        return this.services.replicator.getActiveReplicator()!;
    }

    /**
     * @obsolete Use services.keyValueDB.kvDB instead. Get the key-value database instance. This is used for storing large data that cannot be stored in the simple store, such as file metadata, etc.
     */
    get kvDB() {
        return this.services.keyValueDB.kvDB;
    }

    /// Modules which were relied on services
    /**
     * Storage Accessor for handling file operations.
     * @obsolete Use serviceModules.storageAccess instead.
     */
    get storageAccess(): StorageAccess {
        return this.serviceModules.storageAccess;
    }
    /**
     * Database File Accessor for handling file operations related to the database, such as exporting the database, importing from a file, etc.
     * @obsolete Use serviceModules.databaseFileAccess instead.
     */
    get databaseFileAccess(): DatabaseFileAccess {
        return this.serviceModules.databaseFileAccess;
    }
    /**
     * File Handler for handling file operations related to replication, such as resolving conflicts, applying changes from replication, etc.
     * @obsolete Use serviceModules.fileHandler instead.
     */
    get fileHandler(): IFileHandler {
        return this.serviceModules.fileHandler;
    }
    /**
     * Rebuilder for handling database rebuilding operations.
     * @obsolete Use serviceModules.rebuilder instead.
     */
    get rebuilder(): Rebuilder {
        return this.serviceModules.rebuilder;
    }

    // private initialiseServices<T extends ServiceContext>(serviceHub: InjectableServiceHub<T>) {
    //     this._services = serviceHub;
    // }
    /**
     * Initialise ServiceFeatures.
     * (Please refer to `serviceFeatures` for more details.)
     */
    initialiseServiceFeatures() {
        useTargetFilters(this);
        // enable target filter feature.
        usePrepareDatabaseForUse(this);
    }
}

export interface IMinimumLiveSyncCommands {
    onunload(): void;
    onload(): void | Promise<void>;
    constructor: { name: string };
}
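For orientation, a hedged sketch of how a host might instantiate this core via the constructor's initialiser callbacks. Only `LiveSyncBaseCore`'s signature above is from the source; the hub value and `initialiseServiceModulesCLI` (named in the CLI README below) are declared as stand-ins here:

```typescript
// Hypothetical wiring; the concrete hub and module initialiser are
// whatever the host application (e.g. the CLI) actually provides.
import { LiveSyncBaseCore, type IMinimumLiveSyncCommands } from "./LiveSyncBaseCore";

declare const serviceHub: any; // an InjectableServiceHub implementation
declare function initialiseServiceModulesCLI(core: any, hub: any): any;

const core = new LiveSyncBaseCore(
    serviceHub,
    (c, hub) => initialiseServiceModulesCLI(c, hub), // wire the service modules
    (_c) => [], // no extra modules in this minimal sketch
    (_c) => [] as IMinimumLiveSyncCommands[], // no add-ons
    (_c) => {} // no extra service features
);
```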
src/apps/cli/.gitignore (new file, 4 lines, vendored)
@@ -0,0 +1,4 @@

.livesync
test/*
!test/*.sh
node_modules
src/apps/cli/.test.env (new file, 8 lines)
@@ -0,0 +1,8 @@
hostname=http://127.0.0.1:5989/
dbname=livesync-test-db2
minioEndpoint=http://127.0.0.1:9000
username=admin
password=testpassword
accessKey=minioadmin
secretKey=minioadmin
bucketName=livesync-test-bucket
src/apps/cli/README.md (new file, 285 lines)
@@ -0,0 +1,285 @@
# Self-hosted LiveSync CLI

Command-line version of the Self-hosted LiveSync plugin for syncing vaults without Obsidian.

## Features

- ✅ Sync Obsidian vaults using CouchDB without running Obsidian
- ✅ Compatible with Self-hosted LiveSync plugin settings
- ✅ Supports all core sync features (encryption, conflict resolution, etc.)
- ✅ Lightweight and headless operation
- ✅ Cross-platform (Windows, macOS, Linux)

## Architecture

This CLI version is built on the same core as the Obsidian plugin:

```
CLI Main
└─ LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>
    ├─ NodeServiceHub (all services, without Obsidian dependencies)
    └─ ServiceModules (wired by initialiseServiceModulesCLI)
        ├─ FileAccessCLI (Node.js FileSystemAdapter)
        ├─ StorageEventManagerCLI
        ├─ ServiceFileAccessCLI
        ├─ ServiceDatabaseFileAccessCLI
        ├─ ServiceFileHandler
        └─ ServiceRebuilder
```

### Key Components

1. **Node.js FileSystem Adapter** (`adapters/`)
    - Platform-agnostic file operations using Node.js `fs/promises`
    - Implements the same interface as Obsidian's file system

2. **Service Modules** (`serviceModules/`)
    - Initialised by `initialiseServiceModulesCLI`
    - All core sync functionality preserved

3. **Service Hub and Settings Services** (`services/`)
    - `NodeServiceHub` provides the CLI service context
    - Node-specific settings and key-value services are provided without Obsidian dependencies

4. **Main Entry Point** (`main.ts`)
    - Command-line interface
    - Settings management (JSON file)
    - Graceful shutdown handling
## Installation

```bash
# Install dependencies (run from the repository root, not src/apps/cli,
# due to shared dependencies with the webapp and the main library)
npm install
# Build the project (run from the `src/apps/cli` directory)
npm run build
```

## Usage

### Basic Usage

The CLI is designed for headless environments, so all operations are performed against a local vault directory and a settings file. Here are some example commands:

```bash
# Sync local database with CouchDB (no files will be changed).
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json sync

# Push files to local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json push /your/storage/file.md /vault/path/file.md

# Pull files from local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json pull /vault/path/file.md /your/storage/file.md

# Verbose logging
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json --verbose

# Apply setup URI to settings file (settings only; does not run synchronisation)
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json setup "obsidian://setuplivesync?settings=..."

# Put text from stdin into local database
echo "Hello from stdin" | npm run cli -- /path/to/your-local-database --settings /path/to/settings.json put /vault/path/file.md

# Output a file from local database to stdout
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json cat /vault/path/file.md

# Output a specific revision of a file from local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json cat-rev /vault/path/file.md 3-abcdef

# Pull a specific revision of a file from local database to local storage
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json pull-rev /vault/path/file.md /your/storage/file.old.md 3-abcdef

# List files in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json ls /vault/path/

# Show metadata for a file in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json info /vault/path/file.md

# Mark a file as deleted in local database
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json rm /vault/path/file.md

# Resolve conflict by keeping a specific revision
npm run cli -- /path/to/your-local-database --settings /path/to/settings.json resolve /vault/path/file.md 3-abcdef
```
### Configuration

The CLI uses the same settings format as the Obsidian plugin. Create a `.livesync/settings.json` file in your vault directory:

```json
{
    "couchDB_URI": "http://localhost:5984",
    "couchDB_USER": "admin",
    "couchDB_PASSWORD": "password",
    "couchDB_DBNAME": "obsidian-livesync",
    "liveSync": true,
    "syncOnSave": true,
    "syncOnStart": true,
    "encrypt": true,
    "passphrase": "your-encryption-passphrase",
    "usePluginSync": false,
    "isConfigured": true
}
```

**Minimum required settings** (see the bootstrap sketch below):

- `couchDB_URI`: CouchDB server URL
- `couchDB_USER`: CouchDB username
- `couchDB_PASSWORD`: CouchDB password
- `couchDB_DBNAME`: Database name
- `isConfigured`: Set to `true` after configuration
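If you prefer to create the file by hand rather than with `init-settings`, a minimal bootstrap could look like the following sketch (the URL, credentials, and paths are placeholders to replace; `init-settings` writes out the full `DEFAULT_SETTINGS`, so it remains the safer route):

```bash
mkdir -p /data/vault/.livesync
cat > /data/vault/.livesync/settings.json <<'EOF'
{
    "couchDB_URI": "http://localhost:5984",
    "couchDB_USER": "admin",
    "couchDB_PASSWORD": "password",
    "couchDB_DBNAME": "obsidian-livesync",
    "isConfigured": true
}
EOF
```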
### Command-line Reference

```
Usage:
  livesync-cli [database-path] [options] [command] [command-args]

Arguments:
  database-path                    Path to the local database directory (required)

Options:
  --settings, -s <path>            Path to settings file (default: .livesync/settings.json in local database directory)
  --force, -f                      Overwrite existing file on init-settings
  --verbose, -v                    Enable verbose logging
  --help, -h                       Show this help message

Commands:
  init-settings [path]             Create settings JSON from DEFAULT_SETTINGS
  sync                             Run one replication cycle and exit
  push <src> <dst>                 Push local file <src> into local database path <dst>
  pull <src> <dst>                 Pull file <src> from local database into local file <dst>
  pull-rev <src> <dst> <revision>  Pull specific revision into local file <dst>
  setup <setupURI>                 Apply setup URI to settings file
  put <vaultPath>                  Read text from standard input and write to local database
  cat <vaultPath>                  Write latest file content from local database to standard output
  cat-rev <vaultPath> <revision>   Write specific revision content from local database to standard output
  ls <prefix>                      List files as path<TAB>size<TAB>mtime<TAB>revision[*]
  info <vaultPath>                 Show file metadata including current and past revisions, conflicts, and chunk list
  rm <vaultPath>                   Mark file as deleted in local database
  resolve <vaultPath> <revision>   Resolve conflict by keeping the specified revision
```

Run via npm script:

```bash
npm run cli -- [database-path] [options] [command] [command-args]
```

`info` output fields (see the example below):

- `id`: Document ID
- `revision`: Current revision
- `conflicts`: Conflicted revisions, or `N/A`
- `filename`: Basename of the path
- `path`: Vault-relative path
- `size`: Size in bytes
- `revisions`: Available non-current revisions
- `chunks`: Number of chunk IDs
- `children`: Chunk ID list
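For reference, `info` prints these fields as a single JSON object. An illustrative result (the ID, revisions, and chunk names below are made-up values):

```json
{
    "id": "notes/note.md",
    "revision": "4-9f2b1c",
    "conflicts": "N/A",
    "filename": "note.md",
    "path": "notes/note.md",
    "size": 1234,
    "revisions": ["3-abcdef"],
    "chunks": 2,
    "children": ["chunk-aaaa", "chunk-bbbb"]
}
```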
### Planned options

TODO: Conflict and resolution checks for real local databases.

- `--immediate`: Perform sync after the command (e.g. `push`, `pull`, `put`, `rm`).
- `serve`: Start the CLI in server mode, exposing REST APIs for remote and batch operations.
- `cause-conflicted <vaultPath>`: Mark a file as conflicted without changing its content, to trigger conflict resolution in Obsidian.

## Current Limitations and Known Issues

- Binary files are not supported yet (apparently; this has not been tested yet).

## Use Cases

### 1. Bootstrap a new headless vault

Create default settings, apply a setup URI, then run one sync cycle.

```bash
npm run cli -- init-settings /data/livesync-settings.json
printf '%s\n' "$SETUP_PASSPHRASE" | npm run cli -- /data/vault --settings /data/livesync-settings.json setup "$SETUP_URI"
npm run cli -- /data/vault --settings /data/livesync-settings.json sync
```

### 2. Scripted import and export

Push local files into the database from automation, and pull them back for export or backup.

```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json push ./note.md notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json pull notes/note.md ./exports/note.md
```

### 3. Revision inspection and restore

List metadata, find an older revision, then restore it by content (`cat-rev`) or file output (`pull-rev`).

```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json cat-rev notes/note.md 3-abcdef
npm run cli -- /data/vault --settings /data/livesync-settings.json pull-rev notes/note.md ./restore/note.old.md 3-abcdef
```

### 4. Conflict and cleanup workflow

Inspect conflicted revisions, resolve by keeping one revision, then delete obsolete files.

```bash
npm run cli -- /data/vault --settings /data/livesync-settings.json info notes/note.md
npm run cli -- /data/vault --settings /data/livesync-settings.json resolve notes/note.md 3-abcdef
npm run cli -- /data/vault --settings /data/livesync-settings.json rm notes/obsolete.md
```

### 5. CI smoke test for content round-trip

Validate that `put`/`cat` behave as expected in a pipeline.

```bash
echo "hello-ci" | npm run cli -- /data/vault --settings /data/livesync-settings.json put ci/test.md
npm run cli -- /data/vault --settings /data/livesync-settings.json cat ci/test.md
```
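To make a mismatch actually fail the pipeline, the emitted content can be compared against the input (a sketch assuming a bash shell; `npm run --silent` keeps npm's own banner out of the comparison):

```bash
echo "hello-ci" | npm run cli -- /data/vault --settings /data/livesync-settings.json put ci/test.md
npm run --silent cli -- /data/vault --settings /data/livesync-settings.json cat ci/test.md \
    | diff - <(echo "hello-ci") || { echo "round-trip mismatch" >&2; exit 1; }
```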
## Development

### Project Structure

```
src/apps/cli/
├── commands/          # Command dispatcher and command utilities
│   ├── runCommand.ts
│   ├── types.ts
│   └── utils.ts
├── adapters/          # Node.js FileSystem Adapter
│   ├── NodeConversionAdapter.ts
│   ├── NodeFileSystemAdapter.ts
│   ├── NodePathAdapter.ts
│   ├── NodeStorageAdapter.ts
│   ├── NodeTypeGuardAdapter.ts
│   ├── NodeTypes.ts
│   └── NodeVaultAdapter.ts
├── lib/
│   └── pouchdb-node.ts
├── managers/          # CLI-specific managers
│   ├── CLIStorageEventManagerAdapter.ts
│   └── StorageEventManagerCLI.ts
├── serviceModules/    # Service modules (ported from main.ts)
│   ├── CLIServiceModules.ts
│   ├── DatabaseFileAccess.ts
│   ├── FileAccessCLI.ts
│   └── ServiceFileAccessImpl.ts
├── services/
│   ├── NodeKeyValueDBService.ts
│   ├── NodeServiceHub.ts
│   └── NodeSettingService.ts
├── test/
│   ├── test-push-pull-linux.sh
│   ├── test-setup-put-cat-linux.sh
│   └── test-sync-two-local-databases-linux.sh
├── .gitignore
├── main.ts            # CLI entry point
├── package.json
├── README.md          # This file
├── tsconfig.json
└── vite.config.ts
```
src/apps/cli/adapters/NodeConversionAdapter.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
import * as path from "path";
import type { UXFileInfoStub, UXFolderInfo } from "@lib/common/types";
import type { IConversionAdapter } from "@lib/serviceModules/adapters";
import type { NodeFile, NodeFolder } from "./NodeTypes";

/**
 * Conversion adapter implementation for Node.js
 */
export class NodeConversionAdapter implements IConversionAdapter<NodeFile, NodeFolder> {
    nativeFileToUXFileInfoStub(file: NodeFile): UXFileInfoStub {
        return {
            name: path.basename(file.path),
            path: file.path,
            stat: file.stat,
            isFolder: false,
        };
    }

    nativeFolderToUXFolder(folder: NodeFolder): UXFolderInfo {
        return {
            name: path.basename(folder.path),
            path: folder.path,
            isFolder: true,
            children: [],
            parent: path.dirname(folder.path) as any,
        };
    }
}
src/apps/cli/adapters/NodeFileSystemAdapter.ts (new file, 153 lines)
@@ -0,0 +1,153 @@
import * as fs from "fs/promises";
import * as path from "path";
import type { FilePath, UXStat } from "@lib/common/types";
import type { IFileSystemAdapter } from "@lib/serviceModules/adapters";
import { NodePathAdapter } from "./NodePathAdapter";
import { NodeTypeGuardAdapter } from "./NodeTypeGuardAdapter";
import { NodeConversionAdapter } from "./NodeConversionAdapter";
import { NodeStorageAdapter } from "./NodeStorageAdapter";
import { NodeVaultAdapter } from "./NodeVaultAdapter";
import type { NodeFile, NodeFolder, NodeStat } from "./NodeTypes";

/**
 * Complete file system adapter implementation for Node.js
 */
export class NodeFileSystemAdapter implements IFileSystemAdapter<NodeFile, NodeFile, NodeFolder, NodeStat> {
    readonly path: NodePathAdapter;
    readonly typeGuard: NodeTypeGuardAdapter;
    readonly conversion: NodeConversionAdapter;
    readonly storage: NodeStorageAdapter;
    readonly vault: NodeVaultAdapter;

    private fileCache = new Map<string, NodeFile>();

    constructor(private basePath: string) {
        this.path = new NodePathAdapter();
        this.typeGuard = new NodeTypeGuardAdapter();
        this.conversion = new NodeConversionAdapter();
        this.storage = new NodeStorageAdapter(basePath);
        this.vault = new NodeVaultAdapter(basePath);
    }

    private resolvePath(p: FilePath | string): string {
        return path.join(this.basePath, p);
    }

    private normalisePath(p: FilePath | string): string {
        return this.path.normalisePath(p as string);
    }

    async getAbstractFileByPath(p: FilePath | string): Promise<NodeFile | null> {
        const pathStr = this.normalisePath(p);

        const cached = this.fileCache.get(pathStr);
        if (cached) {
            return cached;
        }

        return await this.refreshFile(pathStr);
    }

    async getAbstractFileByPathInsensitive(p: FilePath | string): Promise<NodeFile | null> {
        const pathStr = this.normalisePath(p);

        const exact = await this.getAbstractFileByPath(pathStr);
        if (exact) {
            return exact;
        }

        const lowerPath = pathStr.toLowerCase();
        for (const [cachedPath, cachedFile] of this.fileCache.entries()) {
            if (cachedPath.toLowerCase() === lowerPath) {
                return cachedFile;
            }
        }

        await this.scanDirectory();

        for (const [cachedPath, cachedFile] of this.fileCache.entries()) {
            if (cachedPath.toLowerCase() === lowerPath) {
                return cachedFile;
            }
        }

        return null;
    }

    async getFiles(): Promise<NodeFile[]> {
        if (this.fileCache.size === 0) {
            await this.scanDirectory();
        }
        return Array.from(this.fileCache.values());
    }

    async statFromNative(file: NodeFile): Promise<UXStat> {
        return file.stat;
    }

    async reconcileInternalFile(p: string): Promise<void> {
        // No-op in Node.js version
        // This is used by Obsidian to sync internal file metadata
    }

    async refreshFile(p: string): Promise<NodeFile | null> {
        const pathStr = this.normalisePath(p);
        try {
            const fullPath = this.resolvePath(pathStr);
            const stat = await fs.stat(fullPath);
            if (!stat.isFile()) {
                this.fileCache.delete(pathStr);
                return null;
            }

            const file: NodeFile = {
                path: pathStr as FilePath,
                stat: {
                    size: stat.size,
                    mtime: stat.mtimeMs,
                    ctime: stat.ctimeMs,
                    type: "file",
                },
            };
            this.fileCache.set(pathStr, file);
            return file;
        } catch {
            this.fileCache.delete(pathStr);
            return null;
        }
    }

    /**
     * Helper method to recursively scan the directory and populate the file cache
     */
    async scanDirectory(relativePath: string = ""): Promise<void> {
        const fullPath = this.resolvePath(relativePath);
        try {
            const entries = await fs.readdir(fullPath, { withFileTypes: true });

            for (const entry of entries) {
                const entryRelativePath = path.join(relativePath, entry.name).replace(/\\/g, "/");

                if (entry.isDirectory()) {
                    await this.scanDirectory(entryRelativePath);
                } else if (entry.isFile()) {
                    const entryFullPath = this.resolvePath(entryRelativePath);
                    const stat = await fs.stat(entryFullPath);
                    const file: NodeFile = {
                        path: entryRelativePath as FilePath,
                        stat: {
                            size: stat.size,
                            mtime: stat.mtimeMs,
                            ctime: stat.ctimeMs,
                            type: "file",
                        },
                    };
                    this.fileCache.set(entryRelativePath, file);
                }
            }
        } catch (error) {
            // Directory doesn't exist or is not readable
            console.error(`Error scanning directory ${fullPath}:`, error);
        }
    }
}
src/apps/cli/adapters/NodePathAdapter.ts (new file, 18 lines)
@@ -0,0 +1,18 @@
import * as path from "path";
import type { FilePath } from "@lib/common/types";
import type { IPathAdapter } from "@lib/serviceModules/adapters";
import type { NodeFile } from "./NodeTypes";

/**
 * Path adapter implementation for Node.js
 */
export class NodePathAdapter implements IPathAdapter<NodeFile> {
    getPath(file: string | NodeFile): FilePath {
        return (typeof file === "string" ? file : file.path) as FilePath;
    }

    normalisePath(p: string): string {
        // Normalize path separators to forward slashes (like Obsidian)
        return path.normalize(p).replace(/\\/g, "/");
    }
}
src/apps/cli/adapters/NodeStorageAdapter.ts (new file, 124 lines)
@@ -0,0 +1,124 @@
import * as fs from "fs/promises";
import * as path from "path";
import type { UXDataWriteOptions } from "@lib/common/types";
import type { IStorageAdapter } from "@lib/serviceModules/adapters";
import type { NodeStat } from "./NodeTypes";

/**
 * Storage adapter implementation for Node.js
 */
export class NodeStorageAdapter implements IStorageAdapter<NodeStat> {
    constructor(private basePath: string) {}

    private resolvePath(p: string): string {
        return path.join(this.basePath, p);
    }

    async exists(p: string): Promise<boolean> {
        try {
            await fs.access(this.resolvePath(p));
            return true;
        } catch {
            return false;
        }
    }

    async trystat(p: string): Promise<NodeStat | null> {
        try {
            const stat = await fs.stat(this.resolvePath(p));
            return {
                size: stat.size,
                mtime: stat.mtimeMs,
                ctime: stat.ctimeMs,
                type: stat.isDirectory() ? "folder" : "file",
            };
        } catch {
            return null;
        }
    }

    async stat(p: string): Promise<NodeStat | null> {
        return await this.trystat(p);
    }

    async mkdir(p: string): Promise<void> {
        await fs.mkdir(this.resolvePath(p), { recursive: true });
    }

    async remove(p: string): Promise<void> {
        const fullPath = this.resolvePath(p);
        const stat = await fs.stat(fullPath);
        if (stat.isDirectory()) {
            await fs.rm(fullPath, { recursive: true, force: true });
        } else {
            await fs.unlink(fullPath);
        }
    }

    async read(p: string): Promise<string> {
        return await fs.readFile(this.resolvePath(p), "utf-8");
    }

    async readBinary(p: string): Promise<ArrayBuffer> {
        const buffer = await fs.readFile(this.resolvePath(p));
        return buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength) as ArrayBuffer;
    }

    async write(p: string, data: string, options?: UXDataWriteOptions): Promise<void> {
        const fullPath = this.resolvePath(p);
        await fs.mkdir(path.dirname(fullPath), { recursive: true });
        await fs.writeFile(fullPath, data, "utf-8");

        if (options?.mtime || options?.ctime) {
            // Note: fs.utimes can only set atime/mtime; a requested ctime cannot be applied.
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }
    }

    async writeBinary(p: string, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<void> {
        const fullPath = this.resolvePath(p);
        await fs.mkdir(path.dirname(fullPath), { recursive: true });
        await fs.writeFile(fullPath, new Uint8Array(data));

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }
    }

    async append(p: string, data: string, options?: UXDataWriteOptions): Promise<void> {
        const fullPath = this.resolvePath(p);
        await fs.mkdir(path.dirname(fullPath), { recursive: true });
        await fs.appendFile(fullPath, data, "utf-8");

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }
    }

    async list(basePath: string): Promise<{ files: string[]; folders: string[] }> {
        const fullPath = this.resolvePath(basePath);
        try {
            const entries = await fs.readdir(fullPath, { withFileTypes: true });
            const files: string[] = [];
            const folders: string[] = [];

            for (const entry of entries) {
                const entryPath = path.join(basePath, entry.name).replace(/\\/g, "/");
                if (entry.isDirectory()) {
                    folders.push(entryPath);
                } else if (entry.isFile()) {
                    files.push(entryPath);
                }
            }

            return { files, folders };
        } catch {
            return { files: [], folders: [] };
        }
    }
}
src/apps/cli/adapters/NodeTypeGuardAdapter.ts (new file, 15 lines)
@@ -0,0 +1,15 @@
import type { ITypeGuardAdapter } from "@lib/serviceModules/adapters";
import type { NodeFile, NodeFolder } from "./NodeTypes";

/**
 * Type guard adapter implementation for Node.js
 */
export class NodeTypeGuardAdapter implements ITypeGuardAdapter<NodeFile, NodeFolder> {
    isFile(file: any): file is NodeFile {
        return file && typeof file === "object" && "path" in file && "stat" in file && !file.isFolder;
    }

    isFolder(item: any): item is NodeFolder {
        return item && typeof item === "object" && "path" in item && item.isFolder === true;
    }
}
src/apps/cli/adapters/NodeTypes.ts (new file, 22 lines)
@@ -0,0 +1,22 @@
import type { FilePath, UXStat } from "@lib/common/types";

/**
 * Node.js file representation
 */
export type NodeFile = {
    path: FilePath;
    stat: UXStat;
};

/**
 * Node.js folder representation
 */
export type NodeFolder = {
    path: FilePath;
    isFolder: true;
};

/**
 * Node.js stat type (compatible with UXStat)
 */
export type NodeStat = UXStat;
src/apps/cli/adapters/NodeVaultAdapter.ts (new file, 118 lines)
@@ -0,0 +1,118 @@
import * as fs from "fs/promises";
import * as path from "path";
import type { UXDataWriteOptions } from "@lib/common/types";
import type { IVaultAdapter } from "@lib/serviceModules/adapters";
import type { NodeFile, NodeFolder, NodeStat } from "./NodeTypes";

/**
 * Vault adapter implementation for Node.js
 */
export class NodeVaultAdapter implements IVaultAdapter<NodeFile> {
    constructor(private basePath: string) {}

    private resolvePath(p: string): string {
        return path.join(this.basePath, p);
    }

    async read(file: NodeFile): Promise<string> {
        return await fs.readFile(this.resolvePath(file.path), "utf-8");
    }

    async cachedRead(file: NodeFile): Promise<string> {
        // No caching in CLI version, just read directly
        return await this.read(file);
    }

    async readBinary(file: NodeFile): Promise<ArrayBuffer> {
        const buffer = await fs.readFile(this.resolvePath(file.path));
        return buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength) as ArrayBuffer;
    }

    async modify(file: NodeFile, data: string, options?: UXDataWriteOptions): Promise<void> {
        const fullPath = this.resolvePath(file.path);
        await fs.writeFile(fullPath, data, "utf-8");

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }
    }

    async modifyBinary(file: NodeFile, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<void> {
        const fullPath = this.resolvePath(file.path);
        await fs.writeFile(fullPath, new Uint8Array(data));

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }
    }

    async create(p: string, data: string, options?: UXDataWriteOptions): Promise<NodeFile> {
        const fullPath = this.resolvePath(p);
        await fs.mkdir(path.dirname(fullPath), { recursive: true });
        await fs.writeFile(fullPath, data, "utf-8");

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }

        const stat = await fs.stat(fullPath);
        return {
            path: p as any,
            stat: {
                size: stat.size,
                mtime: stat.mtimeMs,
                ctime: stat.ctimeMs,
                type: "file",
            },
        };
    }

    async createBinary(p: string, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<NodeFile> {
        const fullPath = this.resolvePath(p);
        await fs.mkdir(path.dirname(fullPath), { recursive: true });
        await fs.writeFile(fullPath, new Uint8Array(data));

        if (options?.mtime || options?.ctime) {
            const atime = options.mtime ? new Date(options.mtime) : new Date();
            const mtime = options.mtime ? new Date(options.mtime) : new Date();
            await fs.utimes(fullPath, atime, mtime);
        }

        const stat = await fs.stat(fullPath);
        return {
            path: p as any,
            stat: {
                size: stat.size,
                mtime: stat.mtimeMs,
                ctime: stat.ctimeMs,
                type: "file",
            },
        };
    }

    async delete(file: NodeFile | NodeFolder, force = false): Promise<void> {
        const fullPath = this.resolvePath(file.path);
        const stat = await fs.stat(fullPath);
        if (stat.isDirectory()) {
            await fs.rm(fullPath, { recursive: true, force });
        } else {
            await fs.unlink(fullPath);
        }
    }

    async trash(file: NodeFile | NodeFolder, force = false): Promise<void> {
        // In CLI, trash is the same as delete (no recycle bin)
        await this.delete(file, force);
    }

    trigger(name: string, ...data: any[]): any {
        // No-op in CLI version (no event system)
        return undefined;
    }
}
src/apps/cli/commands/runCommand.ts (new file, 312 lines)
@@ -0,0 +1,312 @@
import * as fs from "fs/promises";
import * as path from "path";
import { decodeSettingsFromSetupURI } from "@lib/API/processSetting";
import { configURIBase } from "@lib/common/models/shared.const";
import { DEFAULT_SETTINGS, type FilePathWithPrefix, type ObsidianLiveSyncSettings } from "@lib/common/types";
import { stripAllPrefixes } from "@lib/string_and_binary/path";
import type { CLICommandContext, CLIOptions } from "./types";
import { promptForPassphrase, readStdinAsUtf8, toArrayBuffer, toVaultRelativePath } from "./utils";

export async function runCommand(options: CLIOptions, context: CLICommandContext): Promise<boolean> {
    const { vaultPath, core, settingsPath } = context;

    await core.services.control.activated;
    if (options.command === "daemon") {
        return true;
    }

    if (options.command === "sync") {
        console.log("[Command] sync");
        const result = await core.services.replication.replicate(true);
        return !!result;
    }

    if (options.command === "push") {
        if (options.commandArgs.length < 2) {
            throw new Error("push requires two arguments: <src> <dst>");
        }
        const sourcePath = path.resolve(options.commandArgs[0]);
        const destinationVaultPath = toVaultRelativePath(options.commandArgs[1], vaultPath);
        const sourceData = await fs.readFile(sourcePath);
        const sourceStat = await fs.stat(sourcePath);
        console.log(`[Command] push ${sourcePath} -> ${destinationVaultPath}`);

        await core.serviceModules.storageAccess.writeFileAuto(destinationVaultPath, toArrayBuffer(sourceData), {
            mtime: sourceStat.mtimeMs,
            ctime: sourceStat.ctimeMs,
        });
        const destinationPathWithPrefix = destinationVaultPath as FilePathWithPrefix;
        const stored = await core.serviceModules.fileHandler.storeFileToDB(destinationPathWithPrefix, true);
        return stored;
    }

    if (options.command === "pull") {
        if (options.commandArgs.length < 2) {
            throw new Error("pull requires two arguments: <src> <dst>");
        }
        const sourceVaultPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        const destinationPath = path.resolve(options.commandArgs[1]);
        console.log(`[Command] pull ${sourceVaultPath} -> ${destinationPath}`);

        const sourcePathWithPrefix = sourceVaultPath as FilePathWithPrefix;
        const restored = await core.serviceModules.fileHandler.dbToStorage(sourcePathWithPrefix, null, true);
        if (!restored) {
            return false;
        }
        const data = await core.serviceModules.storageAccess.readFileAuto(sourceVaultPath);
        await fs.mkdir(path.dirname(destinationPath), { recursive: true });
        if (typeof data === "string") {
            await fs.writeFile(destinationPath, data, "utf-8");
        } else {
            await fs.writeFile(destinationPath, new Uint8Array(data));
        }
        return true;
    }

    if (options.command === "pull-rev") {
        if (options.commandArgs.length < 3) {
            throw new Error("pull-rev requires three arguments: <src> <dst> <rev>");
        }
        const sourceVaultPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        const destinationPath = path.resolve(options.commandArgs[1]);
        const rev = options.commandArgs[2].trim();
        if (!rev) {
            throw new Error("pull-rev requires a non-empty revision");
        }
        console.log(`[Command] pull-rev ${sourceVaultPath}@${rev} -> ${destinationPath}`);

        const source = await core.serviceModules.databaseFileAccess.fetch(
            sourceVaultPath as FilePathWithPrefix,
            rev,
            true
        );
        if (!source || source.deleted) {
            return false;
        }

        await fs.mkdir(path.dirname(destinationPath), { recursive: true });
        const body = source.body;
        if (body.type === "text/plain") {
            await fs.writeFile(destinationPath, await body.text(), "utf-8");
        } else {
            await fs.writeFile(destinationPath, new Uint8Array(await body.arrayBuffer()));
        }
        return true;
    }

    if (options.command === "setup") {
        if (options.commandArgs.length < 1) {
            throw new Error("setup requires one argument: <setupURI>");
        }
        const setupURI = options.commandArgs[0].trim();
        if (!setupURI.startsWith(configURIBase)) {
            throw new Error(`setup URI must start with ${configURIBase}`);
        }
        const passphrase = await promptForPassphrase();
        const decoded = await decodeSettingsFromSetupURI(setupURI, passphrase);
        if (!decoded) {
            throw new Error("Failed to decode settings from setup URI");
        }
        const nextSettings = {
            ...DEFAULT_SETTINGS,
            ...decoded,
            useIndexedDBAdapter: false,
            isConfigured: true,
        } as ObsidianLiveSyncSettings;

        console.log(`[Command] setup -> ${settingsPath}`);
        await core.services.setting.applyPartial(nextSettings, true);
        await core.services.control.applySettings();
        return true;
    }

    if (options.command === "put") {
        if (options.commandArgs.length < 1) {
            throw new Error("put requires one argument: <dst>");
        }
        const destinationVaultPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        const content = await readStdinAsUtf8();
        console.log(`[Command] put stdin -> ${destinationVaultPath}`);
        return await core.serviceModules.databaseFileAccess.storeContent(
            destinationVaultPath as FilePathWithPrefix,
            content
        );
    }

    if (options.command === "cat") {
        if (options.commandArgs.length < 1) {
            throw new Error("cat requires one argument: <src>");
        }
        const sourceVaultPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        console.error(`[Command] cat ${sourceVaultPath}`);
        const source = await core.serviceModules.databaseFileAccess.fetch(
            sourceVaultPath as FilePathWithPrefix,
            undefined,
            true
        );
        if (!source || source.deleted) {
            return false;
        }
        const body = source.body;
        if (body.type === "text/plain") {
            process.stdout.write(await body.text());
        } else {
            process.stdout.write(Buffer.from(await body.arrayBuffer()));
        }
        return true;
    }

    if (options.command === "cat-rev") {
        if (options.commandArgs.length < 2) {
            throw new Error("cat-rev requires two arguments: <src> <rev>");
        }
        const sourceVaultPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        const rev = options.commandArgs[1].trim();
        if (!rev) {
            throw new Error("cat-rev requires a non-empty revision");
        }
        console.error(`[Command] cat-rev ${sourceVaultPath} @ ${rev}`);
        const source = await core.serviceModules.databaseFileAccess.fetch(
            sourceVaultPath as FilePathWithPrefix,
            rev,
            true
        );
        if (!source || source.deleted) {
            return false;
        }
        const body = source.body;
        if (body.type === "text/plain") {
            process.stdout.write(await body.text());
        } else {
            process.stdout.write(Buffer.from(await body.arrayBuffer()));
        }
        return true;
    }

    if (options.command === "ls") {
        const prefix =
            options.commandArgs.length > 0 && options.commandArgs[0].trim() !== ""
                ? toVaultRelativePath(options.commandArgs[0], vaultPath)
                : "";
        const rows: { path: string; line: string }[] = [];

        for await (const doc of core.services.database.localDatabase.findAllNormalDocs({ conflicts: true })) {
            if (doc._deleted || doc.deleted) {
                continue;
            }
            const docPath = stripAllPrefixes(doc.path);
            if (prefix !== "" && !docPath.startsWith(prefix)) {
                continue;
            }
            const revision = `${doc._rev ?? ""}${(doc._conflicts?.length ?? 0) > 0 ? "*" : ""}`;
            rows.push({
                path: docPath,
                line: `${docPath}\t${doc.size}\t${doc.mtime}\t${revision}`,
            });
        }

        rows.sort((a, b) => a.path.localeCompare(b.path));
        if (rows.length > 0) {
            process.stdout.write(rows.map((e) => e.line).join("\n") + "\n");
        }
        return true;
    }

    if (options.command === "info") {
        if (options.commandArgs.length < 1) {
            throw new Error("info requires one argument: <path>");
        }
        const targetPath = toVaultRelativePath(options.commandArgs[0], vaultPath);

        for await (const doc of core.services.database.localDatabase.findAllNormalDocs({ conflicts: true })) {
            if (doc._deleted || doc.deleted) continue;
            const docPath = stripAllPrefixes(doc.path);
            if (docPath !== targetPath) continue;

            const filename = path.basename(docPath);
            const conflictsText = (doc._conflicts?.length ?? 0) > 0 ? doc._conflicts.join("\n ") : "N/A";
            const children = "children" in doc ? doc.children : [];
            const rawDoc = await core.services.database.localDatabase.getRaw<any>(doc._id, {
                revs_info: true,
            });
            const pastRevisions = (rawDoc._revs_info ?? [])
                .filter((entry: { rev?: string; status?: string }) => {
                    if (!entry.rev) return false;
                    if (entry.rev === doc._rev) return false;
                    return entry.status === "available";
                })
                .map((entry: { rev: string }) => entry.rev);
            const pastRevisionsText =
                pastRevisions.length > 0 ? pastRevisions.map((rev: string) => `${rev}`) : ["N/A"];
            const out = {
                id: doc._id,
                revision: doc._rev ?? "",
                conflicts: conflictsText,
                filename: filename,
                path: docPath,
                size: doc.size,
                revisions: pastRevisionsText,
                chunks: children.length,
                children: children,
            };
            process.stdout.write(JSON.stringify(out, null, 2) + "\n");
            return true;
        }

        process.stderr.write(`[Info] File not found: ${targetPath}\n`);
        return false;
    }

    if (options.command === "rm") {
        if (options.commandArgs.length < 1) {
            throw new Error("rm requires one argument: <path>");
        }
        const targetPath = toVaultRelativePath(options.commandArgs[0], vaultPath);
        console.error(`[Command] rm ${targetPath}`);
        return await core.serviceModules.databaseFileAccess.delete(targetPath as FilePathWithPrefix);
    }

    if (options.command === "resolve") {
        if (options.commandArgs.length < 2) {
            throw new Error("resolve requires two arguments: <path> <revision-to-keep>");
        }
        const targetPath = toVaultRelativePath(options.commandArgs[0], vaultPath) as FilePathWithPrefix;
        const revisionToKeep = options.commandArgs[1].trim();
        if (revisionToKeep === "") {
            throw new Error("resolve requires a non-empty revision-to-keep");
        }

        const currentMeta = await core.serviceModules.databaseFileAccess.fetchEntryMeta(targetPath, undefined, true);
        if (currentMeta === false || currentMeta._deleted || currentMeta.deleted) {
            process.stderr.write(`[Info] File not found: ${targetPath}\n`);
            return false;
        }

        const conflicts = await core.serviceModules.databaseFileAccess.getConflictedRevs(targetPath);
        const candidateRevisions = [currentMeta._rev, ...conflicts];
        if (!candidateRevisions.includes(revisionToKeep)) {
            process.stderr.write(`[Info] Revision not found for ${targetPath}: ${revisionToKeep}\n`);
            return false;
        }

        if (conflicts.length === 0 && currentMeta._rev === revisionToKeep) {
            console.error(`[Command] resolve ${targetPath} keep ${revisionToKeep} (already resolved)`);
            return true;
        }

        console.error(`[Command] resolve ${targetPath} keep ${revisionToKeep}`);
        for (const revision of candidateRevisions) {
            if (revision === revisionToKeep) {
                continue;
            }
            const resolved = await core.services.conflict.resolveByDeletingRevision(targetPath, revision, "CLI");
            if (!resolved) {
                process.stderr.write(`[Info] Failed to delete revision ${revision} for ${targetPath}\n`);
                return false;
            }
        }
        return true;
    }

    throw new Error(`Unsupported command: ${options.command}`);
}
src/apps/cli/commands/types.ts (new file, 49 lines)
@@ -0,0 +1,49 @@
import { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import { ServiceContext } from "@lib/services/base/ServiceBase";

export type CLICommand =
    | "daemon"
    | "sync"
    | "push"
    | "pull"
    | "pull-rev"
    | "setup"
    | "put"
    | "cat"
    | "cat-rev"
    | "ls"
    | "info"
    | "rm"
    | "resolve"
    | "init-settings";

export interface CLIOptions {
    databasePath?: string;
    settingsPath?: string;
    verbose?: boolean;
    force?: boolean;
    command: CLICommand;
    commandArgs: string[];
}

export interface CLICommandContext {
    vaultPath: string;
    core: LiveSyncBaseCore<ServiceContext, any>;
    settingsPath: string;
}

export const VALID_COMMANDS = new Set([
    "sync",
    "push",
    "pull",
    "pull-rev",
    "setup",
    "put",
    "cat",
    "cat-rev",
    "ls",
    "info",
    "rm",
    "resolve",
    "init-settings",
] as const);
src/apps/cli/commands/utils.ts (new file, 44 lines)
@@ -0,0 +1,44 @@
import * as path from "path";
import * as readline from "node:readline/promises";

export function toArrayBuffer(data: Buffer): ArrayBuffer {
    return data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength) as ArrayBuffer;
}

export function toVaultRelativePath(inputPath: string, vaultPath: string): string {
    const stripped = inputPath.replace(/^[/\\]+/, "");
    if (!path.isAbsolute(inputPath)) {
        return stripped.replace(/\\/g, "/");
    }
    const resolved = path.resolve(inputPath);
    const rel = path.relative(vaultPath, resolved);
    if (rel.startsWith("..") || path.isAbsolute(rel)) {
        throw new Error(`Path ${inputPath} is outside of the local database directory`);
    }
    return rel.replace(/\\/g, "/");
}

export async function readStdinAsUtf8(): Promise<string> {
    const chunks: Buffer[] = [];
    for await (const chunk of process.stdin) {
        if (typeof chunk === "string") {
            chunks.push(Buffer.from(chunk, "utf-8"));
        } else {
            chunks.push(chunk);
        }
    }
    return Buffer.concat(chunks).toString("utf-8");
}

export async function promptForPassphrase(prompt = "Enter setup URI passphrase: "): Promise<string> {
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    try {
        const passphrase = await rl.question(prompt);
        if (!passphrase) {
            throw new Error("Passphrase is required");
        }
        return passphrase;
    } finally {
        rl.close();
    }
}
src/apps/cli/lib/pouchdb-node.ts (new file, 134 lines)
@@ -0,0 +1,134 @@
import PouchDB from "pouchdb-core";

import HttpPouch from "pouchdb-adapter-http";
import mapreduce from "pouchdb-mapreduce";
import replication from "pouchdb-replication";

import LevelDBAdapter from "pouchdb-adapter-leveldb";

import find from "pouchdb-find";
import transform from "transform-pouch";
//@ts-ignore
import { findPathToLeaf } from "pouchdb-merge";
//@ts-ignore
import { adapterFun } from "pouchdb-utils";
//@ts-ignore
import { createError, MISSING_DOC, UNKNOWN_ERROR } from "pouchdb-errors";
import { mapAllTasksWithConcurrencyLimit, unwrapTaskResult } from "octagonal-wheels/concurrency/task";

PouchDB.plugin(LevelDBAdapter).plugin(HttpPouch).plugin(mapreduce).plugin(replication).plugin(find).plugin(transform);

type PurgeMultiResult = {
    ok: true;
    deletedRevs: string[];
    documentWasRemovedCompletely: boolean;
};
type PurgeMultiParam = [docId: string, rev$$1: string];
function appendPurgeSeqs(db: PouchDB.Database, docs: PurgeMultiParam[]) {
    return db
        .get("_local/purges")
        .then(function (doc: any) {
            for (const [docId, rev$$1] of docs) {
                const purgeSeq = doc.purgeSeq + 1;
                doc.purges.push({
                    docId,
                    rev: rev$$1,
                    purgeSeq,
                });
                //@ts-ignore : missing type def
                if (doc.purges.length > db.purged_infos_limit) {
                    //@ts-ignore : missing type def
                    doc.purges.splice(0, doc.purges.length - db.purged_infos_limit);
                }
                doc.purgeSeq = purgeSeq;
            }
            return doc;
        })
        .catch(function (err) {
            if (err.status !== 404) {
                throw err;
            }
            return {
                _id: "_local/purges",
                purges: docs.map(([docId, rev$$1], idx) => ({
                    docId,
                    rev: rev$$1,
                    purgeSeq: idx,
                })),
                purgeSeq: docs.length,
            };
        })
        .then(function (doc) {
            return db.put(doc);
        });
}

/**
 * Purge multiple documents at once.
 */
PouchDB.prototype.purgeMulti = adapterFun(
    "_purgeMulti",
    function (
        docs: PurgeMultiParam[],
        callback: (
            error: Error,
            result?: {
                [x: string]: PurgeMultiResult | Error;
            }
        ) => void
    ) {
        //@ts-ignore
        if (typeof this._purge === "undefined") {
            return callback(
                //@ts-ignore: this ts-ignore might be hiding a `this` bug where we don't have a "this" context.
                createError(UNKNOWN_ERROR, "Purge is not implemented in the " + this.adapter + " adapter.")
            );
        }
        //@ts-ignore
        // eslint-disable-next-line @typescript-eslint/no-this-alias
        const self = this;
        const tasks = docs.map(
            (param) => () =>
                new Promise<[PurgeMultiParam, PurgeMultiResult | Error]>((res, rej) => {
                    const [docId, rev$$1] = param;
                    self._getRevisionTree(docId, (error: Error, revs: string[]) => {
                        if (error) {
                            return res([param, error]);
                        }
                        if (!revs) {
                            return res([param, createError(MISSING_DOC)]);
                        }
                        let path;
                        try {
                            path = findPathToLeaf(revs, rev$$1);
                        } catch (error) {
                            //@ts-ignore
                            return res([param, error.message || error]);
                        }
                        self._purge(docId, path, (error: Error, result: PurgeMultiResult) => {
                            if (error) {
                                return res([param, error]);
                            } else {
                                return res([param, result]);
                            }
                        });
                    });
                })
        );
        (async () => {
            const ret = await mapAllTasksWithConcurrencyLimit(1, tasks);
            const retAll = ret.map((e) => unwrapTaskResult(e)) as [PurgeMultiParam, PurgeMultiResult | Error][];
            await appendPurgeSeqs(
                self,
                retAll.filter((e) => "ok" in e[1]).map((e) => e[0])
            );
            const result = Object.fromEntries(retAll.map((e) => [e[0][0], e[1]]));
            return result;
        })()
            //@ts-ignore
            .then((result) => callback(undefined, result))
            .catch((error) => callback(error));
    }
);

export { PouchDB };
375
src/apps/cli/main.ts
Normal file
375
src/apps/cli/main.ts
Normal file
@@ -0,0 +1,375 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Self-hosted LiveSync CLI
|
||||
* Command-line version of Self-hosted LiveSync plugin for syncing vaults without Obsidian
|
||||
*/
|
||||
|
||||
if (!("localStorage" in globalThis)) {
|
||||
const store = new Map<string, string>();
|
||||
(globalThis as any).localStorage = {
|
||||
getItem: (key: string) => (store.has(key) ? store.get(key)! : null),
|
||||
setItem: (key: string, value: string) => {
|
||||
store.set(key, value);
|
||||
},
|
||||
removeItem: (key: string) => {
|
||||
store.delete(key);
|
||||
},
|
||||
clear: () => {
|
||||
store.clear();
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
import * as fs from "fs/promises";
|
||||
import * as path from "path";
|
||||
import { NodeServiceContext, NodeServiceHub } from "./services/NodeServiceHub";
|
||||
import { LiveSyncBaseCore } from "../../LiveSyncBaseCore";
|
||||
import { initialiseServiceModulesCLI } from "./serviceModules/CLIServiceModules";
|
||||
import { DEFAULT_SETTINGS, LOG_LEVEL_VERBOSE, type LOG_LEVEL, type ObsidianLiveSyncSettings } from "@lib/common/types";
|
||||
import type { InjectableServiceHub } from "@lib/services/implements/injectable/InjectableServiceHub";
|
||||
import type { InjectableSettingService } from "@/lib/src/services/implements/injectable/InjectableSettingService";
|
||||
import { LOG_LEVEL_DEBUG, setGlobalLogFunction, defaultLoggerEnv } from "octagonal-wheels/common/logger";
|
||||
import { runCommand } from "./commands/runCommand";
|
||||
import { VALID_COMMANDS } from "./commands/types";
|
||||
import type { CLICommand, CLIOptions } from "./commands/types";
|
||||
|
||||
const SETTINGS_FILE = ".livesync/settings.json";
|
||||
defaultLoggerEnv.minLogLevel = LOG_LEVEL_DEBUG;
|
||||
// DI the log again.
|
||||
// const recentLogEntries = reactiveSource<LogEntry[]>([]);
|
||||
// const globalLogFunction = (message: any, level?: number, key?: string) => {
|
||||
// const messageX =
|
||||
// message instanceof Error
|
||||
// ? new LiveSyncError("[Error Logged]: " + message.message, { cause: message })
|
||||
// : message;
|
||||
// const entry = { message: messageX, level, key } as LogEntry;
|
||||
// recentLogEntries.value = [...recentLogEntries.value, entry];
|
||||
// };
|
||||
|
||||
setGlobalLogFunction((msg, level) => {
|
||||
console.error(`[${level}] ${typeof msg === "string" ? msg : JSON.stringify(msg)}`);
|
||||
if (msg instanceof Error) {
|
||||
console.error(msg);
|
||||
}
|
||||
});
|
||||
function printHelp(): void {
|
||||
console.log(`
|
||||
Self-hosted LiveSync CLI
|
||||
|
||||
Usage:
|
||||
livesync-cli [database-path] [options] [command] [command-args]
|
||||
|
||||
Arguments:
|
||||
database-path Path to the local database directory (required)
|
||||
|
||||
Commands:
|
||||
sync Run one replication cycle and exit
|
||||
push <src> <dst> Push local file <src> into local database path <dst>
|
||||
pull <src> <dst> Pull file <src> from local database into local file <dst>
|
||||
pull-rev <src> <dst> <rev> Pull file <src> at specific revision <rev> into local file <dst>
|
||||
setup <setupURI> Apply setup URI to settings file
|
||||
put <dst> Read UTF-8 content from stdin and write to local database path <dst>
|
||||
cat <src> Read file <src> from local database and write to stdout
|
||||
cat-rev <src> <rev> Read file <src> at specific revision <rev> and write to stdout
|
||||
ls [prefix] List DB files as path<TAB>size<TAB>mtime<TAB>revision[*]
|
||||
info <path> Show detailed metadata for a file (ID, revision, conflicts, chunks)
|
||||
rm <path> Mark a file as deleted in local database
|
||||
resolve <path> <rev> Resolve conflicts by keeping <rev> and deleting others
|
||||
Examples:
|
||||
livesync-cli ./my-database sync
|
||||
livesync-cli ./my-database --settings ./custom-settings.json push ./note.md folder/note.md
|
||||
livesync-cli ./my-database pull folder/note.md ./exports/note.md
|
||||
livesync-cli ./my-database pull-rev folder/note.md ./exports/note.old.md 3-abcdef
|
||||
livesync-cli ./my-database setup "obsidian://setuplivesync?settings=..."
|
||||
echo "Hello" | livesync-cli ./my-database put notes/hello.md
|
||||
livesync-cli ./my-database cat notes/hello.md
|
||||
livesync-cli ./my-database cat-rev notes/hello.md 3-abcdef
|
||||
livesync-cli ./my-database ls notes/
|
||||
livesync-cli ./my-database info notes/hello.md
|
||||
livesync-cli ./my-database rm notes/hello.md
|
||||
livesync-cli ./my-database resolve notes/hello.md 3-abcdef
|
||||
livesync-cli init-settings ./data.json
|
||||
livesync-cli ./my-database --verbose
|
||||
`);
|
||||
}
|
||||
|
||||
function parseArgs(): CLIOptions {
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0 || args.includes("--help") || args.includes("-h")) {
|
||||
printHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
let databasePath: string | undefined;
|
||||
let settingsPath: string | undefined;
|
||||
let verbose = false;
|
||||
let force = false;
|
||||
let command: CLICommand = "daemon";
|
||||
const commandArgs: string[] = [];
|
||||
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const token = args[i];
|
||||
switch (token) {
|
||||
case "--settings":
|
||||
case "-s": {
|
||||
i++;
|
||||
if (!args[i]) {
|
||||
console.error(`Error: Missing value for ${token}`);
|
||||
process.exit(1);
|
||||
}
|
||||
settingsPath = args[i];
|
||||
break;
|
||||
}
|
||||
case "--verbose":
|
||||
case "-v":
|
||||
verbose = true;
|
||||
break;
|
||||
case "--force":
|
||||
case "-f":
|
||||
force = true;
|
||||
break;
|
||||
default: {
|
||||
if (!databasePath) {
|
||||
if (command === "daemon" && VALID_COMMANDS.has(token as any)) {
|
||||
command = token as CLICommand;
|
||||
break;
|
||||
}
|
||||
if (command === "init-settings") {
|
||||
commandArgs.push(token);
|
||||
break;
|
||||
}
|
||||
databasePath = token;
|
||||
break;
|
||||
}
|
||||
if (command === "daemon" && VALID_COMMANDS.has(token as any)) {
|
||||
command = token as CLICommand;
|
||||
break;
|
||||
}
|
||||
commandArgs.push(token);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!databasePath && command !== "init-settings") {
|
||||
console.error("Error: database-path is required");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
return {
|
||||
databasePath,
|
||||
settingsPath,
|
||||
verbose,
|
||||
force,
|
||||
command,
|
||||
commandArgs,
|
||||
};
|
||||
}

async function createDefaultSettingsFile(options: CLIOptions) {
    const targetPath = options.settingsPath
        ? path.resolve(options.settingsPath)
        : options.commandArgs[0]
          ? path.resolve(options.commandArgs[0])
          : path.resolve(process.cwd(), "data.json");

    if (!options.force) {
        try {
            await fs.stat(targetPath);
            throw new Error(`Settings file already exists: ${targetPath} (use --force to overwrite)`);
        } catch (ex: any) {
            if (ex?.code !== "ENOENT") {
                throw ex;
            }
        }
    }

    const settings = {
        ...DEFAULT_SETTINGS,
        useIndexedDBAdapter: false,
    } as ObsidianLiveSyncSettings;

    await fs.mkdir(path.dirname(targetPath), { recursive: true });
    await fs.writeFile(targetPath, JSON.stringify(settings, null, 2), "utf-8");
    console.log(`[Done] Created settings file: ${targetPath}`);
}

async function main() {
    const options = parseArgs();
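    // Commands that print payload data (cat, ls, info, ...) must keep stdout
    // clean, so informational logging for them is routed to stderr below.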
    const avoidStdoutNoise =
        options.command === "cat" ||
        options.command === "cat-rev" ||
        options.command === "ls" ||
        options.command === "info" ||
        options.command === "rm" ||
        options.command === "resolve";
    const infoLog = avoidStdoutNoise ? console.error : console.log;

    if (options.command === "init-settings") {
        await createDefaultSettingsFile(options);
        return;
    }

    // Resolve vault path
    const vaultPath = path.resolve(options.databasePath!);
    // Check if vault directory exists
    try {
        const stat = await fs.stat(vaultPath);
        if (!stat.isDirectory()) {
            console.error(`Error: ${vaultPath} is not a directory`);
            process.exit(1);
        }
    } catch (error) {
        console.error(`Error: Vault directory ${vaultPath} does not exist`);
        process.exit(1);
    }

    // Resolve settings path
    const settingsPath = options.settingsPath
        ? path.resolve(options.settingsPath)
        : path.join(vaultPath, SETTINGS_FILE);

    infoLog(`Self-hosted LiveSync CLI`);
    infoLog(`Vault: ${vaultPath}`);
    infoLog(`Settings: ${settingsPath}`);
    infoLog("");

    // Create service context and hub
    const context = new NodeServiceContext(vaultPath);
    const serviceHubInstance = new NodeServiceHub<NodeServiceContext>(vaultPath, context);
    serviceHubInstance.API.addLog.setHandler((message: string, level: LOG_LEVEL) => {
        const prefix = `[${level}]`;
        if (level <= LOG_LEVEL_VERBOSE) {
            if (!options.verbose) return;
        }
        console.error(`${prefix} ${message}`);
    });
    // Prevent replication results from being processed automatically.
    serviceHubInstance.replication.processSynchroniseResult.addHandler(async () => {
        console.error(`[Info] Replication result received, but not processed automatically in CLI mode.`);
        return await Promise.resolve(true);
    }, -100);
    // Set up settings handlers
    const settingService = serviceHubInstance.setting;

    (settingService as InjectableSettingService<NodeServiceContext>).saveData.setHandler(
        async (data: ObsidianLiveSyncSettings) => {
            try {
                await fs.writeFile(settingsPath, JSON.stringify(data, null, 2), "utf-8");
                if (options.verbose) {
                    console.error(`[Settings] Saved to ${settingsPath}`);
                }
            } catch (error) {
                console.error(`[Settings] Failed to save:`, error);
            }
        }
    );

    (settingService as InjectableSettingService<NodeServiceContext>).loadData.setHandler(
        async (): Promise<ObsidianLiveSyncSettings | undefined> => {
            try {
                const content = await fs.readFile(settingsPath, "utf-8");
                const data = JSON.parse(content);
                if (options.verbose) {
                    console.error(`[Settings] Loaded from ${settingsPath}`);
                }
                // Force disable IndexedDB adapter in CLI environment
                data.useIndexedDBAdapter = false;
                return data;
            } catch (error) {
                if (options.verbose) {
                    console.error(`[Settings] File not found, using defaults`);
                }
                return undefined;
            }
        }
    );

    // Create LiveSync core
    const core = new LiveSyncBaseCore(
        serviceHubInstance,
        (core: LiveSyncBaseCore<NodeServiceContext, any>, serviceHub: InjectableServiceHub<NodeServiceContext>) => {
            return initialiseServiceModulesCLI(vaultPath, core, serviceHub);
        },
        () => [], // No extra modules
        () => [], // No add-ons
        () => [] // No serviceFeatures
    );

    // Set up signal handlers for graceful shutdown
    const shutdown = async (signal: string) => {
        console.log();
        console.log(`[Shutdown] Received ${signal}, shutting down gracefully...`);
        try {
            await core.services.control.onUnload();
            console.log(`[Shutdown] Complete`);
            process.exit(0);
        } catch (error) {
            console.error(`[Shutdown] Error:`, error);
            process.exit(1);
        }
    };

    process.on("SIGINT", () => shutdown("SIGINT"));
    process.on("SIGTERM", () => shutdown("SIGTERM"));

    // Start the core
    try {
        infoLog(`[Starting] Initializing LiveSync...`);

        const loadResult = await core.services.control.onLoad();
        if (!loadResult) {
            console.error(`[Error] Failed to initialize LiveSync`);
            process.exit(1);
        }

        await core.services.control.onReady();

        infoLog(`[Ready] LiveSync is running`);
        infoLog(`[Ready] Press Ctrl+C to stop`);
        infoLog("");

        // Check if configured
        const settings = core.services.setting.currentSettings();
        if (!settings.isConfigured) {
            console.warn(`[Warning] LiveSync is not configured yet`);
            console.warn(`[Warning] Please edit ${settingsPath} to configure the CouchDB connection`);
            console.warn();
            console.warn(`Required settings:`);
            console.warn(` - couchDB_URI: CouchDB server URL`);
            console.warn(` - couchDB_USER: CouchDB username`);
            console.warn(` - couchDB_PASSWORD: CouchDB password`);
            console.warn(` - couchDB_DBNAME: Database name`);
            console.warn();
        } else {
            infoLog(`[Info] LiveSync is configured and ready`);
            infoLog(`[Info] Database: ${settings.couchDB_URI}/${settings.couchDB_DBNAME}`);
            infoLog("");
        }

        const result = await runCommand(options, { vaultPath, core, settingsPath });
        if (!result) {
            console.error(`[Error] Command '${options.command}' failed`);
            process.exitCode = 1;
        } else if (options.command !== "daemon") {
            infoLog(`[Done] Command '${options.command}' completed`);
        }

        if (options.command === "daemon") {
            // Keep the process running
            await new Promise(() => {});
        } else {
            await core.services.control.onUnload();
        }
    } catch (error) {
        console.error(`[Error] Failed to start:`, error);
        process.exit(1);
    }
}

// Run main
main().catch((error) => {
    console.error(`[Fatal Error]`, error);
    process.exit(1);
});

133  src/apps/cli/managers/CLIStorageEventManagerAdapter.ts  Normal file
@@ -0,0 +1,133 @@
import type { FilePath, UXFileInfoStub, UXInternalFileInfoStub } from "@lib/common/types";
import type { FileEventItem } from "@lib/common/types";
import type { IStorageEventManagerAdapter } from "@lib/managers/adapters";
import type {
    IStorageEventTypeGuardAdapter,
    IStorageEventPersistenceAdapter,
    IStorageEventWatchAdapter,
    IStorageEventStatusAdapter,
    IStorageEventConverterAdapter,
    IStorageEventWatchHandlers,
} from "@lib/managers/adapters";
import type { FileEventItemSentinel } from "@lib/managers/StorageEventManager";
import type { NodeFile, NodeFolder } from "../adapters/NodeTypes";
import * as fs from "fs/promises";
import * as path from "path";

/**
 * CLI-specific type guard adapter
 */
class CLITypeGuardAdapter implements IStorageEventTypeGuardAdapter<NodeFile, NodeFolder> {
    isFile(file: any): file is NodeFile {
        return file && typeof file === "object" && "path" in file && "stat" in file && !file.isFolder;
    }

    isFolder(item: any): item is NodeFolder {
        return item && typeof item === "object" && "path" in item && item.isFolder === true;
    }
}

/**
 * CLI-specific persistence adapter (file-based snapshot)
 */
class CLIPersistenceAdapter implements IStorageEventPersistenceAdapter {
    private snapshotPath: string;

    constructor(basePath: string) {
        this.snapshotPath = path.join(basePath, ".livesync-snapshot.json");
    }

    async saveSnapshot(snapshot: (FileEventItem | FileEventItemSentinel)[]): Promise<void> {
        try {
            await fs.writeFile(this.snapshotPath, JSON.stringify(snapshot, null, 2), "utf-8");
        } catch (error) {
            console.error("Failed to save snapshot:", error);
        }
    }

    async loadSnapshot(): Promise<(FileEventItem | FileEventItemSentinel)[] | null> {
        try {
            const content = await fs.readFile(this.snapshotPath, "utf-8");
            return JSON.parse(content);
        } catch {
            return null;
        }
    }
}
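
// Example (assumed usage): the event queue snapshot survives across CLI runs.
//   const persistence = new CLIPersistenceAdapter("/path/to/vault");
//   await persistence.saveSnapshot(queue);             // writes <vault>/.livesync-snapshot.json
//   const restored = await persistence.loadSnapshot(); // null when no snapshot exists yet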

/**
 * CLI-specific status adapter (console logging)
 */
class CLIStatusAdapter implements IStorageEventStatusAdapter {
    private lastUpdate = 0;
    private updateInterval = 5000; // Update every 5 seconds

    updateStatus(status: { batched: number; processing: number; totalQueued: number }): void {
        const now = Date.now();
        if (now - this.lastUpdate > this.updateInterval) {
            if (status.totalQueued > 0 || status.processing > 0) {
                console.log(
                    `[StorageEventManager] Batched: ${status.batched}, Processing: ${status.processing}, Total Queued: ${status.totalQueued}`
                );
            }
            this.lastUpdate = now;
        }
    }
}

/**
 * CLI-specific converter adapter
 */
class CLIConverterAdapter implements IStorageEventConverterAdapter<NodeFile> {
    toFileInfo(file: NodeFile, deleted?: boolean): UXFileInfoStub {
        return {
            name: path.basename(file.path),
            path: file.path,
            stat: file.stat,
            deleted: deleted,
            isFolder: false,
        };
    }

    toInternalFileInfo(p: FilePath): UXInternalFileInfoStub {
        return {
            name: path.basename(p),
            path: p,
            isInternal: true,
            stat: undefined,
        };
    }
}

/**
 * CLI-specific watch adapter (currently a no-op; file watching is not enabled in the CLI)
 */
class CLIWatchAdapter implements IStorageEventWatchAdapter {
    constructor(private basePath: string) {}

    async beginWatch(handlers: IStorageEventWatchHandlers): Promise<void> {
        // File watching is not activated in the CLI, because it is designed for
        // one-shot push/pull operations rather than real-time sync.
        console.error("[CLIWatchAdapter] File watching is not enabled in CLI version");
        return Promise.resolve();
    }
}

/**
 * Composite adapter for CLI StorageEventManager
 */
export class CLIStorageEventManagerAdapter implements IStorageEventManagerAdapter<NodeFile, NodeFolder> {
    readonly typeGuard: CLITypeGuardAdapter;
    readonly persistence: CLIPersistenceAdapter;
    readonly watch: CLIWatchAdapter;
    readonly status: CLIStatusAdapter;
    readonly converter: CLIConverterAdapter;

    constructor(basePath: string) {
        this.typeGuard = new CLITypeGuardAdapter();
        this.persistence = new CLIPersistenceAdapter(basePath);
        this.watch = new CLIWatchAdapter(basePath);
        this.status = new CLIStatusAdapter();
        this.converter = new CLIConverterAdapter();
    }
}

28  src/apps/cli/managers/StorageEventManagerCLI.ts  Normal file
@@ -0,0 +1,28 @@
import { StorageEventManagerBase, type StorageEventManagerBaseDependencies } from "@lib/managers/StorageEventManager";
import { CLIStorageEventManagerAdapter } from "./CLIStorageEventManagerAdapter";
import type { IMinimumLiveSyncCommands, LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
// import type { IMinimumLiveSyncCommands } from "@lib/services/base/IService";

export class StorageEventManagerCLI extends StorageEventManagerBase<CLIStorageEventManagerAdapter> {
    core: LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>;

    constructor(
        basePath: string,
        core: LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>,
        dependencies: StorageEventManagerBaseDependencies
    ) {
        const adapter = new CLIStorageEventManagerAdapter(basePath);
        super(adapter, dependencies);
        this.core = core;
    }

    /**
     * Override _watchVaultRawEvents for CLI-specific logic.
     * In the CLI there are no internal files like Obsidian's .obsidian folder.
     */
    protected override async _watchVaultRawEvents(path: string) {
        // No-op in the CLI version: internal file handling is not needed.
    }
}

18  src/apps/cli/package.json  Normal file
@@ -0,0 +1,18 @@
{
    "name": "self-hosted-livesync-cli",
    "private": true,
    "version": "0.0.0",
    "main": "dist/index.cjs",
    "type": "module",
    "scripts": {
        "dev": "vite",
        "build": "vite build",
        "preview": "vite preview",
        "cli": "node dist/index.cjs",
        "buildRun": "npm run build && npm run cli --",
        "check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json",
        "test:e2e:two-vaults": "bash test/test-e2e-two-vaults-with-docker-linux.sh"
    },
    "dependencies": {},
    "devDependencies": {}
}

104  src/apps/cli/serviceModules/CLIServiceModules.ts  Normal file
@@ -0,0 +1,104 @@
import type { InjectableServiceHub } from "@lib/services/implements/injectable/InjectableServiceHub";
import { ServiceRebuilder } from "@lib/serviceModules/Rebuilder";
import { ServiceFileHandler } from "../../../serviceModules/FileHandler";
import { StorageAccessManager } from "@lib/managers/StorageProcessingManager";
import type { LiveSyncBaseCore } from "../../../LiveSyncBaseCore";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
import { FileAccessCLI } from "./FileAccessCLI";
import { ServiceFileAccessCLI } from "./ServiceFileAccessImpl";
import { ServiceDatabaseFileAccessCLI } from "./DatabaseFileAccess";
import { StorageEventManagerCLI } from "../managers/StorageEventManagerCLI";
import type { ServiceModules } from "@lib/interfaces/ServiceModule";

/**
 * Initialize service modules for CLI version
 * This is the CLI equivalent of ObsidianLiveSyncPlugin.initialiseServiceModules
 *
 * @param basePath - The base path of the vault directory
 * @param core - The LiveSyncBaseCore instance
 * @param services - The service hub
 * @returns ServiceModules containing all initialized service modules
 */
export function initialiseServiceModulesCLI(
    basePath: string,
    core: LiveSyncBaseCore<ServiceContext, any>,
    services: InjectableServiceHub<ServiceContext>
): ServiceModules {
    const storageAccessManager = new StorageAccessManager();

    // CLI-specific file access using Node.js FileSystemAdapter
    const vaultAccess = new FileAccessCLI(basePath, {
        storageAccessManager: storageAccessManager,
        vaultService: services.vault,
        settingService: services.setting,
        APIService: services.API,
        pathService: services.path,
    });

    // CLI-specific storage event manager
    const storageEventManager = new StorageEventManagerCLI(basePath, core, {
        fileProcessing: services.fileProcessing,
        setting: services.setting,
        vaultService: services.vault,
        storageAccessManager: storageAccessManager,
        APIService: services.API,
    });

    // Storage access using CLI file system adapter
    const storageAccess = new ServiceFileAccessCLI({
        API: services.API,
        setting: services.setting,
        fileProcessing: services.fileProcessing,
        vault: services.vault,
        appLifecycle: services.appLifecycle,
        storageEventManager: storageEventManager,
        storageAccessManager: storageAccessManager,
        vaultAccess: vaultAccess,
    });

    // Database file access (platform-independent)
    const databaseFileAccess = new ServiceDatabaseFileAccessCLI({
        API: services.API,
        database: services.database,
        path: services.path,
        storageAccess: storageAccess,
        vault: services.vault,
    });

    // File handler (platform-independent)
    const fileHandler = new (ServiceFileHandler as any)({
        API: services.API,
        databaseFileAccess: databaseFileAccess,
        conflict: services.conflict,
        setting: services.setting,
        fileProcessing: services.fileProcessing,
        vault: services.vault,
        path: services.path,
        replication: services.replication,
        storageAccess: storageAccess,
    });

    // Rebuilder (platform-independent)
    const rebuilder = new ServiceRebuilder({
        API: services.API,
        database: services.database,
        appLifecycle: services.appLifecycle,
        setting: services.setting,
        remote: services.remote,
        databaseEvents: services.databaseEvents,
        replication: services.replication,
        replicator: services.replicator,
        UI: services.UI,
        vault: services.vault,
        fileHandler: fileHandler,
        storageAccess: storageAccess,
        control: services.control,
    });

    return {
        rebuilder,
        fileHandler,
        databaseFileAccess,
        storageAccess,
    };
}
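
// A reading aid (not part of the module): construction order follows the dependency chain
//   vaultAccess + storageEventManager -> storageAccess -> databaseFileAccess -> fileHandler -> rebuilder,
// with each later module consuming the ones built before it, mirroring the Obsidian-side wiring.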

15  src/apps/cli/serviceModules/DatabaseFileAccess.ts  Normal file
@@ -0,0 +1,15 @@
import {
    ServiceDatabaseFileAccessBase,
    type ServiceDatabaseFileAccessDependencies,
} from "@lib/serviceModules/ServiceDatabaseFileAccessBase";
import type { DatabaseFileAccess } from "@lib/interfaces/DatabaseFileAccess";

/**
 * CLI-specific implementation of ServiceDatabaseFileAccess.
 * Same as the Obsidian version; no platform-specific changes are needed.
 */
export class ServiceDatabaseFileAccessCLI extends ServiceDatabaseFileAccessBase implements DatabaseFileAccess {
    constructor(services: ServiceDatabaseFileAccessDependencies) {
        super(services);
    }
}

20  src/apps/cli/serviceModules/FileAccessCLI.ts  Normal file
@@ -0,0 +1,20 @@
import { FileAccessBase, type FileAccessBaseDependencies } from "@lib/serviceModules/FileAccessBase";
import { NodeFileSystemAdapter } from "../adapters/NodeFileSystemAdapter";

/**
 * CLI-specific implementation of FileAccessBase
 * Uses NodeFileSystemAdapter for Node.js file operations
 */
export class FileAccessCLI extends FileAccessBase<NodeFileSystemAdapter> {
    constructor(basePath: string, dependencies: FileAccessBaseDependencies) {
        const adapter = new NodeFileSystemAdapter(basePath);
        super(adapter, dependencies);
    }

    /**
     * Expose the adapter for accessing scanDirectory
     */
    get nodeAdapter(): NodeFileSystemAdapter {
        return this.adapter;
    }
}

12  src/apps/cli/serviceModules/ServiceFileAccessImpl.ts  Normal file
@@ -0,0 +1,12 @@
import { ServiceFileAccessBase, type StorageAccessBaseDependencies } from "@lib/serviceModules/ServiceFileAccessBase";
import { NodeFileSystemAdapter } from "../adapters/NodeFileSystemAdapter";

/**
 * CLI-specific implementation of ServiceFileAccess
 * Uses NodeFileSystemAdapter for platform-specific operations
 */
export class ServiceFileAccessCLI extends ServiceFileAccessBase<NodeFileSystemAdapter> {
    constructor(services: StorageAccessBaseDependencies<NodeFileSystemAdapter>) {
        super(services);
    }
}

211  src/apps/cli/services/NodeKeyValueDBService.ts  Normal file
@@ -0,0 +1,211 @@
import { LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "@lib/common/logger";
import type { KeyValueDatabase } from "@lib/interfaces/KeyValueDatabase";
import type { IKeyValueDBService } from "@lib/services/base/IService";
import { ServiceBase, type ServiceContext } from "@lib/services/base/ServiceBase";
import type { InjectableAppLifecycleService } from "@lib/services/implements/injectable/InjectableAppLifecycleService";
import type { InjectableDatabaseEventService } from "@lib/services/implements/injectable/InjectableDatabaseEventService";
import type { IVaultService } from "@lib/services/base/IService";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
import { createInstanceLogFunction } from "@lib/services/lib/logUtils";
import * as nodeFs from "node:fs";
import * as nodePath from "node:path";

class NodeFileKeyValueDatabase implements KeyValueDatabase {
    private filePath: string;
    private data = new Map<string, unknown>();

    constructor(filePath: string) {
        this.filePath = filePath;
        this.load();
    }

    private asKeyString(key: IDBValidKey): string {
        if (typeof key === "string") {
            return key;
        }
        return JSON.stringify(key);
    }

    private load() {
        try {
            const loaded = JSON.parse(nodeFs.readFileSync(this.filePath, "utf-8")) as Record<string, unknown>;
            this.data = new Map(Object.entries(loaded));
        } catch {
            this.data = new Map();
        }
    }

    private flush() {
        nodeFs.mkdirSync(nodePath.dirname(this.filePath), { recursive: true });
        nodeFs.writeFileSync(this.filePath, JSON.stringify(Object.fromEntries(this.data), null, 2), "utf-8");
    }

    async get<T>(key: IDBValidKey): Promise<T> {
        return this.data.get(this.asKeyString(key)) as T;
    }

    async set<T>(key: IDBValidKey, value: T): Promise<IDBValidKey> {
        this.data.set(this.asKeyString(key), value);
        this.flush();
        return key;
    }

    async del(key: IDBValidKey): Promise<void> {
        this.data.delete(this.asKeyString(key));
        this.flush();
    }

    async clear(): Promise<void> {
        this.data.clear();
        this.flush();
    }

    private isIDBKeyRangeLike(value: unknown): value is { lower?: IDBValidKey; upper?: IDBValidKey } {
        return typeof value === "object" && value !== null && ("lower" in value || "upper" in value);
    }

    async keys(query?: IDBValidKey | IDBKeyRange, count?: number): Promise<IDBValidKey[]> {
        const allKeys = [...this.data.keys()];
        let filtered = allKeys;
        if (typeof query !== "undefined") {
            if (this.isIDBKeyRangeLike(query)) {
                const lower = query.lower?.toString() ?? "";
                const upper = query.upper?.toString() ?? "\uffff";
                filtered = filtered.filter((key) => key >= lower && key <= upper);
            } else {
                const exact = query.toString();
                filtered = filtered.filter((key) => key === exact);
            }
        }
        if (typeof count === "number") {
            filtered = filtered.slice(0, count);
        }
        return filtered;
    }

    async close(): Promise<void> {
        this.flush();
    }

    async destroy(): Promise<void> {
        this.data.clear();
        nodeFs.rmSync(this.filePath, { force: true });
    }
}
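
// Design note: every mutating call rewrites the whole JSON file synchronously.
// This trades throughput for simplicity and durability, which suits short-lived
// CLI runs where the store stays small.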

export interface NodeKeyValueDBDependencies<T extends ServiceContext = ServiceContext> {
    databaseEvents: InjectableDatabaseEventService<T>;
    vault: IVaultService;
    appLifecycle: InjectableAppLifecycleService<T>;
}

export class NodeKeyValueDBService<T extends ServiceContext = ServiceContext>
    extends ServiceBase<T>
    implements IKeyValueDBService
{
    private _kvDB: KeyValueDatabase | undefined;
    private _simpleStore: SimpleStore<any> | undefined;
    private filePath: string;
    private _log = createInstanceLogFunction("NodeKeyValueDBService");

    get simpleStore() {
        if (!this._simpleStore) {
            throw new Error("SimpleStore is not initialized yet");
        }
        return this._simpleStore;
    }

    get kvDB() {
        if (!this._kvDB) {
            throw new Error("KeyValueDB is not initialized yet");
        }
        return this._kvDB;
    }

    constructor(context: T, dependencies: NodeKeyValueDBDependencies<T>, filePath: string) {
        super(context);
        this.filePath = filePath;

        dependencies.databaseEvents.onResetDatabase.addHandler(this._everyOnResetDatabase.bind(this));
        dependencies.appLifecycle.onSettingLoaded.addHandler(this._everyOnloadAfterLoadSettings.bind(this));
        dependencies.databaseEvents.onDatabaseInitialisation.addHandler(this._everyOnInitializeDatabase.bind(this));
        dependencies.databaseEvents.onUnloadDatabase.addHandler(this._onOtherDatabaseUnload.bind(this));
        dependencies.databaseEvents.onCloseDatabase.addHandler(this._onOtherDatabaseClose.bind(this));
    }

    private async openKeyValueDB(): Promise<boolean> {
        try {
            this._kvDB = new NodeFileKeyValueDatabase(this.filePath);
            return true;
        } catch (ex) {
            this._log("Failed to open Node key-value database", LOG_LEVEL_NOTICE);
            this._log(ex, LOG_LEVEL_VERBOSE);
            return false;
        }
    }

    private async _everyOnResetDatabase(): Promise<boolean> {
        try {
            await this._kvDB?.del("queued-files");
            await this._kvDB?.destroy();
            return await this.openKeyValueDB();
        } catch (ex) {
            this._log("Failed to reset Node key-value database", LOG_LEVEL_NOTICE);
            this._log(ex, LOG_LEVEL_VERBOSE);
            return false;
        }
    }

    private async _onOtherDatabaseUnload(): Promise<boolean> {
        await this._kvDB?.close();
        return true;
    }

    private async _onOtherDatabaseClose(): Promise<boolean> {
        await this._kvDB?.close();
        return true;
    }

    private _everyOnInitializeDatabase(): Promise<boolean> {
        return this.openKeyValueDB();
    }

    private async _everyOnloadAfterLoadSettings(): Promise<boolean> {
        if (!(await this.openKeyValueDB())) {
            return false;
        }
        this._simpleStore = this.openSimpleStore<any>("os");
        return true;
    }

    openSimpleStore<T>(kind: string): SimpleStore<T> {
        const getDB = () => {
            if (!this._kvDB) {
                throw new Error("KeyValueDB is not initialized yet");
            }
            return this._kvDB;
        };
        const prefix = `${kind}-`;
        return {
            get: async (key: string): Promise<T> => {
                return await getDB().get(`${prefix}${key}`);
            },
            set: async (key: string, value: any): Promise<void> => {
                await getDB().set(`${prefix}${key}`, value);
            },
            delete: async (key: string): Promise<void> => {
                await getDB().del(`${prefix}${key}`);
            },
            keys: async (from: string | undefined, to: string | undefined, count?: number): Promise<string[]> => {
                // Apply `count` after prefix/range filtering; limiting the raw key
                // list first could drop matching keys that sort after unrelated ones.
                const allKeys = (await getDB().keys()).map((e) => e.toString());
                const lower = `${prefix}${from ?? ""}`;
                const upper = `${prefix}${to ?? "\uffff"}`;
                const matched = allKeys
                    .filter((key) => key.startsWith(prefix))
                    .filter((key) => key >= lower && key <= upper)
                    .map((key) => key.substring(prefix.length));
                return typeof count === "number" ? matched.slice(0, count) : matched;
            },
            db: Promise.resolve(getDB()),
        } satisfies SimpleStore<T>;
    }
}
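
// Sketch (assumed usage) of the key prefixing performed by openSimpleStore:
//   const store = service.openSimpleStore<string>("os");
//   await store.set("greeting", "hello");    // persisted under the key "os-greeting"
//   await store.keys(undefined, undefined);  // -> ["greeting", ...], with the "os-" prefix stripped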

206  src/apps/cli/services/NodeServiceHub.ts  Normal file
@@ -0,0 +1,206 @@
import type { AppLifecycleService, AppLifecycleServiceDependencies } from "@lib/services/base/AppLifecycleService";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import * as nodePath from "node:path";
import { ConfigServiceBrowserCompat } from "@lib/services/implements/browser/ConfigServiceBrowserCompat";
import { SvelteDialogManagerBase, type ComponentHasResult } from "@lib/services/implements/base/SvelteDialog";
import { UIService } from "@lib/services/implements/base/UIService";
import { InjectableServiceHub } from "@lib/services/implements/injectable/InjectableServiceHub";
import { InjectableAppLifecycleService } from "@lib/services/implements/injectable/InjectableAppLifecycleService";
import { InjectableConflictService } from "@lib/services/implements/injectable/InjectableConflictService";
import { InjectableDatabaseEventService } from "@lib/services/implements/injectable/InjectableDatabaseEventService";
import { InjectableFileProcessingService } from "@lib/services/implements/injectable/InjectableFileProcessingService";
import { PathServiceCompat } from "@lib/services/implements/injectable/InjectablePathService";
import { InjectableRemoteService } from "@lib/services/implements/injectable/InjectableRemoteService";
import { InjectableReplicationService } from "@lib/services/implements/injectable/InjectableReplicationService";
import { InjectableReplicatorService } from "@lib/services/implements/injectable/InjectableReplicatorService";
import { InjectableTestService } from "@lib/services/implements/injectable/InjectableTestService";
import { InjectableTweakValueService } from "@lib/services/implements/injectable/InjectableTweakValueService";
import { InjectableVaultServiceCompat } from "@lib/services/implements/injectable/InjectableVaultService";
import { ControlService } from "@lib/services/base/ControlService";
import type { IControlService } from "@lib/services/base/IService";
import { HeadlessAPIService } from "@lib/services/implements/headless/HeadlessAPIService";
// import { HeadlessDatabaseService } from "@lib/services/implements/headless/HeadlessDatabaseService";
import type { ServiceInstances } from "@lib/services/ServiceHub";
import { NodeKeyValueDBService } from "./NodeKeyValueDBService";
import { NodeSettingService } from "./NodeSettingService";
import { DatabaseService } from "@lib/services/base/DatabaseService";
import type { ObsidianLiveSyncSettings } from "@/lib/src/common/types";

export class NodeServiceContext extends ServiceContext {
    vaultPath: string;
    constructor(vaultPath: string) {
        super();
        this.vaultPath = vaultPath;
    }
}

class NodeAppLifecycleService<T extends ServiceContext> extends InjectableAppLifecycleService<T> {
    constructor(context: T, dependencies: AppLifecycleServiceDependencies) {
        super(context, dependencies);
    }
}

class NodeSvelteDialogManager<T extends ServiceContext> extends SvelteDialogManagerBase<T> {
    openSvelteDialog<TValue, UInitial>(
        component: ComponentHasResult<TValue, UInitial>,
        initialData?: UInitial
    ): Promise<TValue | undefined> {
        throw new Error("Method not implemented.");
    }
}

type NodeUIServiceDependencies<T extends ServiceContext = ServiceContext> = {
    appLifecycle: AppLifecycleService<T>;
    config: ConfigServiceBrowserCompat<T>;
    replicator: InjectableReplicatorService<T>;
    APIService: HeadlessAPIService<T>;
    control: IControlService;
};
class NodeDatabaseService<T extends NodeServiceContext> extends DatabaseService<T> {
    protected override modifyDatabaseOptions(
        settings: ObsidianLiveSyncSettings,
        name: string,
        options: PouchDB.Configuration.DatabaseConfiguration
    ): { name: string; options: PouchDB.Configuration.DatabaseConfiguration } {
        const optionPass = {
            ...options,
            prefix: this.context.vaultPath + nodePath.sep,
        };
        const passSettings = { ...settings, useIndexedDBAdapter: false };
        return super.modifyDatabaseOptions(passSettings, name, optionPass);
    }
}
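// Note: PouchDB's `prefix` option is prepended to the database name, so with a
// filesystem path prefix the on-disk database (presumably a Node/LevelDB-style
// adapter here, since IndexedDB is forced off) is created inside the vault.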
class NodeUIService<T extends ServiceContext> extends UIService<T> {
    override get dialogToCopy(): never {
        throw new Error("Method not implemented.");
    }

    constructor(context: T, dependencies: NodeUIServiceDependencies<T>) {
        const headlessConfirm = dependencies.APIService.confirm;
        const dialogManager = new NodeSvelteDialogManager<T>(context, {
            confirm: headlessConfirm,
            appLifecycle: dependencies.appLifecycle,
            config: dependencies.config,
            replicator: dependencies.replicator,
            control: dependencies.control,
        });

        super(context, {
            appLifecycle: dependencies.appLifecycle,
            dialogManager,
            APIService: dependencies.APIService,
        });
    }
}

export class NodeServiceHub<T extends NodeServiceContext> extends InjectableServiceHub<T> {
    constructor(basePath: string, context: T = new NodeServiceContext(basePath) as T) {
        const runtimeDir = nodePath.join(basePath, ".livesync", "runtime");
        const localStoragePath = nodePath.join(runtimeDir, "local-storage.json");
        const keyValueDBPath = nodePath.join(runtimeDir, "keyvalue-db.json");
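        // Runtime state lives under <vault>/.livesync/runtime/:
        //   local-storage.json - localStorage stand-in backing NodeSettingService
        //   keyvalue-db.json   - backing file for NodeKeyValueDBService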

        const API = new HeadlessAPIService<T>(context);
        const conflict = new InjectableConflictService(context);
        const fileProcessing = new InjectableFileProcessingService(context);

        const setting = new NodeSettingService(context, { APIService: API }, localStoragePath);

        const appLifecycle = new NodeAppLifecycleService<T>(context, {
            settingService: setting,
        });

        const remote = new InjectableRemoteService(context, {
            APIService: API,
            appLifecycle,
            setting,
        });

        const tweakValue = new InjectableTweakValueService(context);
        const vault = new InjectableVaultServiceCompat(context, {
            settingService: setting,
            APIService: API,
        });
        const test = new InjectableTestService(context);
        const databaseEvents = new InjectableDatabaseEventService(context);
        const path = new PathServiceCompat(context, {
            settingService: setting,
        });

        const database = new NodeDatabaseService<T>(context, {
            API: API,
            path,
            vault,
            setting,
        });

        const config = new ConfigServiceBrowserCompat<T>(context, {
            settingService: setting,
            APIService: API,
        });

        const replicator = new InjectableReplicatorService(context, {
            settingService: setting,
            appLifecycleService: appLifecycle,
            databaseEventService: databaseEvents,
        });

        const replication = new InjectableReplicationService(context, {
            APIService: API,
            appLifecycleService: appLifecycle,
            replicatorService: replicator,
            settingService: setting,
            fileProcessingService: fileProcessing,
            databaseService: database,
        });

        const keyValueDB = new NodeKeyValueDBService(
            context,
            {
                appLifecycle,
                databaseEvents,
                vault,
            },
            keyValueDBPath
        );

        const control = new ControlService(context, {
            appLifecycleService: appLifecycle,
            settingService: setting,
            databaseService: database,
            fileProcessingService: fileProcessing,
            APIService: API,
            replicatorService: replicator,
        });

        const ui = new NodeUIService<T>(context, {
            appLifecycle,
            config,
            replicator,
            APIService: API,
            control,
        });

        const serviceInstancesToInit: Required<ServiceInstances<T>> = {
            appLifecycle,
            conflict,
            database,
            databaseEvents,
            fileProcessing,
            replication,
            replicator,
            remote,
            setting,
            tweakValue,
            vault,
            test,
            ui,
            path,
            API,
            config,
            keyValueDB: keyValueDB as any,
            control,
        };

        super(context, serviceInstancesToInit as any);
    }
}

61  src/apps/cli/services/NodeSettingService.ts  Normal file
@@ -0,0 +1,61 @@
import { EVENT_SETTING_SAVED } from "@lib/events/coreEvents";
import { EVENT_REQUEST_RELOAD_SETTING_TAB } from "@/common/events";
import { eventHub } from "@lib/hub/hub";
import { handlers } from "@lib/services/lib/HandlerUtils";
import type { ObsidianLiveSyncSettings } from "@lib/common/types";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
import { SettingService, type SettingServiceDependencies } from "@lib/services/base/SettingService";
import * as nodeFs from "node:fs";
import * as nodePath from "node:path";

export class NodeSettingService<T extends ServiceContext> extends SettingService<T> {
    private storagePath: string;
    private localStore: Record<string, string> = {};

    constructor(context: T, dependencies: SettingServiceDependencies, storagePath: string) {
        super(context, dependencies);
        this.storagePath = storagePath;
        this.loadLocalStoreFromFile();
        this.onSettingSaved.addHandler((settings) => {
            eventHub.emitEvent(EVENT_SETTING_SAVED, settings);
            return Promise.resolve(true);
        });
        this.onSettingLoaded.addHandler((settings) => {
            eventHub.emitEvent(EVENT_REQUEST_RELOAD_SETTING_TAB);
            return Promise.resolve(true);
        });
    }

    private loadLocalStoreFromFile() {
        try {
            const loaded = JSON.parse(nodeFs.readFileSync(this.storagePath, "utf-8")) as Record<string, string>;
            this.localStore = { ...loaded };
        } catch {
            this.localStore = {};
        }
    }

    private flushLocalStoreToFile() {
        nodeFs.mkdirSync(nodePath.dirname(this.storagePath), { recursive: true });
        nodeFs.writeFileSync(this.storagePath, JSON.stringify(this.localStore, null, 2), "utf-8");
    }

    protected setItem(key: string, value: string) {
        this.localStore[key] = value;
        this.flushLocalStoreToFile();
    }

    protected getItem(key: string): string {
        return this.localStore[key] ?? "";
    }

    protected deleteItem(key: string): void {
        if (key in this.localStore) {
            delete this.localStore[key];
            this.flushLocalStoreToFile();
        }
    }

    public saveData = handlers<{ saveData: (data: ObsidianLiveSyncSettings) => Promise<void> }>().binder("saveData");
    public loadData = handlers<{ loadData: () => Promise<ObsidianLiveSyncSettings | undefined> }>().binder("loadData");
}

247  src/apps/cli/test/test-e2e-two-vaults-with-docker-linux.sh  Normal file
@@ -0,0 +1,247 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"

# Run the CLI with verbose logging (-v)
CLI_CMD=(npm run cli -- -v)
RUN_BUILD="${RUN_BUILD:-1}"
KEEP_TEST_DATA="${KEEP_TEST_DATA:-0}"
TEST_ENV_FILE="${TEST_ENV_FILE:-$CLI_DIR/.test.env}"

if [[ ! -f "$TEST_ENV_FILE" ]]; then
    echo "[ERROR] test env file not found: $TEST_ENV_FILE" >&2
    exit 1
fi

set -a
source "$TEST_ENV_FILE"
set +a

for var in hostname dbname username password; do
    if [[ -z "${!var:-}" ]]; then
        echo "[ERROR] required variable '$var' is missing in $TEST_ENV_FILE" >&2
        exit 1
    fi
done
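
# Example .test.env (assumed format; values are placeholders):
#   hostname=http://localhost:5984
#   dbname=e2e
#   username=admin
#   password=password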

COUCHDB_URI="${hostname%/}"
DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_DBNAME="${dbname}-${DB_SUFFIX}"

VAULT_ROOT="$CLI_DIR/.livesync"
VAULT_A="$VAULT_ROOT/testvault_a"
VAULT_B="$VAULT_ROOT/testvault_b"
SETTINGS_A="$VAULT_ROOT/test-settings-a.json"
SETTINGS_B="$VAULT_ROOT/test-settings-b.json"
WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-e2e.XXXXXX")"

cleanup() {
    local exit_code=$?
    bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true
    if [[ "$KEEP_TEST_DATA" != "1" ]]; then
        rm -rf "$VAULT_A" "$VAULT_B" "$SETTINGS_A" "$SETTINGS_B" "$WORK_DIR"
    else
        echo "[INFO] KEEP_TEST_DATA=1, preserving test artefacts"
        echo "  vault a:  $VAULT_A"
        echo "  vault b:  $VAULT_B"
        echo "  settings: $SETTINGS_A, $SETTINGS_B"
        echo "  work dir: $WORK_DIR"
    fi
    exit "$exit_code"
}
trap cleanup EXIT

run_cli() {
    "${CLI_CMD[@]}" "$@"
}

run_cli_a() {
    run_cli "$VAULT_A" --settings "$SETTINGS_A" "$@"
}

run_cli_b() {
    run_cli "$VAULT_B" --settings "$SETTINGS_B" "$@"
}

assert_contains() {
    local haystack="$1"
    local needle="$2"
    local message="$3"
    if ! grep -Fq "$needle" <<< "$haystack"; then
        echo "[FAIL] $message" >&2
        echo "[FAIL] expected to find: $needle" >&2
        echo "[FAIL] actual output:" >&2
        echo "$haystack" >&2
        exit 1
    fi
}

assert_equal() {
    local expected="$1"
    local actual="$2"
    local message="$3"
    if [[ "$expected" != "$actual" ]]; then
        echo "[FAIL] $message" >&2
        echo "[FAIL] expected: $expected" >&2
        echo "[FAIL] actual:   $actual" >&2
        exit 1
    fi
}

assert_command_fails() {
    local message="$1"
    shift
    set +e
    "$@" >"$WORK_DIR/failed-command.log" 2>&1
    local exit_code=$?
    set -e
    if [[ "$exit_code" -eq 0 ]]; then
        echo "[FAIL] $message" >&2
        cat "$WORK_DIR/failed-command.log" >&2
        exit 1
    fi
}

sanitise_cat_stdout() {
    sed '/^\[CLIWatchAdapter\] File watching is not enabled in CLI version$/d'
}

sync_both() {
    run_cli_a sync >/dev/null
    run_cli_b sync >/dev/null
}

curl_json() {
    curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}

init_settings() {
    local settings_file="$1"
    run_cli init-settings --force "$settings_file" >/dev/null
    SETTINGS_FILE="$settings_file" \
        COUCHDB_URI="$COUCHDB_URI" \
        COUCHDB_USER="$username" \
        COUCHDB_PASSWORD="$password" \
        COUCHDB_DBNAME="$COUCHDB_DBNAME" \
        node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));

data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.isConfigured = true;

fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
    cat "$settings_file"
}

echo "[INFO] stopping leftover CouchDB container if present"
bash "$CLI_DIR/util/couchdb-stop.sh" >/dev/null 2>&1 || true

echo "[INFO] starting CouchDB test container"
bash "$CLI_DIR/util/couchdb-start.sh"

echo "[INFO] container status"
docker ps --filter "name=couchdb-test"

echo "[INFO] initialising CouchDB test container"
bash "$CLI_DIR/util/couchdb-init.sh"

echo "[INFO] CouchDB create test database: $COUCHDB_DBNAME"
until (curl_json -X PUT --user "${username}:${password}" "${hostname}/${COUCHDB_DBNAME}"); do sleep 5; done

if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI"
    npm run build
fi

echo "[INFO] preparing vaults and settings"
rm -rf "$VAULT_A" "$VAULT_B" "$SETTINGS_A" "$SETTINGS_B"
mkdir -p "$VAULT_A" "$VAULT_B"
init_settings "$SETTINGS_A"
init_settings "$SETTINGS_B"

echo "[INFO] test DB: $COUCHDB_DBNAME"

TARGET_A_ONLY="e2e/a-only-info.md"
TARGET_SYNC="e2e/sync-info.md"
TARGET_PUSH="e2e/pushed-from-a.md"
TARGET_PUT="e2e/put-from-a.md"
TARGET_CONFLICT="e2e/conflict.md"

echo "[CASE] A puts and A can get info"
printf 'alpha-from-a\n' | run_cli_a put "$TARGET_A_ONLY" >/dev/null
INFO_A_ONLY="$(run_cli_a info "$TARGET_A_ONLY")"
assert_contains "$INFO_A_ONLY" "\"path\": \"$TARGET_A_ONLY\"" "A info should include path after put"
echo "[PASS] A put/info"

echo "[CASE] A puts, both sync, and B can get info"
printf 'visible-after-sync\n' | run_cli_a put "$TARGET_SYNC" >/dev/null
sync_both
INFO_B_SYNC="$(run_cli_b info "$TARGET_SYNC")"
assert_contains "$INFO_B_SYNC" "\"path\": \"$TARGET_SYNC\"" "B info should include path after sync"
echo "[PASS] sync A->B and B info"

echo "[CASE] A pushes and puts, both sync, and B can pull and cat"
PUSH_SRC="$WORK_DIR/push-source.txt"
PULL_DST="$WORK_DIR/pull-destination.txt"
printf 'pushed-content-%s\n' "$DB_SUFFIX" > "$PUSH_SRC"
run_cli_a push "$PUSH_SRC" "$TARGET_PUSH" >/dev/null
printf 'put-content-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_PUT" >/dev/null
sync_both
run_cli_b pull "$TARGET_PUSH" "$PULL_DST" >/dev/null
if ! cmp -s "$PUSH_SRC" "$PULL_DST"; then
    echo "[FAIL] B pull result does not match pushed source" >&2
    echo "--- source ---" >&2
    cat "$PUSH_SRC" >&2
    echo "--- pulled ---" >&2
    cat "$PULL_DST" >&2
    exit 1
fi
CAT_B_PUT="$(run_cli_b cat "$TARGET_PUT" | sanitise_cat_stdout)"
assert_equal "put-content-$DB_SUFFIX" "$CAT_B_PUT" "B cat should return A put content"
echo "[PASS] push/pull and put/cat across vaults"

echo "[CASE] A removes, both sync, and B can no longer cat"
run_cli_a rm "$TARGET_PUT" >/dev/null
sync_both
assert_command_fails "B cat should fail after A removed the file and synced" run_cli_b cat "$TARGET_PUT"
echo "[PASS] rm is replicated"

echo "[CASE] verify conflict detection"
printf 'conflict-base\n' | run_cli_a put "$TARGET_CONFLICT" >/dev/null
sync_both
INFO_B_BASE="$(run_cli_b info "$TARGET_CONFLICT")"
assert_contains "$INFO_B_BASE" "\"path\": \"$TARGET_CONFLICT\"" "B should be able to info before creating conflict"

printf 'conflict-from-a-%s\n' "$DB_SUFFIX" | run_cli_a put "$TARGET_CONFLICT" >/dev/null
printf 'conflict-from-b-%s\n' "$DB_SUFFIX" | run_cli_b put "$TARGET_CONFLICT" >/dev/null

run_cli_a sync >/dev/null
run_cli_b sync >/dev/null
run_cli_a sync >/dev/null

INFO_A_CONFLICT="$(run_cli_a info "$TARGET_CONFLICT")"
INFO_B_CONFLICT="$(run_cli_b info "$TARGET_CONFLICT")"
if grep -qF '"conflicts": "N/A"' <<< "$INFO_A_CONFLICT" && grep -qF '"conflicts": "N/A"' <<< "$INFO_B_CONFLICT"; then
    echo "[FAIL] conflict was expected but both A and B show Conflicts: N/A" >&2
    echo "--- A info ---" >&2
    echo "$INFO_A_CONFLICT" >&2
    echo "--- B info ---" >&2
    echo "$INFO_B_CONFLICT" >&2
    exit 1
fi
echo "[PASS] conflict detected by info"

echo "[PASS] all requested E2E scenarios completed"

68  src/apps/cli/test/test-push-pull-linux.sh  Normal file
@@ -0,0 +1,68 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"

CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/push-pull.txt}"

WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT

SETTINGS_FILE="${1:-$WORK_DIR/data.json}"

if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI..."
    npm run build
fi

run_cli() {
    "${CLI_CMD[@]}" "$@"
}

echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"

if [[ -n "${COUCHDB_URI:-}" && -n "${COUCHDB_USER:-}" && -n "${COUCHDB_PASSWORD:-}" && -n "${COUCHDB_DBNAME:-}" ]]; then
    echo "[INFO] applying CouchDB env vars to generated settings"
    SETTINGS_FILE="$SETTINGS_FILE" node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
else
    echo "[WARN] CouchDB env vars are not fully set. push/pull may fail unless generated settings are updated."
fi
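
# Example invocation (hypothetical values):
#   COUCHDB_URI=http://localhost:5984 COUCHDB_USER=admin \
#   COUCHDB_PASSWORD=password COUCHDB_DBNAME=test \
#   bash test/test-push-pull-linux.sh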

VAULT_DIR="$WORK_DIR/vault"
mkdir -p "$VAULT_DIR/test"

SRC_FILE="$WORK_DIR/push-source.txt"
PULLED_FILE="$WORK_DIR/pull-result.txt"
printf 'push-pull-test %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" > "$SRC_FILE"

echo "[INFO] push -> $REMOTE_PATH"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" push "$SRC_FILE" "$REMOTE_PATH"

echo "[INFO] pull <- $REMOTE_PATH"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull "$REMOTE_PATH" "$PULLED_FILE"

if cmp -s "$SRC_FILE" "$PULLED_FILE"; then
    echo "[PASS] push/pull roundtrip matched"
else
    echo "[FAIL] push/pull roundtrip mismatch" >&2
    echo "--- source ---" >&2
    cat "$SRC_FILE" >&2
    echo "--- pulled ---" >&2
    cat "$PULLED_FILE" >&2
    exit 1
fi

338  src/apps/cli/test/test-setup-put-cat-linux.sh  Executable file
@@ -0,0 +1,338 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
REPO_ROOT="$(cd -- "$CLI_DIR/../../.." && pwd)"
cd "$CLI_DIR"

CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
REMOTE_PATH="${REMOTE_PATH:-test/setup-put-cat.txt}"
SETUP_PASSPHRASE="${SETUP_PASSPHRASE:-setup-passphrase}"

WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT

SETTINGS_FILE="${1:-$WORK_DIR/data.json}"

if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI..."
    npm run build
fi

run_cli() {
    "${CLI_CMD[@]}" "$@"
}

echo "[INFO] generating settings from DEFAULT_SETTINGS -> $SETTINGS_FILE"
run_cli init-settings --force "$SETTINGS_FILE"

echo "[INFO] creating setup URI from settings"
SETUP_URI="$(
    REPO_ROOT="$REPO_ROOT" SETTINGS_FILE="$SETTINGS_FILE" SETUP_PASSPHRASE="$SETUP_PASSPHRASE" npx tsx -e '
    import fs from "node:fs";
    (async () => {
        const { encodeSettingsToSetupURI } = await import(process.env.REPO_ROOT + "/src/lib/src/API/processSetting.ts");
        const settingsPath = process.env.SETTINGS_FILE;
        const setupPassphrase = process.env.SETUP_PASSPHRASE;
        const settings = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
        settings.couchDB_DBNAME = "setup-put-cat-db";
        settings.couchDB_URI = "http://127.0.0.1:5999";
        settings.couchDB_USER = "dummy";
        settings.couchDB_PASSWORD = "dummy";
        settings.liveSync = false;
        settings.syncOnStart = false;
        settings.syncOnSave = false;
        const uri = await encodeSettingsToSetupURI(settings, setupPassphrase);
        process.stdout.write(uri.trim());
    })();
    '
)"

VAULT_DIR="$WORK_DIR/vault"
mkdir -p "$VAULT_DIR/test"

echo "[INFO] applying setup URI"
SETUP_LOG="$WORK_DIR/setup-output.log"
set +e
printf '%s\n' "$SETUP_PASSPHRASE" | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" setup "$SETUP_URI" \
    >"$SETUP_LOG" 2>&1
SETUP_EXIT=$?
set -e
cat "$SETUP_LOG"
if [[ "$SETUP_EXIT" -ne 0 ]]; then
    echo "[FAIL] setup command exited with $SETUP_EXIT" >&2
    exit 1
fi

if grep -Fq "[Command] setup ->" "$SETUP_LOG"; then
    echo "[PASS] setup command executed"
else
    echo "[FAIL] setup command did not execute expected code path" >&2
    exit 1
fi

SRC_FILE="$WORK_DIR/put-source.txt"
printf 'setup-put-cat-test %s\nline-2\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" > "$SRC_FILE"

echo "[INFO] put -> $REMOTE_PATH"
cat "$SRC_FILE" | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put "$REMOTE_PATH"

echo "[INFO] cat <- $REMOTE_PATH"
CAT_OUTPUT="$WORK_DIR/cat-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" cat "$REMOTE_PATH" > "$CAT_OUTPUT"

CAT_OUTPUT_CLEAN="$WORK_DIR/cat-output-clean.txt"
grep -v '^\[CLIWatchAdapter\] File watching is not enabled in CLI version$' "$CAT_OUTPUT" > "$CAT_OUTPUT_CLEAN" || true

if cmp -s "$SRC_FILE" "$CAT_OUTPUT_CLEAN"; then
    echo "[PASS] setup/put/cat roundtrip matched"
else
    echo "[FAIL] setup/put/cat roundtrip mismatch" >&2
    echo "--- source ---" >&2
    cat "$SRC_FILE" >&2
    echo "--- cat-output ---" >&2
    cat "$CAT_OUTPUT_CLEAN" >&2
    exit 1
fi

echo "[INFO] ls $REMOTE_PATH"
LS_OUTPUT="$WORK_DIR/ls-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls "$REMOTE_PATH" > "$LS_OUTPUT"

LS_LINE="$(grep -F "$REMOTE_PATH" "$LS_OUTPUT" | head -n 1 || true)"
if [[ -z "$LS_LINE" ]]; then
    echo "[FAIL] ls output did not include target path" >&2
    cat "$LS_OUTPUT" >&2
    exit 1
fi

IFS=$'\t' read -r LS_PATH LS_SIZE LS_MTIME LS_REV <<< "$LS_LINE"
if [[ "$LS_PATH" != "$REMOTE_PATH" ]]; then
    echo "[FAIL] ls path column mismatch: $LS_PATH" >&2
    exit 1
fi
if [[ ! "$LS_SIZE" =~ ^[0-9]+$ ]]; then
    echo "[FAIL] ls size column is not numeric: $LS_SIZE" >&2
    exit 1
fi
if [[ ! "$LS_MTIME" =~ ^[0-9]+$ ]]; then
    echo "[FAIL] ls mtime column is not numeric: $LS_MTIME" >&2
    exit 1
fi
if [[ -z "$LS_REV" ]]; then
    echo "[FAIL] ls revision column is empty" >&2
    exit 1
fi
echo "[PASS] ls output format matched"

echo "[INFO] adding more files for ls test cases"
printf 'file-a\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/a-first.txt >/dev/null
printf 'file-z\n' | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put test/z-last.txt >/dev/null

echo "[INFO] ls test/ (prefix filter and sorting)"
LS_PREFIX_OUTPUT="$WORK_DIR/ls-prefix-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls test/ > "$LS_PREFIX_OUTPUT"

if [[ "$(wc -l < "$LS_PREFIX_OUTPUT")" -lt 3 ]]; then
    echo "[FAIL] ls prefix output expected at least 3 rows" >&2
    cat "$LS_PREFIX_OUTPUT" >&2
    exit 1
fi

FIRST_PATH="$(cut -f1 "$LS_PREFIX_OUTPUT" | sed -n '1p')"
SECOND_PATH="$(cut -f1 "$LS_PREFIX_OUTPUT" | sed -n '2p')"
if [[ "$FIRST_PATH" > "$SECOND_PATH" ]]; then
    echo "[FAIL] ls output is not sorted by path" >&2
    cat "$LS_PREFIX_OUTPUT" >&2
    exit 1
fi

if ! grep -Fq $'test/a-first.txt\t' "$LS_PREFIX_OUTPUT"; then
    echo "[FAIL] ls prefix output missing test/a-first.txt" >&2
    cat "$LS_PREFIX_OUTPUT" >&2
    exit 1
fi
if ! grep -Fq $'test/z-last.txt\t' "$LS_PREFIX_OUTPUT"; then
    echo "[FAIL] ls prefix output missing test/z-last.txt" >&2
    cat "$LS_PREFIX_OUTPUT" >&2
    exit 1
fi
echo "[PASS] ls prefix and sorting matched"

echo "[INFO] ls no-match prefix"
LS_EMPTY_OUTPUT="$WORK_DIR/ls-empty-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls no-such-prefix/ > "$LS_EMPTY_OUTPUT"
if [[ -s "$LS_EMPTY_OUTPUT" ]]; then
    echo "[FAIL] ls no-match prefix should produce empty output" >&2
    cat "$LS_EMPTY_OUTPUT" >&2
    exit 1
fi
echo "[PASS] ls no-match prefix matched"

echo "[INFO] info $REMOTE_PATH"
INFO_OUTPUT="$WORK_DIR/info-output.txt"
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REMOTE_PATH" > "$INFO_OUTPUT"

# Check required label lines
for label in "ID:" "Revision:" "Conflicts:" "Filename:" "Path:" "Size:" "Chunks:"; do
    if ! grep -q "^$label" "$INFO_OUTPUT"; then
        echo "[FAIL] info output missing label: $label" >&2
        cat "$INFO_OUTPUT" >&2
        exit 1
    fi
done

# Path value must match
INFO_PATH="$(grep '^Path:' "$INFO_OUTPUT" | sed 's/^Path:[[:space:]]*//')"
if [[ "$INFO_PATH" != "$REMOTE_PATH" ]]; then
    echo "[FAIL] info Path mismatch: $INFO_PATH" >&2
    exit 1
fi

# Filename must be the basename
INFO_FILENAME="$(grep '^Filename:' "$INFO_OUTPUT" | sed 's/^Filename:[[:space:]]*//')"
EXPECTED_FILENAME="$(basename "$REMOTE_PATH")"
if [[ "$INFO_FILENAME" != "$EXPECTED_FILENAME" ]]; then
    echo "[FAIL] info Filename mismatch: $INFO_FILENAME != $EXPECTED_FILENAME" >&2
    exit 1
fi

# Size must be numeric
INFO_SIZE="$(grep '^Size:' "$INFO_OUTPUT" | sed 's/^Size:[[:space:]]*//')"
if [[ ! "$INFO_SIZE" =~ ^[0-9]+$ ]]; then
    echo "[FAIL] info Size is not numeric: $INFO_SIZE" >&2
    exit 1
fi

# Chunks count must be numeric and ≥1
INFO_CHUNKS="$(grep '^Chunks:' "$INFO_OUTPUT" | sed 's/^Chunks:[[:space:]]*//')"
if [[ ! "$INFO_CHUNKS" =~ ^[0-9]+$ ]] || [[ "$INFO_CHUNKS" -lt 1 ]]; then
    echo "[FAIL] info Chunks is not a positive integer: $INFO_CHUNKS" >&2
    exit 1
fi

# Conflicts should be N/A (no live CouchDB)
INFO_CONFLICTS="$(grep '^Conflicts:' "$INFO_OUTPUT" | sed 's/^Conflicts:[[:space:]]*//')"
if [[ "$INFO_CONFLICTS" != "N/A" ]]; then
|
||||
echo "[FAIL] info Conflicts expected N/A, got: $INFO_CONFLICTS" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[PASS] info output format matched"
|
||||
|
||||
echo "[INFO] info non-existent path"
|
||||
INFO_MISSING_EXIT=0
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info no-such-file.md > /dev/null || INFO_MISSING_EXIT=$?
|
||||
if [[ "$INFO_MISSING_EXIT" -eq 0 ]]; then
|
||||
echo "[FAIL] info on non-existent file should exit non-zero" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo "[PASS] info non-existent path returns non-zero"
|
||||
|
||||
echo "[INFO] rm test/z-last.txt"
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" rm test/z-last.txt > /dev/null
|
||||
|
||||
RM_CAT_EXIT=0
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" cat test/z-last.txt > /dev/null || RM_CAT_EXIT=$?
|
||||
if [[ "$RM_CAT_EXIT" -eq 0 ]]; then
|
||||
echo "[FAIL] rm target should not be readable by cat" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
LS_AFTER_RM="$WORK_DIR/ls-after-rm.txt"
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls test/ > "$LS_AFTER_RM"
|
||||
if grep -Fq $'test/z-last.txt\t' "$LS_AFTER_RM"; then
|
||||
echo "[FAIL] rm target should not appear in ls output" >&2
|
||||
cat "$LS_AFTER_RM" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo "[PASS] rm removed target from visible entries"
|
||||
|
||||
echo "[INFO] resolve test/a-first.txt using current revision"
|
||||
RESOLVE_LS_LINE="$(run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" ls test/a-first.txt | head -n 1)"
|
||||
if [[ -z "$RESOLVE_LS_LINE" ]]; then
|
||||
echo "[FAIL] could not fetch revision for resolve test" >&2
|
||||
exit 1
|
||||
fi
|
||||
IFS=$'\t' read -r _ _ _ RESOLVE_REV <<< "$RESOLVE_LS_LINE"
|
||||
if [[ -z "$RESOLVE_REV" ]]; then
|
||||
echo "[FAIL] revision was empty for resolve test" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" resolve test/a-first.txt "$RESOLVE_REV" > /dev/null
|
||||
echo "[PASS] resolve accepted current revision"
|
||||
|
||||
echo "[INFO] resolve with non-existent revision"
|
||||
RESOLVE_BAD_EXIT=0
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" resolve test/a-first.txt 9-no-such-rev > /dev/null || RESOLVE_BAD_EXIT=$?
|
||||
if [[ "$RESOLVE_BAD_EXIT" -eq 0 ]]; then
|
||||
echo "[FAIL] resolve with non-existent revision should exit non-zero" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo "[PASS] resolve non-existent revision returns non-zero"
|
||||
|
||||
echo "[INFO] preparing revision history for cat-rev test"
|
||||
REV_PATH="test/revision-history.txt"
|
||||
REV_V1_FILE="$WORK_DIR/rev-v1.txt"
|
||||
REV_V2_FILE="$WORK_DIR/rev-v2.txt"
|
||||
REV_V3_FILE="$WORK_DIR/rev-v3.txt"
|
||||
|
||||
printf 'revision-v1\n' > "$REV_V1_FILE"
|
||||
printf 'revision-v2\n' > "$REV_V2_FILE"
|
||||
printf 'revision-v3\n' > "$REV_V3_FILE"
|
||||
|
||||
cat "$REV_V1_FILE" | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put "$REV_PATH" > /dev/null
|
||||
cat "$REV_V2_FILE" | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put "$REV_PATH" > /dev/null
|
||||
cat "$REV_V3_FILE" | run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" put "$REV_PATH" > /dev/null
|
||||
|
||||
echo "[INFO] info $REV_PATH (past revisions)"
|
||||
REV_INFO_OUTPUT="$WORK_DIR/rev-info-output.txt"
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" info "$REV_PATH" > "$REV_INFO_OUTPUT"
|
||||
|
||||
PAST_REV="$(grep '^ rev: ' "$REV_INFO_OUTPUT" | head -n 1 | sed 's/^ rev: //')"
|
||||
if [[ -z "$PAST_REV" ]]; then
|
||||
echo "[FAIL] info output did not include any past revision" >&2
|
||||
cat "$REV_INFO_OUTPUT" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[INFO] cat-rev $REV_PATH @ $PAST_REV"
|
||||
REV_CAT_OUTPUT="$WORK_DIR/rev-cat-output.txt"
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" cat-rev "$REV_PATH" "$PAST_REV" > "$REV_CAT_OUTPUT"
|
||||
|
||||
if cmp -s "$REV_CAT_OUTPUT" "$REV_V1_FILE" || cmp -s "$REV_CAT_OUTPUT" "$REV_V2_FILE"; then
|
||||
echo "[PASS] cat-rev matched one of the past revisions from info"
|
||||
else
|
||||
echo "[FAIL] cat-rev output did not match expected past revisions" >&2
|
||||
echo "--- info output ---" >&2
|
||||
cat "$REV_INFO_OUTPUT" >&2
|
||||
echo "--- cat-rev output ---" >&2
|
||||
cat "$REV_CAT_OUTPUT" >&2
|
||||
echo "--- expected v1 ---" >&2
|
||||
cat "$REV_V1_FILE" >&2
|
||||
echo "--- expected v2 ---" >&2
|
||||
cat "$REV_V2_FILE" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[INFO] pull-rev $REV_PATH @ $PAST_REV"
|
||||
REV_PULL_OUTPUT="$WORK_DIR/rev-pull-output.txt"
|
||||
run_cli "$VAULT_DIR" --settings "$SETTINGS_FILE" pull-rev "$REV_PATH" "$REV_PULL_OUTPUT" "$PAST_REV" > /dev/null
|
||||
|
||||
if cmp -s "$REV_PULL_OUTPUT" "$REV_V1_FILE" || cmp -s "$REV_PULL_OUTPUT" "$REV_V2_FILE"; then
|
||||
echo "[PASS] pull-rev matched one of the past revisions from info"
|
||||
else
|
||||
echo "[FAIL] pull-rev output did not match expected past revisions" >&2
|
||||
echo "--- info output ---" >&2
|
||||
cat "$REV_INFO_OUTPUT" >&2
|
||||
echo "--- pull-rev output ---" >&2
|
||||
cat "$REV_PULL_OUTPUT" >&2
|
||||
echo "--- expected v1 ---" >&2
|
||||
cat "$REV_V1_FILE" >&2
|
||||
echo "--- expected v2 ---" >&2
|
||||
cat "$REV_V2_FILE" >&2
|
||||
exit 1
|
||||
fi
|
||||
186
src/apps/cli/test/test-sync-two-local-databases-linux.sh
Executable file
@@ -0,0 +1,186 @@
#!/usr/bin/env bash
## TODO: test this script. I would love to go to bed (it is 3 a.m.), but I am so excited about the new CLI that I want to at least get this skeleton in place. Delightful days!
set -euo pipefail

SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
CLI_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)"
cd "$CLI_DIR"

CLI_CMD=(npm run cli --)
RUN_BUILD="${RUN_BUILD:-1}"
COUCHDB_URI="${COUCHDB_URI:-}"
COUCHDB_USER="${COUCHDB_USER:-}"
COUCHDB_PASSWORD="${COUCHDB_PASSWORD:-}"
COUCHDB_DBNAME_BASE="${COUCHDB_DBNAME:-livesync-cli-e2e}"

if [[ -z "$COUCHDB_URI" || -z "$COUCHDB_USER" || -z "$COUCHDB_PASSWORD" ]]; then
    echo "[ERROR] COUCHDB_URI, COUCHDB_USER, COUCHDB_PASSWORD are required" >&2
    exit 1
fi

WORK_DIR="$(mktemp -d "${TMPDIR:-/tmp}/livesync-cli-two-db-test.XXXXXX")"
trap 'rm -rf "$WORK_DIR"' EXIT

if [[ "$RUN_BUILD" == "1" ]]; then
    echo "[INFO] building CLI..."
    npm run build
fi

run_cli() {
    "${CLI_CMD[@]}" "$@"
}

DB_SUFFIX="$(date +%s)-$RANDOM"
COUCHDB_DBNAME="${COUCHDB_DBNAME_BASE}-${DB_SUFFIX}"

echo "[INFO] using CouchDB database: $COUCHDB_DBNAME"

VAULT_A="$WORK_DIR/vault-a"
VAULT_B="$WORK_DIR/vault-b"
SETTINGS_A="$WORK_DIR/a-settings.json"
SETTINGS_B="$WORK_DIR/b-settings.json"
mkdir -p "$VAULT_A" "$VAULT_B"

run_cli init-settings --force "$SETTINGS_A" >/dev/null
run_cli init-settings --force "$SETTINGS_B" >/dev/null

apply_settings() {
    local settings_file="$1"
    SETTINGS_FILE="$settings_file" \
    COUCHDB_URI="$COUCHDB_URI" \
    COUCHDB_USER="$COUCHDB_USER" \
    COUCHDB_PASSWORD="$COUCHDB_PASSWORD" \
    COUCHDB_DBNAME="$COUCHDB_DBNAME" \
    node <<'NODE'
const fs = require("node:fs");
const settingsPath = process.env.SETTINGS_FILE;
const data = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
data.couchDB_URI = process.env.COUCHDB_URI;
data.couchDB_USER = process.env.COUCHDB_USER;
data.couchDB_PASSWORD = process.env.COUCHDB_PASSWORD;
data.couchDB_DBNAME = process.env.COUCHDB_DBNAME;
data.liveSync = true;
data.syncOnStart = false;
data.syncOnSave = false;
data.usePluginSync = false;
data.isConfigured = true;
fs.writeFileSync(settingsPath, JSON.stringify(data, null, 2), "utf-8");
NODE
}

apply_settings "$SETTINGS_A"
apply_settings "$SETTINGS_B"

run_cli_a() {
    run_cli "$VAULT_A" --settings "$SETTINGS_A" "$@"
}

run_cli_b() {
    run_cli "$VAULT_B" --settings "$SETTINGS_B" "$@"
}

sync_a() {
    run_cli_a sync >/dev/null
}

sync_b() {
    run_cli_b sync >/dev/null
}

cat_a() {
    run_cli_a cat "$1"
}

cat_b() {
    run_cli_b cat "$1"
}

assert_equal() {
    local expected="$1"
    local actual="$2"
    local message="$3"
    if [[ "$expected" != "$actual" ]]; then
        echo "[FAIL] $message" >&2
        echo "expected: $expected" >&2
        echo "actual: $actual" >&2
        exit 1
    fi
}

echo "[INFO] case1: A creates file, B can read after sync"
printf 'from-a\n' | run_cli_a put shared/from-a.txt >/dev/null
sync_a
sync_b
VALUE_FROM_B="$(cat_b shared/from-a.txt)"
assert_equal "from-a" "$VALUE_FROM_B" "B could not read file created on A"
echo "[PASS] case1 passed"

echo "[INFO] case2: B creates file, A can read after sync"
printf 'from-b\n' | run_cli_b put shared/from-b.txt >/dev/null
sync_b
sync_a
VALUE_FROM_A="$(cat_a shared/from-b.txt)"
assert_equal "from-b" "$VALUE_FROM_A" "A could not read file created on B"
echo "[PASS] case2 passed"

echo "[INFO] case3: concurrent edits create conflict"
printf 'base\n' | run_cli_a put shared/conflicted.txt >/dev/null
sync_a
sync_b

printf 'edit-from-a\n' | run_cli_a put shared/conflicted.txt >/dev/null
printf 'edit-from-b\n' | run_cli_b put shared/conflicted.txt >/dev/null

sync_a
sync_b

INFO_A="$WORK_DIR/info-a.txt"
INFO_B="$WORK_DIR/info-b.txt"
run_cli_a info shared/conflicted.txt > "$INFO_A"
run_cli_b info shared/conflicted.txt > "$INFO_B"

if grep -q '^Conflicts: N/A$' "$INFO_A" && grep -q '^Conflicts: N/A$' "$INFO_B"; then
    echo "[FAIL] expected conflict after concurrent edits, but both sides show N/A" >&2
    echo "--- A info ---" >&2
    cat "$INFO_A" >&2
    echo "--- B info ---" >&2
    cat "$INFO_B" >&2
    exit 1
fi
echo "[PASS] case3 conflict detected"

echo "[INFO] case4: resolve on A, sync, and verify B has no conflict"
KEEP_REV="$(sed -n 's/^Revision:[[:space:]]*//p' "$INFO_A" | head -n 1)"
if [[ -z "$KEEP_REV" ]]; then
    echo "[FAIL] could not read Revision from A info output" >&2
    cat "$INFO_A" >&2
    exit 1
fi

run_cli_a resolve shared/conflicted.txt "$KEEP_REV" >/dev/null
sync_a
sync_b

INFO_B_AFTER="$WORK_DIR/info-b-after-resolve.txt"
run_cli_b info shared/conflicted.txt > "$INFO_B_AFTER"
if ! grep -q '^Conflicts: N/A$' "$INFO_B_AFTER"; then
    echo "[FAIL] B still has conflicts after resolving on A and syncing" >&2
    cat "$INFO_B_AFTER" >&2
    exit 1
fi

CONTENT_A="$WORK_DIR/conflicted-a.txt"
CONTENT_B="$WORK_DIR/conflicted-b.txt"
cat_a shared/conflicted.txt > "$CONTENT_A"
cat_b shared/conflicted.txt > "$CONTENT_B"
if ! cmp -s "$CONTENT_A" "$CONTENT_B"; then
    echo "[FAIL] resolved content mismatch between A and B" >&2
    echo "--- A ---" >&2
    cat "$CONTENT_A" >&2
    echo "--- B ---" >&2
    cat "$CONTENT_B" >&2
    exit 1
fi

echo "[PASS] case4 passed"
echo "[PASS] all sync/resolve scenarios passed"
32
src/apps/cli/tsconfig.json
Normal file
@@ -0,0 +1,32 @@
{
    "extends": "../../../tsconfig.json",
    "compilerOptions": {
        "target": "ES2020",
        "useDefineForClassFields": true,
        "module": "ESNext",
        "lib": ["ES2020", "DOM", "DOM.Iterable"],
        "skipLibCheck": true,

        /* Bundler mode */
        "moduleResolution": "bundler",
        "allowImportingTsExtensions": true,
        "resolveJsonModule": true,
        "isolatedModules": true,
        "noEmit": true,

        /* Linting */
        "strict": false,
        "noUnusedLocals": false,
        "noUnusedParameters": false,
        "noFallthroughCasesInSwitch": true,

        /* Path mapping */
        "baseUrl": ".",
        "paths": {
            "@/*": ["../../*"],
            "@lib/*": ["../../lib/src/*"]
        }
    },
    "include": ["*.ts", "**/*.ts", "**/*.tsx"],
    "exclude": ["node_modules", "dist"]
}
47
src/apps/cli/util/couchdb-init.sh
Executable file
@@ -0,0 +1,47 @@
#!/bin/bash
set -euo pipefail

if [[ -z "${hostname:-}" ]]; then
    echo "ERROR: Hostname missing"
    exit 1
fi
if [[ -z "${username:-}" ]]; then
    echo "ERROR: Username missing"
    exit 1
fi

if [[ -z "${password:-}" ]]; then
    echo "ERROR: Password missing"
    exit 1
fi
if [[ -z "${node:-}" ]]; then
    echo "INFO: defaulting to _local"
    node=_local
fi

hostname="${hostname%/}"
# Podman environments often resolve localhost to ::1 while published ports are IPv4-only.
hostname="${hostname/localhost/127.0.0.1}"

curl_json() {
    curl -4 -sS --fail --connect-timeout 3 --max-time 15 "$@"
}

echo "-- Configuring CouchDB by REST APIs... -->"
echo " Hostname: $hostname"
echo " Username: $username"

until (curl_json -X POST "${hostname}/_cluster_setup" -H "Content-Type: application/json" -d "{\"action\":\"enable_single_node\",\"username\":\"${username}\",\"password\":\"${password}\",\"bind_address\":\"0.0.0.0\",\"port\":5984,\"singlenode\":true}" --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/chttpd/require_valid_user" -H "Content-Type: application/json" -d '"true"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/chttpd_auth/require_valid_user" -H "Content-Type: application/json" -d '"true"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/httpd/WWW-Authenticate" -H "Content-Type: application/json" -d '"Basic realm=\"couchdb\""' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/httpd/enable_cors" -H "Content-Type: application/json" -d '"true"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/chttpd/enable_cors" -H "Content-Type: application/json" -d '"true"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/chttpd/max_http_request_size" -H "Content-Type: application/json" -d '"4294967296"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/couchdb/max_document_size" -H "Content-Type: application/json" -d '"50000000"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/cors/credentials" -H "Content-Type: application/json" -d '"true"' --user "${username}:${password}"); do sleep 5; done
until (curl_json -X PUT "${hostname}/_node/${node}/_config/cors/origins" -H "Content-Type: application/json" -d '"*"' --user "${username}:${password}"); do sleep 5; done

# Create test database
until (curl_json -X PUT --user "${username}:${password}" "${hostname}/${dbname}" >/dev/null); do sleep 5; done
echo "<-- Configuring CouchDB by REST APIs Done!"
4
src/apps/cli/util/couchdb-start.sh
Executable file
@@ -0,0 +1,4 @@
#!/bin/bash
set -e
echo "username: $username"
docker run -d --name couchdb-test -p 5989:5984 -e COUCHDB_USER="$username" -e COUCHDB_PASSWORD="$password" -e COUCHDB_SINGLE_NODE=y couchdb:3.5.0
3
src/apps/cli/util/couchdb-stop.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash
docker stop couchdb-test
docker rm couchdb-test
47
src/apps/cli/util/minio-init.sh
Executable file
@@ -0,0 +1,47 @@
#!/bin/bash
set -e
cat >/tmp/mybucket-rw.json <<EOF
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:GetBucketLocation","s3:ListBucket"],
            "Resource": ["arn:aws:s3:::$bucketName"]
        },
        {
            "Effect": "Allow",
            "Action": ["s3:GetObject","s3:PutObject","s3:DeleteObject"],
            "Resource": ["arn:aws:s3:::$bucketName/*"]
        }
    ]
}
EOF
# echo "<CORSConfiguration>
# <CORSRule>
# <AllowedOrigin>http://localhost:63315</AllowedOrigin>
# <AllowedOrigin>http://localhost:63316</AllowedOrigin>
# <AllowedOrigin>http://localhost</AllowedOrigin>
# <AllowedMethod>GET</AllowedMethod>
# <AllowedMethod>PUT</AllowedMethod>
# <AllowedMethod>POST</AllowedMethod>
# <AllowedMethod>DELETE</AllowedMethod>
# <AllowedMethod>HEAD</AllowedMethod>
# <AllowedHeader>*</AllowedHeader>
# </CORSRule>
# </CORSConfiguration>" > /tmp/cors.xml
# docker run --rm --network host -v /tmp/mybucket-rw.json:/tmp/mybucket-rw.json --entrypoint=/bin/sh minio/mc -c "
# mc alias set myminio $minioEndpoint $username $password
# mc mb --ignore-existing myminio/$bucketName
# mc admin policy create myminio my-custom-policy /tmp/mybucket-rw.json
# echo 'Creating service account for user $username with access key $accessKey'
# mc admin user svcacct add --access-key '$accessKey' --secret-key '$secretKey' myminio '$username'
# mc admin policy attach myminio my-custom-policy --user '$accessKey'
# echo 'Verifying policy and user creation:'
# mc admin user svcacct info myminio '$accessKey'
# "

docker run --rm --network host -v /tmp/mybucket-rw.json:/tmp/mybucket-rw.json --entrypoint=/bin/sh minio/mc -c "
mc alias set myminio $minioEndpoint $accessKey $secretKey
mc mb --ignore-existing myminio/$bucketName
"
2
src/apps/cli/util/minio-start.sh
Executable file
@@ -0,0 +1,2 @@
#!/bin/bash
docker run -d --name minio-test -p 9000:9000 -p 9001:9001 -e MINIO_ROOT_USER="$accessKey" -e MINIO_ROOT_PASSWORD="$secretKey" -e MINIO_SERVER_URL="$minioEndpoint" minio/minio server /data --console-address ':9001'
3
src/apps/cli/util/minio-stop.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash
docker stop minio-test
docker rm minio-test
55
src/apps/cli/vite.config.ts
Normal file
@@ -0,0 +1,55 @@
import { defineConfig } from "vite";
import { svelte } from "@sveltejs/vite-plugin-svelte";
import path from "node:path";
import { readFileSync } from "node:fs";
const packageJson = JSON.parse(readFileSync("../../../package.json", "utf-8"));
const manifestJson = JSON.parse(readFileSync("../../../manifest.json", "utf-8"));
// https://vite.dev/config/
const defaultExternal = ["obsidian", "electron", "crypto", "pouchdb-adapter-leveldb", "commander", "punycode"];
export default defineConfig({
    plugins: [svelte()],
    resolve: {
        alias: {
            "@lib/worker/bgWorker.ts": "../../lib/src/worker/bgWorker.mock.ts",
            "@lib/pouchdb/pouchdb-browser.ts": path.resolve(__dirname, "lib/pouchdb-node.ts"),
            "@": path.resolve(__dirname, "../../"),
            "@lib": path.resolve(__dirname, "../../lib/src"),
            "../../src/worker/bgWorker.ts": "../../src/worker/bgWorker.mock.ts",
        },
    },

    base: "./",
    build: {
        outDir: "dist",
        emptyOutDir: true,
        minify: false,
        rollupOptions: {
            input: {
                index: path.resolve(__dirname, "main.ts"),
            },
            external: (id) => {
                if (defaultExternal.includes(id)) return true;
                if (id.startsWith(".") || id.startsWith("/")) return false;
                if (id.startsWith("@/") || id.startsWith("@lib/")) return false;
                if (id.endsWith(".ts") || id.endsWith(".js")) return false;
                if (id === "fs" || id === "fs/promises" || id === "path" || id === "crypto") return true;
                if (id.startsWith("pouchdb-")) return true;
                if (id.startsWith("node:")) return true;
                return false;
            },
        },
        lib: {
            entry: path.resolve(__dirname, "main.ts"),
            formats: ["cjs"],
            fileName: "index",
        },
    },
    define: {
        self: "globalThis",
        global: "globalThis",
        nonInteractive: "true",
        // localStorage: "undefined", // Prevent usage of localStorage in the CLI environment
        MANIFEST_VERSION: JSON.stringify(process.env.MANIFEST_VERSION || manifestJson.version || "0.0.0"),
        PACKAGE_VERSION: JSON.stringify(process.env.PACKAGE_VERSION || packageJson.version || "0.0.0"),
    },
});
4
src/apps/webapp/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
node_modules
dist
.DS_Store
*.log
181
src/apps/webapp/README.md
Normal file
@@ -0,0 +1,181 @@
# LiveSync WebApp

Browser-based implementation of Self-hosted LiveSync using the FileSystem API.
Note: I (vrtmrz) have not tested this much yet.

## Features

- 🌐 Runs entirely in the browser
- 📁 Uses FileSystem API to access your local vault
- 🔄 Syncs with CouchDB or an Object Storage server (compatible with the Self-hosted LiveSync plugin)
- 🚫 No server-side code required!!
- 💾 Settings stored in `.livesync/settings.json` within your vault
- 👁️ Real-time file watching (Chrome 124+ with FileSystemObserver)

## Requirements

- **FileSystem API support**:
    - Chrome/Edge 86+ (required)
    - Opera 72+ (required)
    - Safari 15.2+ (experimental, limited support)
    - Firefox: Not supported yet

- **FileSystemObserver support** (optional, for real-time file watching):
    - Chrome 124+ (recommended)
    - Without this, files are only scanned on startup (see the detection sketch below)
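
The support matrix above can be checked at runtime before the app opens a vault. A minimal detection sketch, assuming only the standard `window` globals (`showDirectoryPicker` and `FileSystemObserver` are not yet part of the default TypeScript DOM typings, hence the `any` cast):

```typescript
// Detect FileSystem API support and the optional FileSystemObserver at start-up.
export function detectCapabilities() {
    const hasFileSystemAccess = "showDirectoryPicker" in window; // Chrome/Edge 86+, Opera 72+
    const hasObserver = "FileSystemObserver" in (window as any); // Chrome 124+, experimental
    return {
        hasFileSystemAccess,
        // Without the observer, fall back to scanning files on startup only.
        watchMode: hasObserver ? ("realtime" as const) : ("startup-scan" as const),
    };
}
```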

## Getting Started

### Installation

```bash
# Install dependencies (run from the repository root, not from src/apps/webapp,
# because dependencies are shared between the apps and the main library)
npm install
```

### Development

```bash
# Start the development server (from the `src/apps/webapp` directory)
cd src/apps/webapp
npm run dev
```

This will start a development server at `http://localhost:3000`.

### Build

```bash
# Build the project (from the `src/apps/webapp` directory)
cd src/apps/webapp
npm run build
```

The built files will be in the `dist` directory.

### Usage

1. Open the webapp in your browser
2. Grant directory access when prompted (a minimal sketch of this request follows the list)
3. Configure CouchDB connection by editing `.livesync/settings.json` in your vault
    - You can also copy data.json from Obsidian's plug-in folder.
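
Step 2 relies on the File System Access API's directory picker. A minimal sketch of what that request looks like; the function name `openVault` is illustrative rather than part of this codebase, and the permission helpers are cast to `any` because they are not yet in the default TypeScript DOM typings:

```typescript
// Must be called from a user gesture (e.g. a button click).
async function openVault(): Promise<FileSystemDirectoryHandle> {
    const handle: FileSystemDirectoryHandle = await (window as any).showDirectoryPicker({ mode: "readwrite" });
    // The browser may have granted read-only access; upgrade it if needed.
    if ((await (handle as any).queryPermission({ mode: "readwrite" })) !== "granted") {
        await (handle as any).requestPermission({ mode: "readwrite" });
    }
    return handle;
}
```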

Example `.livesync/settings.json`:

```json
{
    "couchDB_URI": "https://your-couchdb-server.com",
    "couchDB_USER": "your-username",
    "couchDB_PASSWORD": "your-password",
    "couchDB_DBNAME": "your-database",
    "isConfigured": true,
    "liveSync": true,
    "syncOnSave": true
}
```

After editing, reload the page.

## Architecture

### Directory Structure

```
webapp/
├── adapters/ # FileSystem API adapters
│   ├── FSAPITypes.ts
│   ├── FSAPIPathAdapter.ts
│   ├── FSAPITypeGuardAdapter.ts
│   ├── FSAPIConversionAdapter.ts
│   ├── FSAPIStorageAdapter.ts
│   ├── FSAPIVaultAdapter.ts
│   └── FSAPIFileSystemAdapter.ts
├── managers/ # Event managers
│   ├── FSAPIStorageEventManagerAdapter.ts
│   └── StorageEventManagerFSAPI.ts
├── serviceModules/ # Service implementations
│   ├── FileAccessFSAPI.ts
│   ├── ServiceFileAccessImpl.ts
│   ├── DatabaseFileAccess.ts
│   └── FSAPIServiceModules.ts
├── main.ts # Application entry point
├── index.html # HTML entry
├── package.json
├── vite.config.ts
└── README.md
```

### Key Components

1. **Adapters**: Implement `IFileSystemAdapter` interface using FileSystem API
2. **Managers**: Handle storage events and file watching
3. **Service Modules**: Integrate with LiveSyncBaseCore
4. **Main**: Application initialization and lifecycle management

### Service Hub

Uses `BrowserServiceHub` which provides:

- Database service (IndexedDB via PouchDB)
- Settings service (file-based in `.livesync/settings.json`)
- Replication service
- File processing service
- And more...

## Limitations

- **Real-time file watching**: Requires Chrome 124+ with FileSystemObserver (see the watcher sketch after this list)
    - Without it, changes are only detected on manual refresh
- **Performance**: Slower than native file system access
- **Permissions**: Requires user to grant directory access (cached via IndexedDB)
- **Browser support**: Limited to browsers with FileSystem API support
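
Where FileSystemObserver is available, watching looks roughly like the sketch below. This is hedged: the API is experimental, its record shape may still change, and `onChange` is an illustrative callback rather than part of this codebase:

```typescript
// Hedged sketch of FileSystemObserver usage (Chrome 124+, experimental).
function watchVault(root: FileSystemDirectoryHandle, onChange: (path: string) => void) {
    const Observer = (window as any).FileSystemObserver;
    if (!Observer) return null; // No observer: the app falls back to startup scanning.
    const observer = new Observer((records: any[]) => {
        for (const record of records) {
            // relativePathComponents is relative to the observed root directory.
            onChange(record.relativePathComponents.join("/"));
        }
    });
    void observer.observe(root, { recursive: true });
    return observer;
}
```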

## Differences from CLI Version

- Uses `BrowserServiceHub` instead of `HeadlessServiceHub`
- Uses FileSystem API instead of Node.js `fs`
- Settings stored in `.livesync/settings.json` in vault
- Real-time file watching only with FileSystemObserver (Chrome 124+)

## Differences from Obsidian Plugin

- No Obsidian-specific modules (UI, settings dialog, etc.)
- Simplified configuration
- No plugin/theme sync features
- No internal file handling (`.obsidian` folder)

## Development Notes

- TypeScript configuration: Uses project's tsconfig.json
- Module resolution: Aliased paths via Vite config
- External dependencies: Bundled by Vite

## Troubleshooting

### "Failed to get directory access"

- Make sure you're using a supported browser
- Try refreshing the page
- Clear browser cache and IndexedDB

### "Settings not found"

- Check that `.livesync/settings.json` exists in your vault directory
- Verify the JSON format is valid
- Create the file manually if needed

### "File watching not working"

- Make sure you're using Chrome 124 or later
- Check browser console for FileSystemObserver messages
- Try manually triggering sync if automatic watching isn't available

### "Sync not working"

- Verify CouchDB credentials
- Check browser console for errors
- Ensure CouchDB server is accessible (CORS enabled)

## License

Same as the main Self-hosted LiveSync project.
34
src/apps/webapp/adapters/FSAPIConversionAdapter.ts
Normal file
@@ -0,0 +1,34 @@
import type { UXFileInfoStub, UXFolderInfo } from "@lib/common/types";
import type { IConversionAdapter } from "@lib/serviceModules/adapters";
import type { FSAPIFile, FSAPIFolder } from "./FSAPITypes";

/**
 * Conversion adapter implementation for FileSystem API
 */
export class FSAPIConversionAdapter implements IConversionAdapter<FSAPIFile, FSAPIFolder> {
    nativeFileToUXFileInfoStub(file: FSAPIFile): UXFileInfoStub {
        const pathParts = file.path.split("/");
        const name = pathParts[pathParts.length - 1] || file.handle.name;

        return {
            name: name,
            path: file.path,
            stat: file.stat,
            isFolder: false,
        };
    }

    nativeFolderToUXFolder(folder: FSAPIFolder): UXFolderInfo {
        const pathParts = folder.path.split("/");
        const name = pathParts[pathParts.length - 1] || folder.handle.name;
        const parentPath = pathParts.slice(0, -1).join("/");

        return {
            name: name,
            path: folder.path,
            isFolder: true,
            children: [],
            parent: parentPath as any,
        };
    }
}
214
src/apps/webapp/adapters/FSAPIFileSystemAdapter.ts
Normal file
@@ -0,0 +1,214 @@
import type { FilePath, UXStat } from "@lib/common/types";
import type { IFileSystemAdapter } from "@lib/serviceModules/adapters";
import { FSAPIPathAdapter } from "./FSAPIPathAdapter";
import { FSAPITypeGuardAdapter } from "./FSAPITypeGuardAdapter";
import { FSAPIConversionAdapter } from "./FSAPIConversionAdapter";
import { FSAPIStorageAdapter } from "./FSAPIStorageAdapter";
import { FSAPIVaultAdapter } from "./FSAPIVaultAdapter";
import type { FSAPIFile, FSAPIFolder, FSAPIStat } from "./FSAPITypes";
import { shareRunningResult } from "octagonal-wheels/concurrency/lock_v2";

/**
 * Complete file system adapter implementation for FileSystem API
 */
export class FSAPIFileSystemAdapter implements IFileSystemAdapter<FSAPIFile, FSAPIFile, FSAPIFolder, FSAPIStat> {
    readonly path: FSAPIPathAdapter;
    readonly typeGuard: FSAPITypeGuardAdapter;
    readonly conversion: FSAPIConversionAdapter;
    readonly storage: FSAPIStorageAdapter;
    readonly vault: FSAPIVaultAdapter;

    private fileCache = new Map<string, FSAPIFile>();
    private handleCache = new Map<string, FileSystemFileHandle>();

    constructor(private rootHandle: FileSystemDirectoryHandle) {
        this.path = new FSAPIPathAdapter();
        this.typeGuard = new FSAPITypeGuardAdapter();
        this.conversion = new FSAPIConversionAdapter();
        this.storage = new FSAPIStorageAdapter(rootHandle);
        this.vault = new FSAPIVaultAdapter(rootHandle);
    }

    private normalisePath(path: FilePath | string): string {
        return this.path.normalisePath(path as string);
    }

    /**
     * Get file handle for a given path
     */
    private async getFileHandleByPath(p: FilePath | string): Promise<FileSystemFileHandle | null> {
        const pathStr = p as string;

        // Check cache first
        const cached = this.handleCache.get(pathStr);
        if (cached) return cached;

        try {
            const parts = pathStr.split("/").filter((part) => part !== "");
            if (parts.length === 0) return null;

            let currentHandle: FileSystemDirectoryHandle = this.rootHandle;
            const fileName = parts[parts.length - 1];

            // Navigate to the parent directory
            for (let i = 0; i < parts.length - 1; i++) {
                currentHandle = await currentHandle.getDirectoryHandle(parts[i]);
            }

            const fileHandle = await currentHandle.getFileHandle(fileName);
            this.handleCache.set(pathStr, fileHandle);
            return fileHandle;
        } catch {
            return null;
        }
    }

    async getAbstractFileByPath(p: FilePath | string): Promise<FSAPIFile | null> {
        const pathStr = this.normalisePath(p);

        const cached = this.fileCache.get(pathStr);
        if (cached) {
            return cached;
        }

        return await this.refreshFile(pathStr);
    }

    /**
     * Case-insensitive lookup for a path: tries an exact match first, then compares lower-cased paths against the cache, rescanning the vault once if needed.
     */
    async getAbstractFileByPathInsensitive(p: FilePath | string): Promise<FSAPIFile | null> {
        const pathStr = this.normalisePath(p);
        const exact = await this.getAbstractFileByPath(pathStr);
        if (exact) {
            return exact;
        }
        // TODO: Refactor: Very, Very heavy.

        const lowerPath = pathStr.toLowerCase();
        for (const [cachedPath, cachedFile] of this.fileCache.entries()) {
            if (cachedPath.toLowerCase() === lowerPath) {
                return cachedFile;
            }
        }

        await this.scanDirectory();

        for (const [cachedPath, cachedFile] of this.fileCache.entries()) {
            if (cachedPath.toLowerCase() === lowerPath) {
                return cachedFile;
            }
        }

        return null;
    }

    async getFiles(): Promise<FSAPIFile[]> {
        if (this.fileCache.size === 0) {
            await this.scanDirectory();
        }
        return Array.from(this.fileCache.values());
    }

    async statFromNative(file: FSAPIFile): Promise<UXStat> {
        // Refresh stat from the file handle
        try {
            const fileObject = await file.handle.getFile();
            return {
                size: fileObject.size,
                mtime: fileObject.lastModified,
                ctime: fileObject.lastModified,
                type: "file",
            };
        } catch {
            return file.stat;
        }
    }

    async reconcileInternalFile(p: string): Promise<void> {
        // No-op in webapp version
        // This is used by Obsidian to sync internal file metadata
    }

    /**
     * Refresh file cache for a specific path
     */
    async refreshFile(p: string): Promise<FSAPIFile | null> {
        const pathStr = this.normalisePath(p);
        const handle = await this.getFileHandleByPath(pathStr);
        if (!handle) {
            this.fileCache.delete(pathStr);
            this.handleCache.delete(pathStr);
            return null;
        }

        const fileObject = await handle.getFile();
        const file: FSAPIFile = {
            path: pathStr as FilePath,
            stat: {
                size: fileObject.size,
                mtime: fileObject.lastModified,
                ctime: fileObject.lastModified,
                type: "file",
            },
            handle: handle,
        };

        this.fileCache.set(pathStr, file);
        this.handleCache.set(pathStr, handle);
        return file;
    }

    /**
     * Helper method to recursively scan directory and populate file cache
     */
    async scanDirectory(relativePath: string = ""): Promise<void> {
        return shareRunningResult("scanDirectory:" + relativePath, async () => {
            try {
                const parts = relativePath.split("/").filter((part) => part !== "");
                let currentHandle = this.rootHandle;

                for (const part of parts) {
                    currentHandle = await currentHandle.getDirectoryHandle(part);
                }

                // Use AsyncIterator instead of .values() for better compatibility
                for await (const [name, entry] of (currentHandle as any).entries()) {
                    const entryPath = relativePath ? `${relativePath}/${name}` : name;

                    if (entry.kind === "directory") {
                        // Recursively scan subdirectories
                        await this.scanDirectory(entryPath);
                    } else if (entry.kind === "file") {
                        const fileHandle = entry as FileSystemFileHandle;
                        const fileObject = await fileHandle.getFile();

                        const file: FSAPIFile = {
                            path: entryPath as FilePath,
                            stat: {
                                size: fileObject.size,
                                mtime: fileObject.lastModified,
                                ctime: fileObject.lastModified,
                                type: "file",
                            },
                            handle: fileHandle,
                        };

                        this.fileCache.set(entryPath, file);
                        this.handleCache.set(entryPath, fileHandle);
                    }
                }
            } catch (error) {
                console.error(`Error scanning directory ${relativePath}:`, error);
            }
        });
    }

    /**
     * Clear all caches
     */
    clearCache(): void {
        this.fileCache.clear();
        this.handleCache.clear();
    }
}
18
src/apps/webapp/adapters/FSAPIPathAdapter.ts
Normal file
@@ -0,0 +1,18 @@
import type { FilePath } from "@lib/common/types";
import type { IPathAdapter } from "@lib/serviceModules/adapters";
import type { FSAPIFile } from "./FSAPITypes";

/**
 * Path adapter implementation for FileSystem API
 */
export class FSAPIPathAdapter implements IPathAdapter<FSAPIFile> {
    getPath(file: string | FSAPIFile): FilePath {
        return (typeof file === "string" ? file : file.path) as FilePath;
    }

    normalisePath(p: string): string {
        // Normalize path separators to forward slashes (like Obsidian)
        // Remove leading/trailing slashes
        return p.replace(/\\/g, "/").replace(/^\/+|\/+$/g, "");
    }
}
210
src/apps/webapp/adapters/FSAPIStorageAdapter.ts
Normal file
@@ -0,0 +1,210 @@
import type { UXDataWriteOptions } from "@lib/common/types";
import type { IStorageAdapter } from "@lib/serviceModules/adapters";
import type { FSAPIStat } from "./FSAPITypes";

/**
 * Storage adapter implementation for FileSystem API
 */
export class FSAPIStorageAdapter implements IStorageAdapter<FSAPIStat> {
    constructor(private rootHandle: FileSystemDirectoryHandle) {}

    /**
     * Resolve a path to directory and file handles
     */
    private async resolvePath(p: string): Promise<{
        dirHandle: FileSystemDirectoryHandle;
        fileName: string;
    } | null> {
        try {
            const parts = p.split("/").filter((part) => part !== "");
            if (parts.length === 0) {
                return null;
            }

            let currentHandle = this.rootHandle;
            const fileName = parts[parts.length - 1];

            // Navigate to the parent directory
            for (let i = 0; i < parts.length - 1; i++) {
                currentHandle = await currentHandle.getDirectoryHandle(parts[i]);
            }

            return { dirHandle: currentHandle, fileName };
        } catch {
            return null;
        }
    }

    /**
     * Get file handle for a given path
     */
    private async getFileHandle(p: string): Promise<FileSystemFileHandle | null> {
        const resolved = await this.resolvePath(p);
        if (!resolved) return null;

        try {
            return await resolved.dirHandle.getFileHandle(resolved.fileName);
        } catch {
            return null;
        }
    }

    /**
     * Get directory handle for a given path
     */
    private async getDirectoryHandle(p: string): Promise<FileSystemDirectoryHandle | null> {
        try {
            const parts = p.split("/").filter((part) => part !== "");
            if (parts.length === 0) {
                return this.rootHandle;
            }

            let currentHandle = this.rootHandle;
            for (const part of parts) {
                currentHandle = await currentHandle.getDirectoryHandle(part);
            }

            return currentHandle;
        } catch {
            return null;
        }
    }

    async exists(p: string): Promise<boolean> {
        const fileHandle = await this.getFileHandle(p);
        if (fileHandle) return true;

        const dirHandle = await this.getDirectoryHandle(p);
        return dirHandle !== null;
    }

    async trystat(p: string): Promise<FSAPIStat | null> {
        // Try as file first
        const fileHandle = await this.getFileHandle(p);
        if (fileHandle) {
            const file = await fileHandle.getFile();
            return {
                size: file.size,
                mtime: file.lastModified,
                ctime: file.lastModified,
                type: "file",
            };
        }

        // Try as directory
        const dirHandle = await this.getDirectoryHandle(p);
        if (dirHandle) {
            return {
                size: 0,
                mtime: Date.now(),
                ctime: Date.now(),
                type: "folder",
            };
        }

        return null;
    }

    async stat(p: string): Promise<FSAPIStat | null> {
        return await this.trystat(p);
    }

    async mkdir(p: string): Promise<void> {
        const parts = p.split("/").filter((part) => part !== "");
        let currentHandle = this.rootHandle;

        for (const part of parts) {
            currentHandle = await currentHandle.getDirectoryHandle(part, { create: true });
        }
    }

    async remove(p: string): Promise<void> {
        const resolved = await this.resolvePath(p);
        if (!resolved) return;

        await resolved.dirHandle.removeEntry(resolved.fileName, { recursive: true });
    }

    async read(p: string): Promise<string> {
        const fileHandle = await this.getFileHandle(p);
        if (!fileHandle) {
            throw new Error(`File not found: ${p}`);
        }

        const file = await fileHandle.getFile();
        return await file.text();
    }

    async readBinary(p: string): Promise<ArrayBuffer> {
        const fileHandle = await this.getFileHandle(p);
        if (!fileHandle) {
            throw new Error(`File not found: ${p}`);
        }

        const file = await fileHandle.getFile();
        return await file.arrayBuffer();
    }

    async write(p: string, data: string, options?: UXDataWriteOptions): Promise<void> {
        const resolved = await this.resolvePath(p);
        if (!resolved) {
            throw new Error(`Invalid path: ${p}`);
        }

        // Ensure parent directory exists
        await this.mkdir(p.split("/").slice(0, -1).join("/"));

        const fileHandle = await resolved.dirHandle.getFileHandle(resolved.fileName, { create: true });
        const writable = await fileHandle.createWritable();
        await writable.write(data);
        await writable.close();
    }

    async writeBinary(p: string, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<void> {
        const resolved = await this.resolvePath(p);
        if (!resolved) {
            throw new Error(`Invalid path: ${p}`);
        }

        // Ensure parent directory exists
        await this.mkdir(p.split("/").slice(0, -1).join("/"));

        const fileHandle = await resolved.dirHandle.getFileHandle(resolved.fileName, { create: true });
        const writable = await fileHandle.createWritable();
        await writable.write(data);
        await writable.close();
    }

    async append(p: string, data: string, options?: UXDataWriteOptions): Promise<void> {
        const existing = await this.exists(p);
        if (existing) {
            const currentContent = await this.read(p);
            await this.write(p, currentContent + data, options);
        } else {
            await this.write(p, data, options);
        }
    }

    async list(basePath: string): Promise<{ files: string[]; folders: string[] }> {
        const dirHandle = await this.getDirectoryHandle(basePath);
        if (!dirHandle) {
            return { files: [], folders: [] };
        }

        const files: string[] = [];
        const folders: string[] = [];

        // Use AsyncIterator instead of .values() for better compatibility
        for await (const [name, entry] of (dirHandle as any).entries()) {
            const entryPath = basePath ? `${basePath}/${name}` : name;

            if (entry.kind === "directory") {
                folders.push(entryPath);
            } else if (entry.kind === "file") {
                files.push(entryPath);
            }
        }

        return { files, folders };
    }
}
17
src/apps/webapp/adapters/FSAPITypeGuardAdapter.ts
Normal file
@@ -0,0 +1,17 @@
import type { ITypeGuardAdapter } from "@lib/serviceModules/adapters";
import type { FSAPIFile, FSAPIFolder } from "./FSAPITypes";

/**
 * Type guard adapter implementation for FileSystem API
 */
export class FSAPITypeGuardAdapter implements ITypeGuardAdapter<FSAPIFile, FSAPIFolder> {
    isFile(file: any): file is FSAPIFile {
        return (
            file && typeof file === "object" && "path" in file && "stat" in file && "handle" in file && !file.isFolder
        );
    }

    isFolder(item: any): item is FSAPIFolder {
        return item && typeof item === "object" && "path" in item && item.isFolder === true && "handle" in item;
    }
}
24
src/apps/webapp/adapters/FSAPITypes.ts
Normal file
@@ -0,0 +1,24 @@
import type { FilePath, UXStat } from "@lib/common/types";

/**
 * FileSystem API file representation
 */
export type FSAPIFile = {
    path: FilePath;
    stat: UXStat;
    handle: FileSystemFileHandle;
};

/**
 * FileSystem API folder representation
 */
export type FSAPIFolder = {
    path: FilePath;
    isFolder: true;
    handle: FileSystemDirectoryHandle;
};

/**
 * FileSystem API stat type (compatible with UXStat)
 */
export type FSAPIStat = UXStat;
123
src/apps/webapp/adapters/FSAPIVaultAdapter.ts
Normal file
@@ -0,0 +1,123 @@
import type { FilePath, UXDataWriteOptions } from "@lib/common/types";
import type { IVaultAdapter } from "@lib/serviceModules/adapters";
import type { FSAPIFile, FSAPIFolder } from "./FSAPITypes";

/**
 * Vault adapter implementation for FileSystem API
 */
export class FSAPIVaultAdapter implements IVaultAdapter<FSAPIFile> {
    constructor(private rootHandle: FileSystemDirectoryHandle) {}

    async read(file: FSAPIFile): Promise<string> {
        const fileObject = await file.handle.getFile();
        return await fileObject.text();
    }

    async cachedRead(file: FSAPIFile): Promise<string> {
        // No caching in webapp version, just read directly
        return await this.read(file);
    }

    async readBinary(file: FSAPIFile): Promise<ArrayBuffer> {
        const fileObject = await file.handle.getFile();
        return await fileObject.arrayBuffer();
    }

    async modify(file: FSAPIFile, data: string, options?: UXDataWriteOptions): Promise<void> {
        const writable = await file.handle.createWritable();
        await writable.write(data);
        await writable.close();
    }

    async modifyBinary(file: FSAPIFile, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<void> {
        const writable = await file.handle.createWritable();
        await writable.write(data);
        await writable.close();
    }

    async create(p: string, data: string, options?: UXDataWriteOptions): Promise<FSAPIFile> {
        const parts = p.split("/").filter((part) => part !== "");
        const fileName = parts[parts.length - 1];

        // Navigate to parent directory, creating as needed
        let currentHandle = this.rootHandle;
        for (let i = 0; i < parts.length - 1; i++) {
            currentHandle = await currentHandle.getDirectoryHandle(parts[i], { create: true });
        }

        // Create the file
        const fileHandle = await currentHandle.getFileHandle(fileName, { create: true });
        const writable = await fileHandle.createWritable();
        await writable.write(data);
        await writable.close();

        // Get file metadata
        const fileObject = await fileHandle.getFile();

        return {
            path: p as FilePath,
            stat: {
                size: fileObject.size,
                mtime: fileObject.lastModified,
                ctime: fileObject.lastModified,
                type: "file",
            },
            handle: fileHandle,
        };
    }

    async createBinary(p: string, data: ArrayBuffer, options?: UXDataWriteOptions): Promise<FSAPIFile> {
        const parts = p.split("/").filter((part) => part !== "");
        const fileName = parts[parts.length - 1];

        // Navigate to parent directory, creating as needed
        let currentHandle = this.rootHandle;
        for (let i = 0; i < parts.length - 1; i++) {
            currentHandle = await currentHandle.getDirectoryHandle(parts[i], { create: true });
        }

        // Create the file
        const fileHandle = await currentHandle.getFileHandle(fileName, { create: true });
        const writable = await fileHandle.createWritable();
        await writable.write(data);
        await writable.close();

        // Get file metadata
        const fileObject = await fileHandle.getFile();

        return {
            path: p as FilePath,
            stat: {
                size: fileObject.size,
                mtime: fileObject.lastModified,
                ctime: fileObject.lastModified,
                type: "file",
            },
            handle: fileHandle,
        };
    }

    async delete(file: FSAPIFile | FSAPIFolder, force = false): Promise<void> {
        const parts = file.path.split("/").filter((part) => part !== "");
        const name = parts[parts.length - 1];

        // Navigate to parent directory
        let currentHandle = this.rootHandle;
        for (let i = 0; i < parts.length - 1; i++) {
            currentHandle = await currentHandle.getDirectoryHandle(parts[i]);
        }

        // Remove the entry
        await currentHandle.removeEntry(name, { recursive: force });
    }

    async trash(file: FSAPIFile | FSAPIFolder, force = false): Promise<void> {
        // In webapp, trash is the same as delete (no recycle bin)
        await this.delete(file, force);
    }

    trigger(name: string, ...data: any[]): any {
        // No-op in webapp version (no event system yet)
        return undefined;
    }
}
209
src/apps/webapp/index.html
Normal file
@@ -0,0 +1,209 @@
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Self-hosted LiveSync WebApp</title>
|
||||
<style>
|
||||
* {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
|
||||
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
||||
min-height: 100vh;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
padding: 20px;
|
||||
}
|
||||
|
||||
.container {
|
||||
background: white;
|
||||
border-radius: 12px;
|
||||
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
|
||||
padding: 40px;
|
||||
max-width: 600px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
h1 {
|
||||
color: #333;
|
||||
margin-bottom: 10px;
|
||||
font-size: 28px;
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
color: #666;
|
||||
margin-bottom: 30px;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
#status {
|
||||
padding: 15px;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 20px;
|
||||
font-size: 14px;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
#status.error {
|
||||
background: #fee;
|
||||
color: #c33;
|
||||
border: 1px solid #fcc;
|
||||
}
|
||||
|
||||
#status.warning {
|
||||
background: #ffeaa7;
|
||||
color: #d63031;
|
||||
border: 1px solid #fdcb6e;
|
||||
}
|
||||
|
||||
#status.success {
|
||||
background: #d4edda;
|
||||
color: #155724;
|
||||
border: 1px solid #c3e6cb;
|
||||
}
|
||||
|
||||
#status.info {
|
||||
background: #d1ecf1;
|
||||
color: #0c5460;
|
||||
border: 1px solid #bee5eb;
|
||||
}
|
||||
|
||||
.info-section {
|
||||
margin-top: 30px;
|
||||
padding: 20px;
|
||||
background: #f8f9fa;
|
||||
border-radius: 8px;
|
||||
}
|
||||
|
||||
.info-section h2 {
|
||||
font-size: 18px;
|
||||
margin-bottom: 15px;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.info-section ul {
|
||||
list-style: none;
|
||||
padding-left: 0;
|
||||
}
|
||||
|
||||
.info-section li {
|
||||
padding: 8px 0;
|
||||
color: #666;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.info-section li::before {
|
||||
content: "•";
|
||||
color: #667eea;
|
||||
font-weight: bold;
|
||||
display: inline-block;
|
||||
width: 1em;
|
||||
margin-left: -1em;
|
||||
padding-right: 0.5em;
|
||||
}
|
||||
|
||||
.feature-list {
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
.feature-list h3 {
|
||||
font-size: 16px;
|
||||
margin-bottom: 10px;
|
||||
color: #444;
|
||||
}
|
||||
|
||||
code {
|
||||
background: #e9ecef;
|
||||
padding: 2px 6px;
|
||||
border-radius: 4px;
|
||||
font-family: 'Courier New', monospace;
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
.footer {
|
||||
margin-top: 30px;
|
||||
text-align: center;
|
||||
color: #999;
|
||||
font-size: 12px;
|
||||
}
|
||||
|
||||
.footer a {
|
||||
color: #667eea;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.footer a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
.console-link {
|
||||
margin-top: 20px;
|
||||
text-align: center;
|
||||
font-size: 13px;
|
||||
color: #666;
|
||||
}
|
||||
|
||||
@media (max-width: 600px) {
|
||||
.container {
|
||||
padding: 30px 20px;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 24px;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>🔄 Self-hosted LiveSync</h1>
|
||||
<p class="subtitle">Browser-based Self-hosted LiveSync using FileSystem API</p>
|
||||
|
||||
<div id="status" class="info">
|
||||
Initialising...
|
||||
</div>
|
||||
|
||||
<div class="info-section">
|
||||
<h2>About This Application</h2>
|
||||
<ul>
|
||||
<li>Runs entirely in your browser</li>
|
||||
<li>Uses FileSystem API to access your local vault</li>
|
||||
<li>Syncs with CouchDB server (like Obsidian plugin)</li>
|
||||
<li>Settings stored in <code>.livesync/settings.json</code></li>
|
||||
<li>Real-time file watching with FileSystemObserver (Chrome 124+)</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="info-section">
|
||||
<h2>How to Use</h2>
|
||||
<ul>
|
||||
<li>Grant directory access when prompted</li>
|
||||
<li>Create <code>.livesync/settings.json</code> in your vault folder. (Compatible with Obsidian's Self-hosted LiveSync)</li>
|
||||
<li>Add your CouchDB connection details</li>
|
||||
<li>Your files will be synced automatically</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="console-link">
|
||||
💡 Open browser console (F12) for detailed logs
|
||||
</div>
|
||||
|
||||
<div class="footer">
|
||||
<p>
|
||||
Powered by
|
||||
<a href="https://github.com/vrtmrz/obsidian-livesync" target="_blank">
|
||||
Self-hosted LiveSync
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script type="module" src="./main.ts"></script>
|
||||
</body>
|
||||
</html>
|
||||
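The "How to Use" list above asks for a `.livesync/settings.json` with CouchDB connection details. As a rough sketch of what that file could contain (not an authoritative schema; the field names come from the `DEFAULT_SETTINGS` object in `main.ts` below, and every value here is a placeholder), expressed as a typed object literal:

```ts
import type { ObsidianLiveSyncSettings } from "@lib/common/types";

// Hypothetical minimal contents of `.livesync/settings.json`. The webapp
// spreads the parsed file over its defaults ({ ...DEFAULT_SETTINGS, ...data }),
// so only fields that differ from the defaults need to be present.
const exampleSettings: Partial<ObsidianLiveSyncSettings> = {
    couchDB_URI: "https://couchdb.example.com", // placeholder URL
    couchDB_USER: "sync-user", // placeholder
    couchDB_PASSWORD: "sync-password", // placeholder
    couchDB_DBNAME: "vault", // placeholder
    isConfigured: true, // otherwise the status banner shows the "not configured" warning
};

// Serialised exactly as saveSettingsToFile() does it in main.ts below:
console.log(JSON.stringify(exampleSettings, null, 2));
```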
353
src/apps/webapp/main.ts
Normal file
@@ -0,0 +1,353 @@
/**
 * Self-hosted LiveSync WebApp
 * Browser-based version of Self-hosted LiveSync plugin using FileSystem API
 */

import { BrowserServiceHub } from "@lib/services/BrowserServices";
import { LiveSyncBaseCore } from "@/LiveSyncBaseCore";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import { initialiseServiceModulesFSAPI } from "./serviceModules/FSAPIServiceModules";
import type { ObsidianLiveSyncSettings } from "@lib/common/types";
import type { BrowserAPIService } from "@lib/services/implements/browser/BrowserAPIService";
import type { InjectableSettingService } from "@lib/services/implements/injectable/InjectableSettingService";
import { useOfflineScanner } from "@lib/serviceFeatures/offlineScanner";
import { useRedFlagFeatures } from "@/serviceFeatures/redFlag";
import { useCheckRemoteSize } from "@lib/serviceFeatures/checkRemoteSize";
import { SetupManager } from "@/modules/features/SetupManager";
// import { ModuleObsidianSettingsAsMarkdown } from "@/modules/features/ModuleObsidianSettingAsMarkdown";
import { ModuleSetupObsidian } from "@/modules/features/ModuleSetupObsidian";
// import { ModuleObsidianMenu } from "@/modules/essentialObsidian/ModuleObsidianMenu";

const SETTINGS_DIR = ".livesync";
const SETTINGS_FILE = "settings.json";
const DB_NAME = "livesync-webapp";

/**
 * Default settings for the webapp
 */
const DEFAULT_SETTINGS: Partial<ObsidianLiveSyncSettings> = {
    liveSync: false,
    syncOnSave: true,
    syncOnStart: false,
    savingDelay: 200,
    lessInformationInLog: false,
    gcDelay: 0,
    periodicReplication: false,
    periodicReplicationInterval: 60,
    isConfigured: false,
    // CouchDB settings - user needs to configure these
    couchDB_URI: "",
    couchDB_USER: "",
    couchDB_PASSWORD: "",
    couchDB_DBNAME: "",
    // Disable features not needed in webapp
    usePluginSync: false,
    autoSweepPlugins: false,
    autoSweepPluginsPeriodic: false,
};

class LiveSyncWebApp {
    private rootHandle: FileSystemDirectoryHandle | null = null;
    private core: LiveSyncBaseCore<ServiceContext, any> | null = null;
    private serviceHub: BrowserServiceHub<ServiceContext> | null = null;

    async initialize() {
        console.log("Self-hosted LiveSync WebApp");
        console.log("Initializing...");

        // Request directory access
        await this.requestDirectoryAccess();

        if (!this.rootHandle) {
            throw new Error("Failed to get directory access");
        }

        console.log(`Vault directory: ${this.rootHandle.name}`);

        // Create service context and hub
        const context = new ServiceContext();
        this.serviceHub = new BrowserServiceHub<ServiceContext>();

        // Setup API service
        (this.serviceHub.API as BrowserAPIService<ServiceContext>).getSystemVaultName.setHandler(
            () => this.rootHandle?.name || "livesync-webapp"
        );

        // Setup settings handlers - save to .livesync folder
        const settingService = this.serviceHub.setting as InjectableSettingService<ServiceContext>;

        settingService.saveData.setHandler(async (data: ObsidianLiveSyncSettings) => {
            try {
                await this.saveSettingsToFile(data);
                console.log("[Settings] Saved to .livesync/settings.json");
            } catch (error) {
                console.error("[Settings] Failed to save:", error);
            }
        });

        settingService.loadData.setHandler(async (): Promise<ObsidianLiveSyncSettings | undefined> => {
            try {
                const data = await this.loadSettingsFromFile();
                if (data) {
                    console.log("[Settings] Loaded from .livesync/settings.json");
                    return { ...DEFAULT_SETTINGS, ...data } as ObsidianLiveSyncSettings;
                }
            } catch (error) {
                console.log("[Settings] Failed to load, using defaults");
            }
            return DEFAULT_SETTINGS as ObsidianLiveSyncSettings;
        });

        // Create LiveSync core
        this.core = new LiveSyncBaseCore(
            this.serviceHub,
            (core, serviceHub) => {
                return initialiseServiceModulesFSAPI(this.rootHandle!, core, serviceHub);
            },
            (core) => [
                // new ModuleObsidianEvents(this, core),
                // new ModuleObsidianSettingDialogue(this, core),
                // new ModuleObsidianMenu(core),
                new ModuleSetupObsidian(core),
                new SetupManager(core),
                // new ModuleObsidianSettingsAsMarkdown(core),
                // new ModuleLog(this, core),
                // new ModuleObsidianDocumentHistory(this, core),
                // new ModuleInteractiveConflictResolver(this, core),
                // new ModuleObsidianGlobalHistory(this, core),
                // new ModuleDev(this, core),
                // new ModuleReplicateTest(this, core),
                // new ModuleIntegratedTest(this, core),
                // new SetupManager(core),
            ],
            () => [], // No add-ons
            (core) => {
                useOfflineScanner(core);
                useRedFlagFeatures(core);
                useCheckRemoteSize(core);
            }
        );

        // Start the core
        await this.start();
    }

    private async saveSettingsToFile(data: ObsidianLiveSyncSettings): Promise<void> {
        if (!this.rootHandle) return;

        try {
            // Create .livesync directory if it doesn't exist
            const livesyncDir = await this.rootHandle.getDirectoryHandle(SETTINGS_DIR, { create: true });

            // Create/overwrite settings.json
            const fileHandle = await livesyncDir.getFileHandle(SETTINGS_FILE, { create: true });
            const writable = await fileHandle.createWritable();
            await writable.write(JSON.stringify(data, null, 2));
            await writable.close();
        } catch (error) {
            console.error("[Settings] Error saving to file:", error);
            throw error;
        }
    }

    private async loadSettingsFromFile(): Promise<Partial<ObsidianLiveSyncSettings> | null> {
        if (!this.rootHandle) return null;

        try {
            const livesyncDir = await this.rootHandle.getDirectoryHandle(SETTINGS_DIR);
            const fileHandle = await livesyncDir.getFileHandle(SETTINGS_FILE);
            const file = await fileHandle.getFile();
            const text = await file.text();
            return JSON.parse(text);
        } catch (error) {
            // File doesn't exist yet
            return null;
        }
    }

    private async requestDirectoryAccess() {
        try {
            // Check if we have a cached directory handle
            const cached = await this.loadCachedDirectoryHandle();
            if (cached) {
                // Verify permission (cast to any for compatibility)
                try {
                    const permission = await (cached as any).queryPermission({ mode: "readwrite" });
                    if (permission === "granted") {
                        this.rootHandle = cached;
                        console.log("[Directory] Using cached directory handle");
                        return;
                    }
                } catch (e) {
                    // queryPermission might not be supported, try to use anyway
                    console.log("[Directory] Could not verify permission, requesting new access");
                }
            }

            // Request new directory access
            console.log("[Directory] Requesting directory access...");
            this.rootHandle = await (window as any).showDirectoryPicker({
                mode: "readwrite",
                startIn: "documents",
            });

            // Save the handle for next time
            await this.saveCachedDirectoryHandle(this.rootHandle);
            console.log("[Directory] Directory access granted");
        } catch (error) {
            console.error("[Directory] Failed to get directory access:", error);
            throw error;
        }
    }

    private async saveCachedDirectoryHandle(handle: FileSystemDirectoryHandle) {
        try {
            // Use IndexedDB to store the directory handle
            const db = await this.openHandleDB();
            const transaction = db.transaction(["handles"], "readwrite");
            const store = transaction.objectStore("handles");
            await new Promise((resolve, reject) => {
                const request = store.put(handle, "rootHandle");
                request.onsuccess = resolve;
                request.onerror = reject;
            });
            db.close();
        } catch (error) {
            console.error("[Directory] Failed to cache handle:", error);
        }
    }

    private async loadCachedDirectoryHandle(): Promise<FileSystemDirectoryHandle | null> {
        try {
            const db = await this.openHandleDB();
            const transaction = db.transaction(["handles"], "readonly");
            const store = transaction.objectStore("handles");
            const handle = await new Promise<FileSystemDirectoryHandle | null>((resolve, reject) => {
                const request = store.get("rootHandle");
                request.onsuccess = () => resolve(request.result || null);
                request.onerror = reject;
            });
            db.close();
            return handle;
        } catch (error) {
            console.error("[Directory] Failed to load cached handle:", error);
            return null;
        }
    }

    private async openHandleDB(): Promise<IDBDatabase> {
        return new Promise((resolve, reject) => {
            const request = indexedDB.open("livesync-webapp-handles", 1);
            request.onerror = () => reject(request.error);
            request.onsuccess = () => resolve(request.result);
            request.onupgradeneeded = (event) => {
                const db = (event.target as IDBOpenDBRequest).result;
                if (!db.objectStoreNames.contains("handles")) {
                    db.createObjectStore("handles");
                }
            };
        });
    }

    private async start() {
        if (!this.core) {
            throw new Error("Core not initialized");
        }

        try {
            console.log("[Starting] Initializing LiveSync...");

            const loadResult = await this.core.services.control.onLoad();
            if (!loadResult) {
                console.error("[Error] Failed to initialize LiveSync");
                this.showError("Failed to initialize LiveSync");
                return;
            }

            await this.core.services.control.onReady();

            console.log("[Ready] LiveSync is running");

            // Check if configured
            const settings = this.core.services.setting.currentSettings();
            if (!settings.isConfigured) {
                console.warn("[Warning] LiveSync is not configured yet");
                this.showWarning("Please configure CouchDB connection in settings");
            } else {
                console.log("[Info] LiveSync is configured and ready");
                console.log(`[Info] Database: ${settings.couchDB_URI}/${settings.couchDB_DBNAME}`);
                this.showSuccess("LiveSync is ready!");
            }

            // Scan the directory to populate file cache
            const fileAccess = (this.core as any)._serviceModules?.storageAccess?.vaultAccess;
            if (fileAccess?.fsapiAdapter) {
                console.log("[Scanning] Scanning vault directory...");
                await fileAccess.fsapiAdapter.scanDirectory();
                const files = await fileAccess.fsapiAdapter.getFiles();
                console.log(`[Scanning] Found ${files.length} files`);
            }
        } catch (error) {
            console.error("[Error] Failed to start:", error);
            this.showError(`Failed to start: ${error}`);
        }
    }

    async shutdown() {
        if (this.core) {
            console.log("[Shutdown] Shutting down...");

            // Stop file watching
            const storageEventManager = (this.core as any)._serviceModules?.storageAccess?.storageEventManager;
            if (storageEventManager?.cleanup) {
                await storageEventManager.cleanup();
            }

            await this.core.services.control.onUnload();
            console.log("[Shutdown] Complete");
        }
    }

    private showError(message: string) {
        const statusEl = document.getElementById("status");
        if (statusEl) {
            statusEl.className = "error";
            statusEl.textContent = `Error: ${message}`;
        }
    }

    private showWarning(message: string) {
        const statusEl = document.getElementById("status");
        if (statusEl) {
            statusEl.className = "warning";
            statusEl.textContent = `Warning: ${message}`;
        }
    }

    private showSuccess(message: string) {
        const statusEl = document.getElementById("status");
        if (statusEl) {
            statusEl.className = "success";
            statusEl.textContent = message;
        }
    }
}

// Initialize on load
const app = new LiveSyncWebApp();

window.addEventListener("load", async () => {
    try {
        await app.initialize();
    } catch (error) {
        console.error("Failed to initialize:", error);
    }
});

// Handle page unload
window.addEventListener("beforeunload", () => {
    void app.shutdown();
});

// Export for debugging
(window as any).livesyncApp = app;
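One subtlety in `requestDirectoryAccess` above: a handle restored from IndexedDB usually comes back in the `"prompt"` permission state rather than `"granted"`, and `requestPermission()` can only succeed inside a user gesture. A minimal sketch of re-activating a cached handle from a click handler (assuming the Chromium File System Access API's `queryPermission`/`requestPermission` methods; the casts mirror the ones used in `main.ts`):

```ts
// Sketch: re-activate a cached FileSystemDirectoryHandle. Call this from a
// click handler, since requestPermission() must run within a user gesture.
async function reactivateHandle(
    cached: FileSystemDirectoryHandle
): Promise<FileSystemDirectoryHandle | null> {
    const opts = { mode: "readwrite" };
    // queryPermission returns "granted" | "denied" | "prompt"
    if ((await (cached as any).queryPermission(opts)) === "granted") return cached;
    // requestPermission shows the browser's permission prompt
    if ((await (cached as any).requestPermission(opts)) === "granted") return cached;
    // Still not granted: the caller should fall back to showDirectoryPicker()
    return null;
}
```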
281
src/apps/webapp/managers/FSAPIStorageEventManagerAdapter.ts
Normal file
@@ -0,0 +1,281 @@
import type { FilePath, UXFileInfoStub, UXInternalFileInfoStub } from "@lib/common/types";
import type { FileEventItem } from "@lib/common/types";
import type { IStorageEventManagerAdapter } from "@lib/managers/adapters";
import type {
    IStorageEventTypeGuardAdapter,
    IStorageEventPersistenceAdapter,
    IStorageEventWatchAdapter,
    IStorageEventStatusAdapter,
    IStorageEventConverterAdapter,
    IStorageEventWatchHandlers,
} from "@lib/managers/adapters";
import type { FileEventItemSentinel } from "@lib/managers/StorageEventManager";
import type { FSAPIFile, FSAPIFolder } from "../adapters/FSAPITypes";

/**
 * FileSystem API-specific type guard adapter
 */
class FSAPITypeGuardAdapter implements IStorageEventTypeGuardAdapter<FSAPIFile, FSAPIFolder> {
    isFile(file: any): file is FSAPIFile {
        return (
            file && typeof file === "object" && "path" in file && "stat" in file && "handle" in file && !file.isFolder
        );
    }

    isFolder(item: any): item is FSAPIFolder {
        return item && typeof item === "object" && "path" in item && item.isFolder === true && "handle" in item;
    }
}

/**
 * FileSystem API-specific persistence adapter (IndexedDB-based snapshot)
 */
class FSAPIPersistenceAdapter implements IStorageEventPersistenceAdapter {
    private dbName = "livesync-webapp-snapshot";
    private storeName = "snapshots";
    private snapshotKey = "file-events";

    private async openDB(): Promise<IDBDatabase> {
        return new Promise((resolve, reject) => {
            const request = indexedDB.open(this.dbName, 1);

            request.onerror = () => reject(request.error);
            request.onsuccess = () => resolve(request.result);

            request.onupgradeneeded = (event) => {
                const db = (event.target as IDBOpenDBRequest).result;
                if (!db.objectStoreNames.contains(this.storeName)) {
                    db.createObjectStore(this.storeName);
                }
            };
        });
    }

    async saveSnapshot(snapshot: (FileEventItem | FileEventItemSentinel)[]): Promise<void> {
        try {
            const db = await this.openDB();
            const transaction = db.transaction([this.storeName], "readwrite");
            const store = transaction.objectStore(this.storeName);

            await new Promise<void>((resolve, reject) => {
                const request = store.put(snapshot, this.snapshotKey);
                request.onsuccess = () => resolve();
                request.onerror = () => reject(request.error);
            });

            db.close();
        } catch (error) {
            console.error("Failed to save snapshot:", error);
        }
    }

    async loadSnapshot(): Promise<(FileEventItem | FileEventItemSentinel)[] | null> {
        try {
            const db = await this.openDB();
            const transaction = db.transaction([this.storeName], "readonly");
            const store = transaction.objectStore(this.storeName);

            const result = await new Promise<(FileEventItem | FileEventItemSentinel)[] | null>((resolve, reject) => {
                const request = store.get(this.snapshotKey);
                request.onsuccess = () => resolve(request.result || null);
                request.onerror = () => reject(request.error);
            });

            db.close();
            return result;
        } catch {
            return null;
        }
    }
}

/**
 * FileSystem API-specific status adapter (console logging)
 */
class FSAPIStatusAdapter implements IStorageEventStatusAdapter {
    private lastUpdate = 0;
    private updateInterval = 5000; // Update every 5 seconds

    updateStatus(status: { batched: number; processing: number; totalQueued: number }): void {
        const now = Date.now();
        if (now - this.lastUpdate > this.updateInterval) {
            if (status.totalQueued > 0 || status.processing > 0) {
                console.log(
                    `[StorageEventManager] Batched: ${status.batched}, Processing: ${status.processing}, Total Queued: ${status.totalQueued}`
                );
            }
            this.lastUpdate = now;
        }
    }
}

/**
 * FileSystem API-specific converter adapter
 */
class FSAPIConverterAdapter implements IStorageEventConverterAdapter<FSAPIFile> {
    toFileInfo(file: FSAPIFile, deleted?: boolean): UXFileInfoStub {
        const pathParts = file.path.split("/");
        const name = pathParts[pathParts.length - 1] || file.handle.name;

        return {
            name: name,
            path: file.path,
            stat: file.stat,
            deleted: deleted,
            isFolder: false,
        };
    }

    toInternalFileInfo(p: FilePath): UXInternalFileInfoStub {
        const pathParts = p.split("/");
        const name = pathParts[pathParts.length - 1] || "";

        return {
            name: name,
            path: p,
            isInternal: true,
            stat: undefined,
        };
    }
}

/**
 * FileSystem API-specific watch adapter using FileSystemObserver (Chrome only)
 */
class FSAPIWatchAdapter implements IStorageEventWatchAdapter {
    private observer: any = null; // FileSystemObserver type

    constructor(private rootHandle: FileSystemDirectoryHandle) {}

    async beginWatch(handlers: IStorageEventWatchHandlers): Promise<void> {
        // Use FileSystemObserver if available (Chrome 124+)
        if (typeof (window as any).FileSystemObserver === "undefined") {
            console.log("[FSAPIWatchAdapter] FileSystemObserver not available, file watching disabled");
            console.log("[FSAPIWatchAdapter] Consider using Chrome 124+ for real-time file watching");
            return Promise.resolve();
        }

        try {
            const FileSystemObserver = (window as any).FileSystemObserver;

            this.observer = new FileSystemObserver(async (records: any[]) => {
                for (const record of records) {
                    const handle = record.root;
                    const changedHandle = record.changedHandle;
                    const relativePathComponents = record.relativePathComponents;
                    const type = record.type; // "appeared", "disappeared", "modified", "moved", "unknown", "errored"

                    // Build relative path
                    const relativePath = relativePathComponents ? relativePathComponents.join("/") : "";

                    // Skip .livesync directory to avoid infinite loops
                    if (relativePath.startsWith(".livesync/") || relativePath === ".livesync") {
                        continue;
                    }

                    console.log(`[FileSystemObserver] ${type}: ${relativePath}`);

                    // Convert to our event handlers
                    try {
                        if (type === "appeared" || type === "modified") {
                            if (changedHandle && changedHandle.kind === "file") {
                                const file = await changedHandle.getFile();
                                const fileInfo = {
                                    path: relativePath as any,
                                    stat: {
                                        size: file.size,
                                        mtime: file.lastModified,
                                        ctime: file.lastModified,
                                        type: "file" as const,
                                    },
                                    handle: changedHandle,
                                };

                                if (type === "appeared") {
                                    await handlers.onCreate(fileInfo, undefined);
                                } else {
                                    await handlers.onChange(fileInfo, undefined);
                                }
                            }
                        } else if (type === "disappeared") {
                            const fileInfo = {
                                path: relativePath as any,
                                stat: {
                                    size: 0,
                                    mtime: Date.now(),
                                    ctime: Date.now(),
                                    type: "file" as const,
                                },
                                handle: null as any,
                            };
                            await handlers.onDelete(fileInfo, undefined);
                        } else if (type === "moved") {
                            // Handle as delete + create
                            // Note: FileSystemObserver provides both old and new paths in some cases
                            // For simplicity, we'll treat it as a modification
                            if (changedHandle && changedHandle.kind === "file") {
                                const file = await changedHandle.getFile();
                                const fileInfo = {
                                    path: relativePath as any,
                                    stat: {
                                        size: file.size,
                                        mtime: file.lastModified,
                                        ctime: file.lastModified,
                                        type: "file" as const,
                                    },
                                    handle: changedHandle,
                                };
                                await handlers.onChange(fileInfo, undefined);
                            }
                        }
                    } catch (error) {
                        console.error(
                            `[FileSystemObserver] Error processing ${type} event for ${relativePath}:`,
                            error
                        );
                    }
                }
            });

            // Start observing
            await this.observer.observe(this.rootHandle, { recursive: true });
            console.log("[FSAPIWatchAdapter] FileSystemObserver started successfully");
        } catch (error) {
            console.error("[FSAPIWatchAdapter] Failed to start FileSystemObserver:", error);
            console.log("[FSAPIWatchAdapter] Falling back to manual sync mode");
        }

        return Promise.resolve();
    }

    async stopWatch(): Promise<void> {
        if (this.observer) {
            try {
                this.observer.disconnect();
                this.observer = null;
                console.log("[FSAPIWatchAdapter] FileSystemObserver stopped");
            } catch (error) {
                console.error("[FSAPIWatchAdapter] Error stopping observer:", error);
            }
        }
    }
}

/**
 * Composite adapter for FileSystem API StorageEventManager
 */
export class FSAPIStorageEventManagerAdapter implements IStorageEventManagerAdapter<FSAPIFile, FSAPIFolder> {
    readonly typeGuard: FSAPITypeGuardAdapter;
    readonly persistence: FSAPIPersistenceAdapter;
    readonly watch: FSAPIWatchAdapter;
    readonly status: FSAPIStatusAdapter;
    readonly converter: FSAPIConverterAdapter;

    constructor(rootHandle: FileSystemDirectoryHandle) {
        this.typeGuard = new FSAPITypeGuardAdapter();
        this.persistence = new FSAPIPersistenceAdapter();
        this.watch = new FSAPIWatchAdapter(rootHandle);
        this.status = new FSAPIStatusAdapter();
        this.converter = new FSAPIConverterAdapter();
    }
}
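To see how the pieces compose, here is a hypothetical wiring of the composite adapter on its own: the handlers object mirrors the `onCreate`/`onChange`/`onDelete` calls made inside `beginWatch` above. The handler signatures are inferred from those call sites, not from the `IStorageEventWatchHandlers` declaration (which is not part of this diff), hence the `as any` cast:

```ts
// Hypothetical standalone usage of FSAPIStorageEventManagerAdapter.
async function demoWatch(rootHandle: FileSystemDirectoryHandle) {
    const adapter = new FSAPIStorageEventManagerAdapter(rootHandle);
    await adapter.watch.beginWatch({
        onCreate: async (file: any, _ctx: any) => console.log("created:", file.path),
        onChange: async (file: any, _ctx: any) => console.log("changed:", file.path),
        onDelete: async (file: any, _ctx: any) => console.log("deleted:", file.path),
    } as any);
    // Later, e.g. on page unload:
    // await adapter.watch.stopWatch();
}
```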
36
src/apps/webapp/managers/StorageEventManagerFSAPI.ts
Normal file
@@ -0,0 +1,36 @@
import { StorageEventManagerBase, type StorageEventManagerBaseDependencies } from "@lib/managers/StorageEventManager";
import { FSAPIStorageEventManagerAdapter } from "./FSAPIStorageEventManagerAdapter";
import type { IMinimumLiveSyncCommands, LiveSyncBaseCore } from "@/LiveSyncBaseCore";
import type { ServiceContext } from "@lib/services/base/ServiceBase";

export class StorageEventManagerFSAPI extends StorageEventManagerBase<FSAPIStorageEventManagerAdapter> {
    core: LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>;
    private fsapiAdapter: FSAPIStorageEventManagerAdapter;

    constructor(
        rootHandle: FileSystemDirectoryHandle,
        core: LiveSyncBaseCore<ServiceContext, IMinimumLiveSyncCommands>,
        dependencies: StorageEventManagerBaseDependencies
    ) {
        const adapter = new FSAPIStorageEventManagerAdapter(rootHandle);
        super(adapter, dependencies);
        this.fsapiAdapter = adapter;
        this.core = core;
    }

    /**
     * Override _watchVaultRawEvents for webapp-specific logic
     * In webapp, we don't have internal files like Obsidian's .obsidian folder
     */
    protected override async _watchVaultRawEvents(path: string) {
        // No-op in webapp version
        // Internal file handling is not needed
    }

    async cleanup() {
        // Stop file watching
        if (this.fsapiAdapter?.watch) {
            await (this.fsapiAdapter.watch as any).stopWatch?.();
        }
    }
}
17
src/apps/webapp/package.json
Normal file
@@ -0,0 +1,17 @@
{
    "name": "livesync-webapp",
    "private": true,
    "version": "0.0.1",
    "type": "module",
    "description": "Browser-based Self-hosted LiveSync using FileSystem API",
    "scripts": {
        "dev": "vite",
        "build": "vite build",
        "preview": "vite preview"
    },
    "dependencies": {},
    "devDependencies": {
        "typescript": "5.9.3",
        "vite": "^7.3.1"
    }
}
15
src/apps/webapp/serviceModules/DatabaseFileAccess.ts
Normal file
@@ -0,0 +1,15 @@
import {
    ServiceDatabaseFileAccessBase,
    type ServiceDatabaseFileAccessDependencies,
} from "@lib/serviceModules/ServiceDatabaseFileAccessBase";
import type { DatabaseFileAccess } from "@lib/interfaces/DatabaseFileAccess";

/**
 * FileSystem API-specific implementation of ServiceDatabaseFileAccess
 * Same as Obsidian version, no platform-specific changes needed
 */
export class ServiceDatabaseFileAccessFSAPI extends ServiceDatabaseFileAccessBase implements DatabaseFileAccess {
    constructor(services: ServiceDatabaseFileAccessDependencies) {
        super(services);
    }
}
105
src/apps/webapp/serviceModules/FSAPIServiceModules.ts
Normal file
@@ -0,0 +1,105 @@
import type { InjectableServiceHub } from "@lib/services/implements/injectable/InjectableServiceHub";
import { ServiceRebuilder } from "@lib/serviceModules/Rebuilder";

import { StorageAccessManager } from "@lib/managers/StorageProcessingManager";
import type { LiveSyncBaseCore } from "@/LiveSyncBaseCore";
import type { ServiceContext } from "@lib/services/base/ServiceBase";
import { FileAccessFSAPI } from "./FileAccessFSAPI";
import { ServiceFileAccessFSAPI } from "./ServiceFileAccessImpl";
import { ServiceDatabaseFileAccessFSAPI } from "./DatabaseFileAccess";
import { StorageEventManagerFSAPI } from "../managers/StorageEventManagerFSAPI";
import type { ServiceModules } from "@lib/interfaces/ServiceModule";
import { ServiceFileHandler } from "@/serviceModules/FileHandler";

/**
 * Initialize service modules for FileSystem API webapp version
 * This is the webapp equivalent of ObsidianLiveSyncPlugin.initialiseServiceModules
 *
 * @param rootHandle - The root FileSystemDirectoryHandle for the vault
 * @param core - The LiveSyncBaseCore instance
 * @param services - The service hub
 * @returns ServiceModules containing all initialized service modules
 */
export function initialiseServiceModulesFSAPI(
    rootHandle: FileSystemDirectoryHandle,
    core: LiveSyncBaseCore<ServiceContext, any>,
    services: InjectableServiceHub<ServiceContext>
): ServiceModules {
    const storageAccessManager = new StorageAccessManager();

    // FileSystem API-specific file access
    const vaultAccess = new FileAccessFSAPI(rootHandle, {
        storageAccessManager: storageAccessManager,
        vaultService: services.vault,
        settingService: services.setting,
        APIService: services.API,
        pathService: services.path,
    });

    // FileSystem API-specific storage event manager
    const storageEventManager = new StorageEventManagerFSAPI(rootHandle, core, {
        fileProcessing: services.fileProcessing,
        setting: services.setting,
        vaultService: services.vault,
        storageAccessManager: storageAccessManager,
        APIService: services.API,
    });

    // Storage access using FileSystem API adapter
    const storageAccess = new ServiceFileAccessFSAPI({
        API: services.API,
        setting: services.setting,
        fileProcessing: services.fileProcessing,
        vault: services.vault,
        appLifecycle: services.appLifecycle,
        storageEventManager: storageEventManager,
        storageAccessManager: storageAccessManager,
        vaultAccess: vaultAccess,
    });

    // Database file access (platform-independent)
    const databaseFileAccess = new ServiceDatabaseFileAccessFSAPI({
        API: services.API,
        database: services.database,
        path: services.path,
        storageAccess: storageAccess,
        vault: services.vault,
    });

    // File handler (platform-independent)
    const fileHandler = new (ServiceFileHandler as any)({
        API: services.API,
        databaseFileAccess: databaseFileAccess,
        conflict: services.conflict,
        setting: services.setting,
        fileProcessing: services.fileProcessing,
        vault: services.vault,
        path: services.path,
        replication: services.replication,
        storageAccess: storageAccess,
    });

    // Rebuilder (platform-independent)
    const rebuilder = new ServiceRebuilder({
        API: services.API,
        database: services.database,
        appLifecycle: services.appLifecycle,
        setting: services.setting,
        remote: services.remote,
        databaseEvents: services.databaseEvents,
        replication: services.replication,
        replicator: services.replicator,
        UI: services.UI,
        vault: services.vault,
        fileHandler: fileHandler,
        storageAccess: storageAccess,
        control: services.control,
    });

    return {
        rebuilder,
        fileHandler,
        databaseFileAccess,
        storageAccess,
    };
}
20
src/apps/webapp/serviceModules/FileAccessFSAPI.ts
Normal file
@@ -0,0 +1,20 @@
import { FileAccessBase, type FileAccessBaseDependencies } from "@lib/serviceModules/FileAccessBase";
import { FSAPIFileSystemAdapter } from "../adapters/FSAPIFileSystemAdapter";

/**
 * FileSystem API-specific implementation of FileAccessBase
 * Uses FSAPIFileSystemAdapter for browser file operations
 */
export class FileAccessFSAPI extends FileAccessBase<FSAPIFileSystemAdapter> {
    constructor(rootHandle: FileSystemDirectoryHandle, dependencies: FileAccessBaseDependencies) {
        const adapter = new FSAPIFileSystemAdapter(rootHandle);
        super(adapter, dependencies);
    }

    /**
     * Expose the adapter for accessing scanDirectory and other methods
     */
    get fsapiAdapter(): FSAPIFileSystemAdapter {
        return this.adapter;
    }
}
12
src/apps/webapp/serviceModules/ServiceFileAccessImpl.ts
Normal file
@@ -0,0 +1,12 @@
import { ServiceFileAccessBase, type StorageAccessBaseDependencies } from "@lib/serviceModules/ServiceFileAccessBase";
import { FSAPIFileSystemAdapter } from "../adapters/FSAPIFileSystemAdapter";

/**
 * FileSystem API-specific implementation of ServiceFileAccess
 * Uses FSAPIFileSystemAdapter for platform-specific operations
 */
export class ServiceFileAccessFSAPI extends ServiceFileAccessBase<FSAPIFileSystemAdapter> {
    constructor(services: StorageAccessBaseDependencies<FSAPIFileSystemAdapter>) {
        super(services);
    }
}
7
src/apps/webapp/svelte.config.js
Normal file
@@ -0,0 +1,7 @@
import { vitePreprocess } from "@sveltejs/vite-plugin-svelte";

export default {
    // Consult https://svelte.dev/docs#compile-time-svelte-preprocess
    // for more information about preprocessors
    preprocess: vitePreprocess(),
};
32
src/apps/webapp/tsconfig.json
Normal file
@@ -0,0 +1,32 @@
{
    "extends": "../../../tsconfig.json",
    "compilerOptions": {
        "target": "ES2020",
        "useDefineForClassFields": true,
        "module": "ESNext",
        "lib": ["ES2020", "DOM", "DOM.Iterable"],
        "skipLibCheck": true,

        /* Bundler mode */
        "moduleResolution": "bundler",
        "allowImportingTsExtensions": true,
        "resolveJsonModule": true,
        "isolatedModules": true,
        "noEmit": true,

        /* Linting */
        "strict": false,
        "noUnusedLocals": false,
        "noUnusedParameters": false,
        "noFallthroughCasesInSwitch": true,

        /* Path mapping */
        "baseUrl": ".",
        "paths": {
            "@/*": ["../../*"],
            "@lib/*": ["../../lib/src/*"]
        }
    },
    "include": ["*.ts", "**/*.ts", "**/*.tsx"],
    "exclude": ["node_modules", "dist"]
}
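The `paths` entries above pair with the matching `resolve.alias` block in the `vite.config.ts` that follows: TypeScript uses `paths` only for type resolution, while Vite performs the actual module resolution at build time. For illustration (module names taken from other files in this diff):

```ts
// With the mappings above, these specifiers resolve into the shared sources:
import { LiveSyncBaseCore } from "@/LiveSyncBaseCore"; // via "@/*" -> ../../*
import type { ObsidianLiveSyncSettings } from "@lib/common/types"; // via "@lib/*" -> ../../lib/src/*
```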
34
src/apps/webapp/vite.config.ts
Normal file
@@ -0,0 +1,34 @@
import { defineConfig } from "vite";
import { svelte } from "@sveltejs/vite-plugin-svelte";
import path from "node:path";
import { readFileSync } from "node:fs";

const packageJson = JSON.parse(readFileSync("../../../package.json", "utf-8"));
const manifestJson = JSON.parse(readFileSync("../../../manifest.json", "utf-8"));

// https://vite.dev/config/
export default defineConfig({
    plugins: [svelte()],
    resolve: {
        alias: {
            "@": path.resolve(__dirname, "../../"),
            "@lib": path.resolve(__dirname, "../../lib/src"),
        },
    },
    base: "./",
    build: {
        outDir: "dist",
        emptyOutDir: true,
        rollupOptions: {
            input: {
                index: path.resolve(__dirname, "index.html"),
            },
        },
    },
    define: {
        MANIFEST_VERSION: JSON.stringify(process.env.MANIFEST_VERSION || manifestJson.version || "0.0.0"),
        PACKAGE_VERSION: JSON.stringify(process.env.PACKAGE_VERSION || packageJson.version || "0.0.0"),
    },
    server: {
        port: 3000,
        open: true,
    },
});
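The `define` block above substitutes `MANIFEST_VERSION` and `PACKAGE_VERSION` textually at build time, so TypeScript needs ambient declarations to type-check code that reads them. A sketch of what a declarations file (hypothetical name `vite-env.d.ts`) could contain:

```ts
// Ambient declarations for the build-time constants injected via `define`.
// Vite replaces the identifiers with string literals during the build.
declare const MANIFEST_VERSION: string;
declare const PACKAGE_VERSION: string;
```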
24
src/apps/webpeer/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
30
src/apps/webpeer/README.md
Normal file
@@ -0,0 +1,30 @@
# A pseudo client for Self-hosted LiveSync Peer-to-Peer Sync mode

## What is it for?

This is a pseudo client for the Self-hosted LiveSync Peer-to-Peer Sync mode. It is a simple, purely client-side web application that can connect to Self-hosted LiveSync in peer-to-peer mode.

It starts up in any browser, so if you leave it open on some device, it can replace your existing remote servers such as CouchDB.

> [!IMPORTANT]
> Of course, it has not been fully tested. Rather, it was created to be tested.

This pseudo client actually receives data from other devices, and sends it when some device requests it. However, it does not store **files** in local storage. If you want to purge the data, please purge the browser's cache, IndexedDB, local storage, etc.

## How to use it?

We can build the application by running the following command:

```bash
$ deno task build
```

Then, open `dist/index.html` in the browser. It can be configured in the same way as Self-hosted LiveSync (the same components are used[^1]).

## Some notes

I will publish this application on GitHub Pages later, so you will be able to use it without building it. However, that deployment shares its origin with other users. Hence, an application that you have built and deployed yourself would be more secure.

[^1]: Congrats! I made it modular. Finally...
1101
src/apps/webpeer/deno.lock
generated
Normal file
File diff suppressed because it is too large
17
src/apps/webpeer/index.html
Normal file
@@ -0,0 +1,17 @@
<!doctype html>
<html lang="en">

<head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="icon.svg" />
    <link rel="manifest" href="manifest.json" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Peer-to-Peer Daemon on Browser</title>
</head>

<body>
    <div id="app"></div>
    <script type="module" src="./src/main.ts"></script>
</body>

</html>
26
src/apps/webpeer/package.json
Normal file
@@ -0,0 +1,26 @@
{
    "name": "webpeer",
    "private": true,
    "version": "0.0.0",
    "type": "module",
    "scripts": {
        "dev": "vite",
        "build": "vite build",
        "preview": "vite preview",
        "check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json"
    },
    "dependencies": {},
    "devDependencies": {
        "eslint-plugin-svelte": "^3.15.0",
        "@sveltejs/vite-plugin-svelte": "^6.2.4",
        "@tsconfig/svelte": "^5.0.8",
        "svelte": "5.41.1",
        "svelte-check": "^443.3",
        "typescript": "5.9.3",
        "vite": "^7.3.1"
    },
    "imports": {
        "../../src/worker/bgWorker.ts": "../../src/worker/bgWorker.mock.ts",
        "@lib/worker/bgWorker.ts": "@lib/worker/bgWorker.mock.ts"
    }
}
52
src/apps/webpeer/public/icon.svg
Normal file
@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="512"
   height="512"
   viewBox="0 0 511.99998 511.99998"
   version="1.1"
   id="svg1"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <defs
     id="defs1" />
  <g
     id="layer1"
     transform="translate(-22.694448,-28.922305)">
    <g
       id="g4"
       transform="matrix(4.6921194,0,0,4.6921194,-266.26061,-494.11652)">
      <rect
         style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.2366;stroke-opacity:1"
         id="rect2"
         width="109.11913"
         height="109.11913"
         x="61.583057"
         y="111.47176" />
      <g
         id="g3"
         transform="matrix(0.77702959,0,0,0.77702959,22.523192,34.973874)">
        <path
           d="m 104.50787,75.245039 h -3.77394 l -10.90251,-29.352906 c 25.15963,-14.257127 33.96551,-46.12600067 20.12771,-71.285639 -14.25713,-25.159637 -46.126,-33.96551 -71.28564,-20.12771 -12.16049,6.709237 -21.38569,18.450401 -24.74031,31.868875 l -38.99744,-4.193274 c -2.93529,-18.4504 -20.12771,-31.449546 -38.578109,-28.514255 -16.773091,2.515964 -28.933582,16.773091 -28.933582,33.546184 0,5.8705823 1.677309,11.7411643 4.6126,17.1924183 l -46.964659,46.1260007 c -8.80587,-6.709236 -19.70838,-10.483182 -31.03022,-10.483182 -28.93358,0 -52.41591,23.482328 -52.41591,52.415908 0,28.933581 23.48233,52.415911 52.41591,52.415911 l 10.48319,62.89909 c -17.19242,7.54789 -25.15964,27.6756 -17.61175,44.86802 7.54789,17.19242 27.6756,25.15964 44.86802,17.61175 15.93444,-6.70924 23.90165,-24.32098 19.28905,-41.09408 l 36.900806,-19.28905 c 23.901654,26.41762 64.9957237,28.51425 91.832674,4.6126 13.41847,-12.16049 21.38569,-29.77224 21.38569,-47.80331 0,-4.6126 -0.41933,-9.64453 -1.67731,-14.25713 l 40.67475,-20.96636 c 12.57982,14.25713 33.96551,15.51511 48.22264,2.93529 14.25712,-12.57982 15.51511,-33.96551 2.93529,-48.222641 -7.96722,-6.70924 -17.19242,-10.90251 -26.83695,-10.90251 z m -223.92077,140.055311 c -5.45125,-5.45125 -12.99914,-8.80587 -20.54703,-9.64452 l -10.48319,-62.8991 c 10.06386,-3.35461 18.86973,-9.64452 25.15964,-18.03107 l 38.997438,20.54704 c -6.289909,16.77309 -5.031927,35.64282 3.354619,51.57725 z m 41.094077,-85.54276 -38.578107,-20.12771 c 1.67731,-5.45126 2.51596,-10.902511 2.51596,-16.773091 0,-10.90251 -3.35462,-21.38569 -9.64453,-30.191565 l 46.964658,-45.706673 c 15.934437,9.644527 36.900802,5.031927 46.545332,-10.9025087 1.25798,-2.096637 2.09663,-4.193273 2.93529,-6.28990997 l 38.99744,4.19327297 c 0.83865,15.0957817 8.38654,29.3529097 20.54703,38.5781097 l -34.3848403,64.157075 c -27.2562697,-10.902511 -58.7058147,-1.25798 -75.8982327,23.063 z m 148.861183,-20.12771 c 0,2.51596 0.41933,5.03193 1.25798,7.54789 l -38.57811,20.12771 c -4.6126,-9.2252 -11.74116,-16.77309 -20.12771,-22.64367 l 34.38484,-64.157077 c 5.45126,2.096637 11.32184,2.935291 17.19242,2.935291 3.35462,0 6.70924,-0.419327 10.06385,-0.838654 l 10.90251,29.352909 c -10.06385,5.87058 -15.51511,16.35376 -15.09578,27.675601 z"
           id="path1-1"
           style="overflow:hidden;stroke-width:4.19327"
           transform="matrix(0.26458333,0,0,0.26458333,130.85167,139.42444)" />
        <path
           id="path1-8-1-7"
           style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.073263;stroke-opacity:1"
           d="m 140.38615,132.15285 c -0.55386,0 -1.00708,0.45307 -1.00708,1.00708 v 12.05537 c 0,0.5546 0.45322,1.00707 1.00708,1.00707 h 0.504 v 1.51154 h 2.01461 v -1.51154 h 10.07399 v 1.51154 h 2.01461 v -1.51154 h 0.50354 c 0.55461,0 1.00754,-0.45247 1.00754,-1.00707 v -12.05537 c 0,-0.55401 -0.45293,-1.00708 -1.00754,-1.00708 z m 0.504,1.51108 h 14.10321 v 11.04783 h -14.10321 z m 1.00753,1.00754 v 9.03321 h 12.0886 v -9.03321 z m 3.52524,1.99776 c 1.3854,0 2.51906,1.13321 2.51906,2.51861 0,1.38467 -1.13366,2.51816 -2.51906,2.51816 -1.38467,0 -2.51771,-1.13349 -2.51771,-2.51816 0,-1.3854 1.13304,-2.51861 2.51771,-2.51861 z m 7.05183,0 c 0.27767,0 0.504,0.22706 0.504,0.504 v 4.02968 c 0,0.27694 -0.22633,0.50309 -0.504,0.50309 -0.27693,0 -0.50354,-0.22615 -0.50354,-0.50309 v -4.02968 c 0,-0.27694 0.22661,-0.504 0.50354,-0.504 z m -7.55492,0.504 v 0.60461 c -0.42786,0.15092 -0.75526,0.50306 -0.90692,0.90601 h -0.60461 v 1.00753 h 0.60461 c 0.15166,0.42859 0.47906,0.756 0.90692,0.90692 v 0.60461 h 1.00753 v -0.60461 c 0.42786,-0.15092 0.75509,-0.50397 0.90601,-0.90692 h 0.60462 v -1.00753 h -0.60462 c -0.15092,-0.42786 -0.47815,-0.75509 -0.90601,-0.90601 v -0.60461 z" />
        <path
           id="path1-8-1-7-3"
           style="fill:#ffffff;fill-opacity:1"
           d="m 2576.8666,993.66142 c -7.56,0 -13.7458,6.19115 -13.7458,13.75308 v 164.5449 c 0,7.57 6.1858,13.7458 13.7458,13.7458 h 6.8838 v 20.6369 h 27.499 v -20.6369 h 137.5019 v 20.6369 h 27.4989 v -20.6369 h 6.8693 c 7.57,0 13.7531,-6.1758 13.7531,-13.7458 v -164.5449 c 0,-7.56193 -6.1831,-13.75308 -13.7531,-13.75308 z m 6.8838,20.62968 h 192.4998 v 150.799 h -192.4998 z m 13.7531,13.753 v 123.2929 h 164.9936 v -123.2929 z m 48.1141,27.2674 c 18.91,0 34.3827,15.4654 34.3827,34.3754 0,18.9 -15.4727,34.3755 -34.3827,34.3755 -18.9,0 -34.3682,-15.4755 -34.3682,-34.3755 0,-18.91 15.4682,-34.3754 34.3682,-34.3754 z m 96.2499,0 c 3.79,0 6.8838,3.0965 6.8838,6.8765 v 55.0051 c 0,3.78 -3.0938,6.8693 -6.8838,6.8693 -3.78,0 -6.8693,-3.0893 -6.8693,-6.8693 v -55.0051 c 0,-3.78 3.0893,-6.8765 6.8693,-6.8765 z m -103.1192,6.8765 v 8.2519 c -5.84,2.06 -10.3078,6.8705 -12.3778,12.3705 h -8.2518 v 13.7531 h 8.2518 c 2.07,5.85 6.5378,10.3178 12.3778,12.3778 v 8.2518 h 13.7531 v -8.2518 c 5.84,-2.06 10.3105,-6.8778 12.3705,-12.3778 h 8.2446 v -13.7531 h -8.2446 c -2.06,-5.84 -6.5305,-10.3105 -12.3705,-12.3705 v -8.2519 z"
           transform="matrix(0.06289731,0,0,0.06289731,-82.022365,94.831671)" />
        <path
           id="path1-8-1"
           style="fill:#ffffff;fill-opacity:1"
           d="m 2576.8708,993.66021 c -7.56,0 -13.7505,6.18852 -13.7505,13.75049 v 164.5474 c 0,7.57 6.1905,13.7454 13.7505,13.7454 h 6.8778 v 20.6333 h 27.5009 v -20.6333 h 137.4996 v 20.6333 h 27.5009 v -20.6333 h 6.8727 c 7.57,0 13.7504,-6.1754 13.7504,-13.7454 v -164.5474 c 0,-7.56197 -6.1804,-13.75049 -13.7504,-13.75049 z m 6.8778,20.62819 h 192.5014 v 150.797 h -192.5014 z m 13.7504,13.7556 v 123.296 h 165.0005 v -123.296 z m 48.119,27.2668 c 18.91,0 34.3838,15.4687 34.3838,34.3787 0,18.9 -15.4738,34.3685 -34.3838,34.3685 -18.9,0 -34.3685,-15.4685 -34.3685,-34.3685 0,-18.91 15.4685,-34.3787 34.3685,-34.3787 z m 96.2533,0 c 3.79,0 6.8778,3.0977 6.8778,6.8777 v 55.0019 c 0,3.78 -3.0878,6.8676 -6.8778,6.8676 -3.78,0 -6.8727,-3.0876 -6.8727,-6.8676 v -55.0019 c 0,-3.78 3.0927,-6.8777 6.8727,-6.8777 z m -103.1209,6.8777 v 8.2523 c -5.84,2.06 -10.311,6.8709 -12.381,12.3709 h -8.2472 v 13.7505 h 8.2472 c 2.07,5.85 6.541,10.3159 12.381,12.3759 v 8.2523 h 13.7505 v -8.2523 c 5.84,-2.06 10.3108,-6.8759 12.3708,-12.3759 h 8.2473 v -13.7505 h -8.2473 c -2.06,-5.84 -6.5308,-10.3109 -12.3708,-12.3709 v -8.2523 z"
           transform="matrix(0.08943055,0,0,0.08943055,-115.49313,85.768735)" />
      </g>
    </g>
  </g>
</svg>
26
src/apps/webpeer/public/manifest.json
Normal file
@@ -0,0 +1,26 @@
{
    "name": "WebPeer - Pseudo client for Self-hosted LiveSync Peer-to-Peer Replication",
    "short_name": "WebPeer",
    "description": "A web-based pseudo peer-to-peer replication client that acts like a server for background sync.",
    "start_url": "./",
    "display": "standalone",
    "background_color": "#fff",
    "theme_color": "#fff",
    "orientation": "any",
    "icons": [
        {
            "src": "./icon.svg",
            "sizes": "512x512",
            "type": "image/svg+xml",
            "maskable": true
        }
    ],
    "additional_icons": [
        {
            "src": "./icon.svg",
            "sizes": "512x512",
            "type": "image/svg+xml",
            "maskable": true
        }
    ]
}
5
src/apps/webpeer/src/App.svelte
Normal file
@@ -0,0 +1,5 @@
<script lang="ts">
    import SyncMain from "./SyncMain.svelte";
</script>

<SyncMain></SyncMain>
Some files were not shown because too many files have changed in this diff.