Compare commits
647 Commits
(647 commits, c54ae58c0f through 6b03dbbe75; per-commit author, date, and message details were not captured on this page.)
@@ -1,3 +0,0 @@
-node_modules
-build
-.eslintrc.js.bak
.eslintrc · 53 changes
@@ -1,19 +1,58 @@
 {
     "root": true,
     "parser": "@typescript-eslint/parser",
-    "plugins": ["@typescript-eslint"],
-    "extends": ["eslint:recommended", "plugin:@typescript-eslint/eslint-recommended", "plugin:@typescript-eslint/recommended"],
+    "plugins": [
+        "@typescript-eslint",
+        "eslint-plugin-svelte",
+        "eslint-plugin-import"
+    ],
+    "extends": [
+        "eslint:recommended",
+        "plugin:@typescript-eslint/eslint-recommended",
+        "plugin:@typescript-eslint/recommended"
+    ],
     "parserOptions": {
-        "sourceType": "module"
+        "sourceType": "module",
+        "project": [
+            "tsconfig.json"
+        ]
     },
+    "ignorePatterns": [
+        "**/node_modules/*",
+        "**/jest.config.js",
+        "src/lib/coverage",
+        "src/lib/browsertest",
+        "**/test.ts",
+        "**/tests.ts",
+        "**/**test.ts",
+        "**/**.test.ts",
+        "esbuild.*.mjs",
+        "terser.*.mjs"
+    ],
     "rules": {
         "no-unused-vars": "off",
-        "@typescript-eslint/no-unused-vars": ["error", { "args": "none" }],
+        "@typescript-eslint/no-unused-vars": [
+            "error",
+            {
+                "args": "none"
+            }
+        ],
         "no-unused-labels": "off",
         "@typescript-eslint/ban-ts-comment": "off",
         "no-prototype-builtins": "off",
         "@typescript-eslint/no-empty-function": "off",
-        "require-await": "warn",
-        "no-async-promise-executor": "off",
-        "@typescript-eslint/no-explicit-any": "off"
+        "@typescript-eslint/require-await": "warn",
+        "@typescript-eslint/no-misused-promises": "warn",
+        "@typescript-eslint/no-floating-promises": "warn",
+        "no-async-promise-executor": "warn",
+        "@typescript-eslint/no-explicit-any": "off",
+        "@typescript-eslint/no-unnecessary-type-assertion": "error",
+        "no-constant-condition": [
+            "error",
+            {
+                "checkLoops": false
+            }
+        ]
     }
 }
.github/ISSUE_TEMPLATE/issue-report.md · 78 changes · vendored · new file
@@ -0,0 +1,78 @@
---
name: Issue report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

Thank you for taking the time to report this issue!
To improve the process, I would like to ask you to let me know the information in advance.

All instructions and examples, and empty entries can be deleted.
Just for your information, a [filled example](https://docs.vrtmrz.net/LiveSync/hintandtrivia/Issue+example) is also written.

## Abstract
The synchronisation hung up immediately after connecting.

## Expected behaviour
- Synchronisation ends with the message `Replication completed`
- Everything synchronised

## Actually happened
- Synchronisation has been cancelled with the message `TypeError ... ` (captured in the attached log, around LL.10-LL.12)
- No files synchronised

## Reproducing procedure

1. Configure LiveSync as in the attached material.
2. Click the replication button on the ribbon.
3. Synchronising has begun.
4. About two or three seconds later, we got the error `TypeError ... `.
5. Replication has been stopped. No files synchronised.

Note: If you do not catch the reproducing procedure, please let me know the frequency and signs.

## Report materials
If the information is not available, do not hesitate to report it as it is. You can also of course omit it if you think this is indeed unnecessary. If it is necessary, I will ask you.

### Report from the LiveSync
For more information, please refer to [Making the report](https://docs.vrtmrz.net/LiveSync/hintandtrivia/Making+the+report).
<details>
<summary>Report from hatch</summary>

```
<!-- paste here -->
```
</details>

### Obsidian debug info
<details>
<summary>Debug info</summary>

```
<!-- paste here -->
```
</details>

### Plug-in log
We can see the log by tapping the Document box icon. If you noticed something suspicious, please let me know.
Note: **Please enable `Verbose Log`**. For detail, refer to [Logging](https://docs.vrtmrz.net/LiveSync/hintandtrivia/Logging), please.

<details>
<summary>Plug-in log</summary>

```
<!-- paste here -->
```
</details>

### Network log
Network logs displayed in DevTools will possibly help with connection-related issues. To capture that, please refer to [DevTools](https://docs.vrtmrz.net/LiveSync/hintandtrivia/DevTools).

### Screenshots
If applicable, please add screenshots to help explain your problem.

### Other information, insights and intuition.
Please provide any additional context or information about the problem.
.github/workflows/release.yml · 128 changes · vendored
@@ -10,19 +10,19 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # otherwise, you will failed to push refs to dest repo
+         submodules: recursive
       - name: Use Node.js
-        uses: actions/setup-node@v1
+        uses: actions/setup-node@v4
        with:
-         node-version: '14.x' # You might need to adjust this value to your own version
+         node-version: '24.x' # You might need to adjust this value to your own version
       # Get the version number and put it in a variable
       - name: Get Version
        id: version
        run: |
-         echo "::set-output name=tag::$(git describe --abbrev=0 --tags)"
+         echo "tag=$(git describe --abbrev=0 --tags)" >> $GITHUB_OUTPUT
@@ -36,59 +36,69 @@ jobs:
          cp main.js manifest.json styles.css README.md ${{ github.event.repository.name }}
          zip -r ${{ github.event.repository.name }}.zip ${{ github.event.repository.name }}
       # Create the release on github
-      - name: Create Release
-       id: create_release
-       uses: actions/create-release@v1
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-         VERSION: ${{ github.ref }}
+      # - name: Create Release
+      #  id: create_release
+      #  uses: actions/create-release@v1
+      #  env:
+      #    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #    VERSION: ${{ steps.version.outputs.tag }}
+      #  with:
+      #    tag_name: ${{ steps.version.outputs.tag }}
+      #    release_name: ${{ steps.version.outputs.tag }}
+      #    draft: true
+      #    prerelease: false
+      # # Upload the packaged release file
+      # - name: Upload zip file
+      #  id: upload-zip
+      #  uses: actions/upload-release-asset@v1
+      #  env:
+      #    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #  with:
+      #    upload_url: ${{ steps.create_release.outputs.upload_url }}
+      #    asset_path: ./${{ github.event.repository.name }}.zip
+      #    asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
+      #    asset_content_type: application/zip
+      # # Upload the main.js
+      # - name: Upload main.js
+      #  id: upload-main
+      #  uses: actions/upload-release-asset@v1
+      #  env:
+      #    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #  with:
+      #    upload_url: ${{ steps.create_release.outputs.upload_url }}
+      #    asset_path: ./main.js
+      #    asset_name: main.js
+      #    asset_content_type: text/javascript
+      # # Upload the manifest.json
+      # - name: Upload manifest.json
+      #  id: upload-manifest
+      #  uses: actions/upload-release-asset@v1
+      #  env:
+      #    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #  with:
+      #    upload_url: ${{ steps.create_release.outputs.upload_url }}
+      #    asset_path: ./manifest.json
+      #    asset_name: manifest.json
+      #    asset_content_type: application/json
+      # # Upload the style.css
+      # - name: Upload styles.css
+      #  id: upload-css
+      #  uses: actions/upload-release-asset@v1
+      #  env:
+      #    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      #  with:
+      #    upload_url: ${{ steps.create_release.outputs.upload_url }}
+      #    asset_path: ./styles.css
+      #    asset_name: styles.css
+      #    asset_content_type: text/css
+      - name: Create Release and Upload Assets
+       uses: softprops/action-gh-release@v2
        with:
-         tag_name: ${{ github.ref }}
-         release_name: ${{ github.ref }}
-         draft: true
-         prerelease: false
-      # Upload the packaged release file
-      - name: Upload zip file
-       id: upload-zip
-       uses: actions/upload-release-asset@v1
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-       with:
-         upload_url: ${{ steps.create_release.outputs.upload_url }}
-         asset_path: ./${{ github.event.repository.name }}.zip
-         asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
-         asset_content_type: application/zip
-      # Upload the main.js
-      - name: Upload main.js
-       id: upload-main
-       uses: actions/upload-release-asset@v1
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-       with:
-         upload_url: ${{ steps.create_release.outputs.upload_url }}
-         asset_path: ./main.js
-         asset_name: main.js
-         asset_content_type: text/javascript
-      # Upload the manifest.json
-      - name: Upload manifest.json
-       id: upload-manifest
-       uses: actions/upload-release-asset@v1
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-       with:
-         upload_url: ${{ steps.create_release.outputs.upload_url }}
-         asset_path: ./manifest.json
-         asset_name: manifest.json
-         asset_content_type: application/json
-      # Upload the style.css
-      - name: Upload styles.css
-       id: upload-css
-       uses: actions/upload-release-asset@v1
-       env:
-         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-       with:
-         upload_url: ${{ steps.create_release.outputs.upload_url }}
-         asset_path: ./styles.css
-         asset_name: styles.css
-         asset_content_type: text/css
+         # TODO: release notes???
+         files: |
+           ${{ github.event.repository.name }}.zip
+           main.js
+           manifest.json
+           styles.css
+         name: ${{ steps.version.outputs.tag }}
+         tag_name: ${{ steps.version.outputs.tag }}
+         draft: true
.gitignore · 8 changes · vendored
@@ -8,8 +8,16 @@ package-lock.json
 
 # build
 main.js
+main_org.js
+main_org_*.js
 *.js.map
+meta.json
+meta-*.json
 
 
 # obsidian
 data.json
 .vscode
+
+# environment variables
+.env
 
.prettierignore · 2 changes · new file
@@ -0,0 +1,2 @@
pouchdb-browser.js
main_org.js
.prettierrc · 7 changes · new file
@@ -0,0 +1,7 @@
{
    "trailingComma": "es5",
    "tabWidth": 4,
    "printWidth": 120,
    "semi": true,
    "endOfLine": "lf"
}
README.md · 134 changes
@@ -1,96 +1,102 @@
 # Self-hosted LiveSync
-[Japanese docs](./README_ja.md) - [Chinese docs](./README_cn.md).
+[Japanese docs](./README_ja.md) [Chinese docs](./README_cn.md).
 
-Self-hosted LiveSync is a community implemented synchronization plugin.
-A self-hosted or purchased CouchDB acts as the intermediate server. Available on every obsidian-compatible platform.
+Self-hosted LiveSync is a community-developed synchronisation plug-in available on all Obsidian-compatible platforms. It leverages robust server solutions such as CouchDB or object storage systems (e.g., MinIO, S3, R2, etc.) to ensure reliable data synchronisation.
 
-Note: It has no compatibility with the official "Obsidian Sync".
+Additionally, it supports peer-to-peer synchronisation using WebRTC now (experimental), enabling you to synchronise your notes directly between devices without relying on a server.
 
 
 
-Before installing or upgrading LiveSync, please back your vault up.
+>[!IMPORTANT]
+> This plug-in is not compatible with the official "Obsidian Sync" and cannot synchronise with it.
 
 ## Features
+- Synchronise vaults efficiently with minimal traffic.
+- Handle conflicting modifications effectively.
+  - Automatically merge simple conflicts.
+- Use open-source solutions for the server.
+  - Compatible solutions are supported.
+- Support end-to-end encryption.
+- Synchronise settings, snippets, themes, and plug-ins via [Customisation Sync (Beta)](docs/settings.md#6-customization-sync-advanced) or [Hidden File Sync](docs/settings.md#7-hidden-files-advanced).
+- Enable WebRTC peer-to-peer synchronisation without requiring a `host` (Experimental).
+  - This feature is still in the experimental stage. Please exercise caution when using it.
+  - WebRTC is a peer-to-peer synchronisation method, so **at least one device must be online to synchronise**.
+  - Instead of keeping your device online as a stable peer, you can use two pseudo-peers:
+    - [livesync-serverpeer](https://github.com/vrtmrz/livesync-serverpeer): A pseudo-client running on the server for receiving and sending data between devices.
+    - [webpeer](https://github.com/vrtmrz/livesync-commonlib/tree/main/apps/webpeer): A pseudo-client for receiving and sending data between devices.
+      - A pre-built instance is available at [fancy-syncing.vrtmrz.net/webpeer](https://fancy-syncing.vrtmrz.net/webpeer/) (hosted on the vrtmrz blog site). This is also peer-to-peer. Feel free to use it.
+  - For more information, refer to the [English explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync-en.html) or the [Japanese explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync).
 
-- Visual conflict resolver included.
-- Bidirectional synchronization between devices nearly in real-time
-- You can use CouchDB or its compatibles like IBM Cloudant.
-- End-to-End encryption supported.
-- Plugin synchronization(Beta)
-- Receive WebClip from [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf) (End-to-End encryption will not be applicable.)
+This plug-in may be particularly useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons. It is also suitable for anyone seeking the peace of mind that comes with knowing their notes remain entirely private.
 
-Useful for researchers, engineers and developers with a need to keep their notes fully self-hosted for security reasons. Or just anyone who would like the peace of mind knowing that their notes are fully private.
-
-## IMPORTANT NOTICE
-
-- Do not use in conjunction with another synchronization solution (including iCloud, Obsidian Sync). Before enabling this plugin, make sure to disable all the other synchronization methods to avoid content corruption or duplication. If you want to synchronize to two or more services, do them one by one and never enable two synchronization methods at the same time.
-  This includes not putting your vault inside a cloud-synchronized folder (eg. an iCloud folder or Dropbox folder)
-- This is a synchronization plugin. Not a backup solutions. Do not rely on this for backup.
-- If the device's storage runs out, database corruption may happen.
-- Hidden files or any other invisible files wouldn't be kept in the database, thus won't be synchronized. (**and may also get deleted**)
+>[!IMPORTANT]
+> - Before installing or upgrading this plug-in, please back up your vault.
+> - Do not enable this plug-in alongside another synchronisation solution at the same time (including iCloud and Obsidian Sync).
+> - For backups, we also provide a plug-in called [Differential ZIP Backup](https://github.com/vrtmrz/diffzip).
 
 ## How to use
 
-### Get your database ready.
+### 3-minute setup - CouchDB on fly.io
 
-First, get your database ready. IBM Cloudant is preferred for testing. Or you can use your own server with CouchDB. For more information, refer below:
-1. [Setup IBM Cloudant](docs/setup_cloudant.md)
-2. [Setup your CouchDB](docs/setup_own_server.md)
+**Recommended for beginners**
 
-Note: More information about alternative hosting methods needed! Currently, [using fly.io](https://github.com/vrtmrz/obsidian-livesync/discussions/85) is being discussed.
+[](https://www.youtube.com/watch?v=7sa_I1832Xc)
 
-### Configure the plugin
+1. [Setup CouchDB on fly.io](docs/setup_flyio.md)
+2. Configure plug-in in [Quick Setup](docs/quick_setup.md)
 
-See [Quick setup guide](doccs/../docs/quick_setup.md)
+### Manually Setup
 
-## Something looks corrupted...
+1. Setup the server
+   1. [Setup CouchDB on fly.io](docs/setup_flyio.md)
+   2. [Setup your CouchDB](docs/setup_own_server.md)
+2. Configure plug-in in [Quick Setup](docs/quick_setup.md)
+> [!TIP]
+> Fly.io is no longer free. Fortunately, despite some issues, we can still use IBM Cloudant. Refer to [Setup IBM Cloudant](docs/setup_cloudant.md).
+> And also, we can use peer-to-peer synchronisation without a server. Or very cheap Object Storage -- Cloudflare R2 can be used for free.
+> HOWEVER, most importantly, we can use the server that we trust. Therefore, please set up your own server.
+> CouchDB can be run on a Raspberry Pi. (But please be careful about the security of your server).
 
-Please open the configuration link again and Answer as below:
-- If your local database looks corrupted (in other words, when your Obsidian getting weird even standalone.)
-  - Answer `No` to `Keep local DB?`
-- If your remote database looks corrupted (in other words, when something happens while replicating)
-  - Answer `No` to `Keep remote DB?`
-
-If you answered `No` to both, your databases will be rebuilt by the content on your device. And the remote database will lock out other devices. You have to synchronize all your devices again. (When this time, almost all your files should be synchronized with a timestamp. So you can use a existed vault).
-
-## Test Server
-
-Setting up an instance of Cloudant or local CouchDB is a little complicated, so I set up a [Tasting server for self-hosted-livesync](https://olstaste.vrtmrz.net/). Try it out for free!
-Note: Please read "Limitations" carefully. Do not send your private vault.
-
 ## Information in StatusBar
 
-Synchronization status is shown in statusbar.
+Synchronization status is shown in the status bar with the following icons.
 
+- Activity Indicator
+  - 📲 Network request
+- Status
+  - ⏹️ Stopped
-  - 💤 LiveSync enabled. Waiting for changes.
-  - ⚡️ Synchronization in progress.
-  - ⚠ An error occurred.
-- ↑ Uploaded chunks and metadata
-- ↓ Downloaded chunks and metadata
-- ⏳ Number of pending processes
-- 🧩 Number of files waiting for their chunks.
-If you have deleted or renamed files, please wait until ⏳ icon disappeared.
+  - 💤 LiveSync enabled. Waiting for changes
+  - ⚡️ Synchronization in progress
+  - ⚠ An error occurred
+- Statistical indicator
+  - ↑ Uploaded chunks and metadata
+  - ↓ Downloaded chunks and metadata
+- Progress indicator
+  - 📥 Unprocessed transferred items
+  - 📄 Working database operation
+  - 💾 Working write storage processes
+  - ⏳ Working read storage processes
+  - 🛫 Pending read storage processes
+  - 📬 Batched read storage processes
+  - ⚙️ Working or pending storage processes of hidden files
+  - 🧩 Waiting chunks
+  - 🔌 Working Customisation items (Configuration, snippets, and plug-ins)
+
+To prevent file and database corruption, please wait to stop Obsidian until all progress indicators have disappeared as possible (The plugin will also try to resume, though). Especially in case of if you have deleted or renamed files.
 
-## Hints
-- If a folder becomes empty after a replication, it will be deleted by default. But you can toggle this behaviour. Check the [Settings](docs/settings.md).
-- LiveSync mode drains more batteries in mobile devices. Periodic sync with some automatic sync is recommended.
-- Mobile Obsidian can not connect to a non-secure (HTTP) or a locally-signed servers, even if the root certificate is installed on the device.
-- There are no 'exclude_folders' like configurations.
-- While synchronizing, files are compared by their modification time and the older ones will be overwritten by the newer ones. Then plugin checks for conflicts and if a merge is needed, a dialog will open.
-- Rarely, a file in the database could be corrupted. The plugin will not write to local storage when a file looks corrupted. If a local version of the file is on your device, the corruption could be fixed by editing the local file and synchronizing it. But if the file does not exist on any of your devices, then it can not be rescued. In this case you can delete these items from the settings dialog.
-- To stop the boot up sequence (eg. for fixing problems on databases), you can put a `redflag.md` file at the root of your vault.
-- Q: Database is growing, how can I shrink it down?
-  A: each of the docs is saved with their past 100 revisions for detecting and resolving conflicts. Picturing that one device has been offline for a while, and comes online again. The device has to compare its notes with the remotely saved ones. If there exists a historic revision in which the note used to be identical, it could be updated safely (like git fast-forward). Even if that is not in revision histories, we only have to check the differences after the revision that both devices commonly have. This is like git's conflict resolving method. So, We have to make the database again like an enlarged git repo if you want to solve the root of the problem.
-- And more technical Information are in the [Technical Information](docs/tech_info.md)
-- If you want to synchronize files without obsidian, you can use [filesystem-livesync](https://github.com/vrtmrz/filesystem-livesync).
-- WebClipper is also available on Chrome Web Store:[obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
+## Tips and Troubleshooting
+If you are having problems getting the plugin working see: [Tips and Troubleshooting](docs/troubleshooting.md).
 
-Repo is here: [obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip). (Docs are work in progress.)
+## Acknowledgements
+The project has been in continual progress and harmony thanks to:
+- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors).
+- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors).
+- JetBrains Community Programs / Support for Open-Source Projects. <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo" height="24">
+
+May those who have contributed be honoured and remembered for their kindness and generosity.
 
 ## License
 
-The source code is licensed under the MIT License.
+Licensed under the MIT License.
README_es.md · 93 changes · new file (translated from Spanish)
@@ -0,0 +1,93 @@
<!-- For translation: 20240227r0 -->
# Self-hosted LiveSync
[English docs](./README_ja.md) - [Japanese docs](./README_ja.md) - [Chinese docs](./README_cn.md).

Self-hosted LiveSync is a community-implemented synchronisation plugin, available on every Obsidian-compatible platform, that uses CouchDB or object storage (for example, MinIO, S3, R2, etc.) as the server.



Note: This plugin cannot synchronise with the official "Obsidian Sync".

## Features

- Synchronises vaults efficiently with less traffic.
- Good handling of conflicting modifications.
  - Automatic merging of simple conflicts.
- Uses open-source solutions for the server.
  - Compatible solutions can be used.
- End-to-end encryption support.
- Synchronisation of settings, snippets, themes, and plug-ins via [Customization Sync (Beta)](#customization-sync) or [Hidden File Sync](#hiddenfilesync)
- WebClip from [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)

This plugin may be useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons, or for anyone who wants the peace of mind of knowing their notes remain fully private.

>[!IMPORTANT]
> - Before installing or upgrading this plugin, back up your vault.
> - Do not enable this plugin together with another synchronisation solution at the same time (including iCloud and Obsidian Sync).
> - This is a synchronisation plugin, not a backup solution. Do not rely on it for backups.

## How to use

### 3-minute setup - CouchDB on fly.io

**Recommended for beginners**

[](https://www.youtube.com/watch?v=7sa_I1832Xc)

1. [Set up CouchDB on fly.io](docs/setup_flyio_es.md)
2. Configure the plugin in [Quick Setup](docs/quick_setup_es.md)

### Manual setup

1. Set up the server
   1. [Set up CouchDB on fly.io](docs/setup_flyio_es.md)
   2. [Set up your CouchDB](docs/setup_own_server_es.md)
2. Configure the plugin in [Quick Setup](docs/quick_setup_es.md)

> [!TIP]
> Currently, fly.io is no longer free. Fortunately, although there are some issues, we can still use IBM Cloudant. Here is how to [set up IBM Cloudant](docs/setup_cloudant.md). It will be updated soon!


## Information in the status bar

The synchronisation status is shown in the status bar with the following icons.

- Activity indicator
  - 📲 Network request
- Status
  - ⏹️ Stopped
  - 💤 LiveSync enabled. Waiting for changes
  - ⚡️ Synchronisation in progress
  - ⚠ An error occurred
- Statistical indicator
  - ↑ Uploaded chunks and metadata
  - ↓ Downloaded chunks and metadata
- Progress indicator
  - 📥 Unprocessed transferred items
  - 📄 Database operations in progress
  - 💾 Storage write processes in progress
  - ⏳ Storage read processes in progress
  - 🛫 Pending storage read processes
  - 📬 Batched storage read processes
  - ⚙️ Hidden-file storage processes in progress or pending
  - 🧩 Waiting chunks
  - 🔌 Customisation items in progress (settings, snippets, and plugins)

To prevent file and database corruption, wait until all progress indicators have disappeared before stopping Obsidian (the plugin will also try to resume, though), especially if you have deleted or renamed files.


## Tips and Troubleshooting
If you have problems getting the plugin to work, see: [Tips and Troubleshooting](docs/troubleshooting_es.md).

## Acknowledgements

The project has progressed and been maintained in harmony thanks to:
- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors)
- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors)
- JetBrains Community Programs / Support for Open-Source Projects <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo." height="24">

May those who have contributed be honoured and remembered for their kindness and generosity.

## License

Licensed under the MIT License.
README_ja.md · 153 changes (translated from Japanese)
@@ -1,84 +1,85 @@
+<!-- For translation: 20240227r0 -->
 # Self-hosted LiveSync
 [English docs](./README.md) - [Chinese docs](./README_cn.md).
 
-**Formerly: obsidian-livesync**
-A community synchronisation plugin that uses CouchDB as its server and works on every platform where Obsidian is available
 
-An Obsidian plugin that performs bidirectional live synchronisation using a self-hosted database.
-**It is not compatible with the official Sync.**
 
-**Be sure to back up your vault before installing.**
 
-[English version](./README.md)
 
-## What this plugin can do
-- Runs on Windows, Mac, iPad, iPhone, Android, and Chromebook
-- Synchronises with a self-hosted database
-- Delivers changes to multiple devices almost in real time
-- Furthermore, changes made on other devices are delivered to each device, realising bidirectional, real-time LiveSync
-- Conflicting changes can be resolved on the spot
-- CouchDB, or IBM Cloudant as a compatible DBaaS, can be used as the synchronisation host. Your data is yours.
-- Of course, non-live synchronisation is also possible.
-- Just in case, the content sent to the server can be encrypted (beta).
-- There is also a [web clipper](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf) (not covered by end-to-end encryption)
 
-Especially recommended for researchers, designers, and developers who must observe NDAs, similar contracts and obligations, or professional ethics.
-Particularly in enterprises, you may be required to store data only on servers under your own control, even if end-to-end encryption is used.
 
-# Important notice
 
-- ❌ To avoid duplicated or corrupted files, do not use multiple synchronisation methods at the same time.
-  This includes putting your vault in a folder managed by a cloud service (for example, inside an iCloud-managed folder).
-- ⚠️ This plugin was created to propagate notes between devices, not for backup or the like. Always make backups with another solution.
-- If free storage space is exhausted, the database may become corrupted.
 
-# How to use this plugin
 
-1. Search for "Self-hosted LiveSync" in Community Plugins and install it, or download `main.js`, `manifest.json`, and `style.css` from this repository's Releases into `.obsidian/plugins/obsidian-livesync` inside your vault, then restart Obsidian.
-2. Set up the server. IBM Cloudant is handy, robust, and convenient. To self-host completely, you need to install CouchDB on your own server. For details, see below:
-   1. [Setup IBM Cloudant](docs/setup_cloudant_ja.md)
-   2. [Setup your own CouchDB](docs/setup_own_server_ja.md)
 
-Note: There seem to be cases where an IBM Cloudant account cannot be registered. We are looking for alternatives and are currently considering [using fly.io](https://github.com/vrtmrz/obsidian-livesync/discussions/85).
 
-1. Set it up with the setup wizard from [Quick setup](docs/quick_setup_ja.md).
 
-# Test server
 
-If you are hesitant to install CouchDB or set up a Cloudant instance, I have made a [test server for Self-hosted LiveSync](https://olstaste.vrtmrz.net/), so please try it.
 
-Note: Check the limitations carefully before use. And never synchronise the vault you really use.
 
-# There is a WebClipper
-I also made a WebClipper for Self-hosted LiveSync. It can be downloaded from the Chrome Web Store.
 
-[obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
 
-The repository is here: [obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip).
 
-As ever, the documentation has not caught up.
 
-# Status bar information
-The synchronisation status is shown in the status bar at the bottom right
 
-- Synchronisation status
-  - ⏹️ Synchronisation is stopped
-  - 💤 LiveSync is running, waiting for something to happen
-  - ⚡️ Synchronising
-  - ⚠ An error has occurred
-- ↑ Number of items sent
-- ↓ Number of items received
-- ⏳ Number of pending processes
-  If you have deleted or renamed files, wait until this indicator disappears.
 
-# Further notes
-- After files are synchronised, timestamps are compared and the newer file temporarily overwrites the older one. Merging then takes place depending on whether a conflict occurred.
-- Rarely, files can become corrupted. Corrupted files are not written to disk, so in practice a slightly older file often remains on the device in use. If you update that file again, the database is updated and the problem may disappear. If the file does not exist on any device, it can be deleted from the settings screen.
-- If the local database cannot be repaired properly, for example after restarting during database recovery, place a file called `redflag.md` at the top of the vault. The startup sequence will be skipped.
-- The database is getting big; can I make it smaller? → Each note is stored together with its 100 older revisions. Imagine a device that has been offline for a while finally synchronising again. That device will hold revisions slightly different from the latest. Even then, if one of those revisions exists in the remote revision history, it can be merged safely. If it is not in the revision history, the differences to check can still be narrowed to those after the most recent revision both sides have in common. Conflicts are resolved in a Git-like way. Therefore, just as with a bloated Git repository, if you essentially want to shrink the database you have to rebuild it.
-- Other technical topics are covered in [Technical information](docs/tech_info_ja.md).
-Note: It cannot synchronise with the official Sync.
 
 
-# License
+## Features
+- Synchronises vaults with high efficiency and low traffic
+- Good conflict handling
+  - Simple conflicts are merged automatically
+- Uses OSS solutions for the synchronisation server
+  - Compatible solutions can also be used
+- End-to-end encryption implemented
+- Settings, snippets, themes, and plug-ins can be synchronised
+- There is also a [web clipper](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
 
-The source code is licensed MIT.
 
+Especially recommended for researchers, designers, and developers who must observe NDAs, similar contracts and obligations, or professional ethics.
 
+>[!IMPORTANT]
+> - Always back up your vault before installing or updating
+> - Do not enable multiple synchronisation solutions at the same time (this includes iCloud and the official Sync)
+> - This plugin is a synchronisation plugin. Do not use it for backup
 
+## How to use this plugin
 
+### 3-minute setup - CouchDB on fly.io
 
+**Recommended for first-time users**
 
+[](https://www.youtube.com/watch?v=7sa_I1832Xc)
 
+1. [Set up CouchDB on fly.io](docs/setup_flyio.md)
+2. Configure the plugin with [Quick Setup](docs/quick_setup_ja.md)
 
 
+### Manually Setup
 
+1. Set up the server
+   1. [Set up CouchDB on fly.io](docs/setup_flyio.md)
+   2. [Set up CouchDB](docs/setup_own_server_ja.md)
+2. Configure the plugin with [Quick Setup](docs/quick_setup_ja.md)
 
+> [!TIP]
+> IBM Cloudant can still be used, but for several reasons we currently do not recommend it. [Setup IBM Cloudant](docs/setup_cloudant_ja.md) is still available.
 
+## Status bar explained
 
+The synchronisation status is shown in the status bar with the following icons
 
+- Activity
+  - 📲 Network connection in progress
+- Synchronisation status
+  - ⏹️ Stopped
+  - 💤 Waiting for changes (during LiveSync)
+  - ⚡️ Synchronisation in progress
+  - ⚠ Error
+- Statistics
+  - ↑ Number of uploaded chunks and metadata
+  - ↓ Number of downloaded chunks and metadata
+- Progress
+  - 📥 Number of unprocessed items after transfer
+  - 📄 Number of running database operations
+  - 💾 Number of running storage write operations
+  - ⏳ Number of running storage read operations
+  - 🛫 Number of waiting storage read operations
+  - ⚙️ Number of hidden-file operations (waiting plus running)
+  - 🧩 Number of chunks waiting to be fetched
+  - 🔌 Number of customisation-sync-related operations
 
+To avoid corrupting the database or files, wait until the progress information is no longer shown before quitting Obsidian (the plugin will also try to recover, though). Be especially careful when you have deleted or renamed files.
 
 
+## Tips and Troubleshooting
+If you run into trouble, see [Tips and Troubleshooting](docs/troubleshooting.md).
 
+## License
 
+Licensed under the MIT License.
docker-compose.traefik.yml · 46 changes · new file
@@ -0,0 +1,46 @@
# For details and other explanations about this file refer to:
# https://github.com/vrtmrz/obsidian-livesync/blob/main/docs/setup_own_server.md#traefik

version: "2.1"
services:
  couchdb:
    image: couchdb:latest
    container_name: obsidian-livesync
    user: 1000:1000
    environment:
      - COUCHDB_USER=username
      - COUCHDB_PASSWORD=password
    volumes:
      - ./data:/opt/couchdb/data
      - ./local.ini:/opt/couchdb/etc/local.ini
    # Ports not needed when already passed to Traefik
    #ports:
    #  - 5984:5984
    restart: unless-stopped
    networks:
      - proxy
    labels:
      - "traefik.enable=true"
      # The Traefik Network
      - "traefik.docker.network=proxy"
      # Don't forget to replace 'obsidian-livesync.example.org' with your own domain
      - "traefik.http.routers.obsidian-livesync.rule=Host(`obsidian-livesync.example.org`)"
      # The 'websecure' entryPoint is basically your HTTPS entrypoint. Check the next code snippet if you are encountering problems only; you probably have a working traefik configuration if this is not your first container you are reverse proxying.
      - "traefik.http.routers.obsidian-livesync.entrypoints=websecure"
      - "traefik.http.routers.obsidian-livesync.service=obsidian-livesync"
      - "traefik.http.services.obsidian-livesync.loadbalancer.server.port=5984"
      - "traefik.http.routers.obsidian-livesync.tls=true"
      # Replace the string 'letsencrypt' with your own certificate resolver
      - "traefik.http.routers.obsidian-livesync.tls.certresolver=letsencrypt"
      - "traefik.http.routers.obsidian-livesync.middlewares=obsidiancors"
      # The part needed for CORS to work on Traefik 2.x starts here
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolallowmethods=GET,PUT,POST,HEAD,DELETE"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolallowheaders=accept,authorization,content-type,origin,referer"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolalloworiginlist=app://obsidian.md,capacitor://localhost,http://localhost"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolmaxage=3600"
      - "traefik.http.middlewares.obsidiancors.headers.addvaryheader=true"
      - "traefik.http.middlewares.obsidiancors.headers.accessControlAllowCredentials=true"

networks:
  proxy:
    external: true
docs/adding_translations.md · 34 changes · new file
@@ -0,0 +1,34 @@
# How to add translations

## Getting ready

1. Clone this repository recursively.
   ```sh
   git clone --recursive https://github.com/vrtmrz/obsidian-livesync
   ```
2. Make an `ls-debug` folder under your vault's `.obsidian` folder (like `.../dev/.obsidian/ls-debug`).

## Add translations for already defined terms

1. Install dependencies and build the plug-in as a dev build.
   ```sh
   cd obsidian-livesync
   npm i -D
   npm run buildDev
   ```

2. Copy the `main.js` to the `.obsidian/plugins/obsidian-livesync` folder of your vault, and run Obsidian-Self-hosted LiveSync.
3. You will get a `missing-translation-yyyy-mm-dd.jsonl`; please fill in the new translations.
4. Build the plug-in again, and confirm that the displayed messages are as expected.
5. Merge them into `rosetta.ts`, and make the PR to `https://github.com/vrtmrz/livesync-commonlib`.

## Make messages to be translated

1. Find the message that you want to be translated.
2. Change the literal to use `$tf`, like below.
   ```diff
   - Logger("Could not determine passphrase to save data.json! You probably make the configuration sure again!", LOG_LEVEL_URGENT);
   + Logger($tf('someKeyForPassphraseError'), LOG_LEVEL_URGENT);
   ```
3. Make the PR to `https://github.com/vrtmrz/obsidian-livesync`.
4. Follow the steps of "Add translations for already defined terms" to add the translations.
docs/all_toggles.png · BIN · new file · 7.3 KiB
docs/design_docs/chunk_aggregation_by_prefix.md · 122 changes · new file
@@ -0,0 +1,122 @@
# [WITHDRAWN] Chunk Aggregation by Prefix

## Goal

To address the "document explosion" and storage bloat issues caused by the current chunking mechanism, while preserving the benefits of content-addressable storage and efficient delta synchronisation. This design aims to significantly reduce the number of documents in the database and simplify Garbage Collection (GC).

## Motivation

Our current synchronisation solution splits files into content-defined chunks, with each chunk stored as a separate document in CouchDB, identified by its hash. This architecture effectively leverages CouchDB's replication for automatic deduplication and efficient transfer.

However, this approach faces significant challenges as the number of files and edits increases:
1. **Document Explosion:** A large vault can generate millions of chunk documents, severely degrading CouchDB's performance, particularly during view building and replication.
2. **Storage Bloat & GC Difficulty:** Obsolete chunks generated during edits are difficult to identify and remove. Since CouchDB's deletion (`_deleted: true`) is a soft delete, and compaction is a heavy, space-intensive operation, unused chunks perpetually consume storage, making GC impractical for many users.
3. **The "Eden" Problem:** A previous attempt, "Keep newborn chunks in Eden", aimed to mitigate this by embedding volatile chunks within the parent document. While it reduced the number of standalone chunks, it introduced a new issue: the parent document's history (`_revs_info`) became excessively large, causing its own form of database bloat and making compaction equally necessary but difficult to manage.

This new design addresses the root cause—the sheer number of documents—by aggregating chunks into sets.

## Prerequisites

- The new implementation must maintain the core benefit of deduplication to ensure efficient synchronisation.
- The solution must not introduce a single point of bottleneck and should handle concurrent writes from multiple clients gracefully.
- The system must provide a clear and feasible strategy for Garbage Collection.
- The design should be forward-compatible, allowing for a smooth migration path for existing users.

## Outlined Methods and Implementation Plans

### Abstract

This design introduces a two-tiered document structure to manage chunks: **Index Documents** and **Data Documents**. Chunks are no longer stored as individual documents. Instead, they are grouped into `Data Documents` based on a common hash prefix. The existence and location of each chunk are tracked by `Index Documents`, which are also grouped by the same prefix. This approach dramatically reduces the total document count.

### Detailed Implementation

**1. Document Structure:**

- **Index Document:** Maps chunk hashes to their corresponding Data Document ID. Identified by a prefix of the chunk hash.
    - `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
    - Content:
      ```json
      {
          "_id": "idx:a9f1b",
          "_rev": "...",
          "chunks": {
              "a9f1b12...": "dat:a9f1b-001",
              "a9f1b34...": "dat:a9f1b-001",
              "a9f1b56...": "dat:a9f1b-002"
          }
      }
      ```
- **Data Document:** Contains the actual chunk data as base64-encoded strings. Identified by a prefix and a sequential number.
    - `_id`: `dat:{prefix}-{sequence}` (e.g., `dat:a9f1b-001`)
    - Content:
      ```json
      {
          "_id": "dat:a9f1b-001",
          "_rev": "...",
          "chunks": {
              "a9f1b12...": "...", // base64 data
              "a9f1b34...": "..." // base64 data
          }
      }
      ```
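
For concreteness, here is a minimal TypeScript sketch of the two document shapes and of how the `_id` values are derived from a chunk hash. The type names, the helpers, and the `PREFIX_LENGTH` constant are illustrative only; the `idx:`/`dat:` conventions come from this design, not from any existing codebase.

```typescript
// Illustrative types for the two-tiered structure described above.
interface IndexDoc {
    _id: `idx:${string}`;
    _rev?: string;
    chunks: Record<string, string>; // chunk hash -> Data Document _id
}

interface DataDoc {
    _id: `dat:${string}`;
    _rev?: string;
    chunks: Record<string, string>; // chunk hash -> base64-encoded chunk data
}

const PREFIX_LENGTH = 5; // corresponds to `chunk_prefix_length` below

// Derive the Index Document _id for a chunk hash.
function indexDocId(hash: string): string {
    return `idx:${hash.slice(0, PREFIX_LENGTH)}`;
}

// Derive a Data Document _id for a chunk hash and a sequence number.
function dataDocId(hash: string, sequence: number): string {
    return `dat:${hash.slice(0, PREFIX_LENGTH)}-${String(sequence).padStart(3, "0")}`;
}
```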

**2. Configuration:**

- `chunk_prefix_length`: The number of characters from the start of a chunk hash to use as a prefix (e.g., `5`). This determines the granularity of aggregation.
- `data_doc_size_limit`: The maximum size for a single Data Document to prevent it from becoming too large (e.g., 1MB). When this limit is reached, a new Data Document with an incremented sequence number is created.
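
As a small illustration, the two knobs could live in a settings object like the following; the object name and shape are hypothetical, and the values merely echo the examples above.

```typescript
// Hypothetical settings object carrying the two tuning knobs above.
const aggregationSettings = {
    chunkPrefixLength: 5,             // `chunk_prefix_length`: prefix granularity
    dataDocSizeLimit: 1024 * 1024,    // `data_doc_size_limit`: ~1 MB per Data Document
};
```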
|
||||
|
||||
**3. Write/Save Operation Flow:**
|
||||
|
||||
When a client creates new chunks:
|
||||
1. For each new chunk, determine its hash prefix.
|
||||
2. Read the corresponding `Index Document` (e.g., `idx:a9f1b`).
|
||||
3. From the index, determine which of the new chunks already exist in the database.
|
||||
4. For the **truly new chunks only**:
|
||||
a. Read the last `Data Document` for that prefix (e.g., `dat:a9f1b-005`).
|
||||
b. If it is nearing its size limit, create a new one (`dat:a9f1b-006`).
|
||||
c. Add the new chunk data to the Data Document and save it.
|
||||
5. Update the `Index Document` with the locations of the newly added chunks.
|
||||
|
||||
**4. Handling Write Conflicts:**
|
||||
|
||||
Concurrent writes to the same `Index Document` or `Data Document` from multiple clients will cause conflicts (409 Conflict). This is expected and must be handled gracefully. Since additions are incremental, the client application must implement a **retry-and-merge loop**:
|
||||
1. Attempt to save the document.
|
||||
2. On a conflict, re-fetch the latest version of the document from the server.
|
||||
3. Merge its own changes into the latest version.
|
||||
4. Attempt to save again.
|
||||
5. Repeat until successful or a retry limit is reached.
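
A minimal sketch of this loop follows, assuming a PouchDB-compatible `get`/`put` API and the Index Document shape shown above. The retry limit and the merge rule (a plain union of `chunks`, which is safe because additions are incremental) are illustrative assumptions.

```ts
// A minimal sketch of the retry-and-merge loop; not the final implementation.
import PouchDB from "pouchdb";

type IndexDoc = { _id: string; _rev?: string; chunks: Record<string, string> };

async function saveWithMerge(db: PouchDB.Database, doc: IndexDoc, maxRetries = 5): Promise<void> {
    for (let attempt = 0; attempt < maxRetries; attempt++) {
        try {
            await db.put(doc);
            return; // 1. saved successfully
        } catch (err: any) {
            if (err.status !== 409) throw err; // only 409 Conflict is retried
            // 2. Re-fetch the latest revision from the server,
            // 3. merge our (purely additive) entries into it.
            const latest = await db.get<IndexDoc>(doc._id);
            doc = { ...latest, chunks: { ...latest.chunks, ...doc.chunks } };
        } // 4.-5. loop and try again
    }
    throw new Error(`Failed to save ${doc._id} after ${maxRetries} attempts`);
}
```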
|
||||
|
||||
**5. Garbage Collection (GC):**
|
||||
|
||||
GC becomes a manageable, periodic batch process:
|
||||
1. Scan all file metadata documents to build a master set of all *currently referenced* chunk hashes.
|
||||
2. Iterate through all `Index Documents`. For each chunk listed:
|
||||
a. If the chunk hash is not in the master reference set, it is garbage.
|
||||
b. Remove the garbage entry from the `Index Document`.
|
||||
c. Remove the corresponding data from its `Data Document`.
|
||||
3. If a `Data Document` becomes empty after this process, it can be deleted.
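
A hedged sketch of this mark-and-sweep pass is shown below. It assumes that file metadata and Index Documents can each be enumerated by a key range (the `f:` and `idx:` ranges here are illustrative), and it omits batching and conflict handling.

```ts
import PouchDB from "pouchdb";

type FileMeta = { children: string[] };
type IndexDoc = { _id: string; _rev: string; chunks: Record<string, string> };
type DataDoc = { _id: string; _rev: string; chunks: Record<string, string>; _deleted?: boolean };

async function collectGarbage(db: PouchDB.Database): Promise<void> {
    // 1. Mark: build the master set of currently referenced chunk hashes.
    const referenced = new Set<string>();
    const metas = await db.allDocs<FileMeta>({ include_docs: true, startkey: "f:", endkey: "f:\uffff" });
    for (const row of metas.rows) for (const hash of row.doc?.children ?? []) referenced.add(hash);

    // 2. Sweep: walk every Index Document and drop unreferenced entries.
    const indices = await db.allDocs<IndexDoc>({ include_docs: true, startkey: "idx:", endkey: "idx:\uffff" });
    for (const row of indices.rows) {
        const idx = row.doc!;
        for (const [hash, dataDocId] of Object.entries(idx.chunks)) {
            if (referenced.has(hash)) continue; // still alive
            delete idx.chunks[hash];            // 2b. remove the index entry
            const dataDoc = await db.get<DataDoc>(dataDocId);
            delete dataDoc.chunks[hash];        // 2c. remove the chunk data
            if (Object.keys(dataDoc.chunks).length === 0) dataDoc._deleted = true; // 3. empty -> delete
            await db.put(dataDoc);
        }
        await db.put(idx);
    }
}
```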
|
||||
|
||||
## Test Strategy
|
||||
|
||||
1. **Unit Tests:** Implement tests for the conflict resolution logic (retry-and-merge loop) to ensure robustness.
|
||||
2. **Integration Tests:**
|
||||
- Verify that concurrent writes from multiple simulated clients result in a consistent, merged state without data loss.
|
||||
- Run a full synchronisation scenario and confirm the resulting database has a significantly lower document count compared to the previous implementation.
|
||||
3. **GC Test:** Simulate a scenario where files are deleted, run the GC process, and verify that orphaned chunks are correctly removed from both Index and Data documents, and that storage is reclaimed after compaction.
|
||||
4. **Migration Test:** Develop and test a "rebuild" process for existing users, which migrates their chunk data into the new aggregated structure.
|
||||
|
||||
## Documentation Strategy
|
||||
|
||||
- This design document will be published to explain the new architecture.
|
||||
- The configuration options (`chunk_prefix_length`, etc.) will be documented for advanced users.
|
||||
- A guide for the migration/rebuild process will be provided.
|
||||
|
||||
## Future Work
|
||||
|
||||
The separation of index and data opens up a powerful possibility. While this design initially implements both within CouchDB, the `Data Documents` could be offloaded to a dedicated object storage service such as **S3, MinIO, or Cloudflare R2**.
|
||||
|
||||
In such a hybrid model, CouchDB would handle only the lightweight `Index Documents` and file metadata, serving as a high-speed synchronisation and coordination layer. The bulky chunk data would reside in a more cost-effective and scalable blob store. This would represent the ultimate evolution of this architecture, combining the best of both worlds.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
This design directly addresses the scalability limitations of the original chunk-per-document model. By aggregating chunks into sets, it significantly reduces the document count, which in turn improves database performance and makes maintenance feasible. The explicit handling of write conflicts and a clear strategy for garbage collection make this a robust and sustainable long-term solution. It effectively resolves the problems identified in previous approaches, including the "Eden" experiment, by tackling the root cause of database bloat. This architecture provides a solid foundation for future growth and scalability.
|
||||
127
docs/design_docs/intention_of_chunks.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# [WIP] The design intent explanation for using metadata and chunks
|
||||
|
||||
## Abstract
|
||||
|
||||
## Goal
|
||||
|
||||
- To explain the following:
|
||||
- What metadata and chunks are
|
||||
- The design intent of using metadata and chunks
|
||||
|
||||
## Background and Motivation
|
||||
|
||||
We are using PouchDB and CouchDB for storing files and synchronising them. PouchDB is a JavaScript database that stores data on the device (browser, and of course, Obsidian), while CouchDB is a NoSQL database that stores data on the server. The two databases can be synchronised to keep data consistent across devices via the CouchDB replication protocol. This is a powerful and flexible way to store and synchronise data, including conflict management, but it is not well suited for files. Therefore, we needed to manage how to store files and synchronise them.
|
||||
|
||||
## Terminology
|
||||
|
||||
- Password:
|
||||
- A string used to authenticate the user.
|
||||
|
||||
- Passphrase:
|
||||
- A string used to encrypt and decrypt data.
|
||||
- This is not a password.
|
||||
|
||||
- Encrypt:
|
||||
- To convert data into a format that is unreadable to anyone.
|
||||
- Can be decrypted by the user who has the passphrase.
|
||||
- Should be 1:n, containing random data to ensure that even the same data, when encrypted, results in different outputs.
|
||||
|
||||
- Obfuscate:
|
||||
- To convert data into a format that is not easily readable.
|
||||
- Can be decrypted by the user who has the passphrase.
|
||||
- Should be 1:1, containing no random data; the same data is always obfuscated to the same result. It is nevertheless unreadable without the passphrase.
|
||||
|
||||
- Hash:
|
||||
- To convert data into a fixed-length string that is not easily readable.
|
||||
- Cannot be decrypted.
|
||||
- Should be 1:1, containing no random data, and the same data is always hashed to the same result.
|
||||
|
||||
## Designs
|
||||
|
||||
### Principles
|
||||
|
||||
- To synchronise and handle conflicts, we should keep the history of modifications.
|
||||
- No data should be lost. Even though some extra data may be stored, it should be removed later, safely.
|
||||
- Each stored data item should be as small as possible to transfer efficiently, but not so small as to be inefficient.
|
||||
- Any type of file should be supported, including binary files.
|
||||
- Encryption should be supported efficiently.
|
||||
- This method should not depart too far from the PouchDB/CouchDB philosophy. It needs to leave room for other `remote`s, to benefit from custom replicators.
|
||||
|
||||
As a result, we have adopted the following design.
|
||||
|
||||
- Files are stored as one metadata entry and multiple chunks.
|
||||
- Chunks are content-addressable, and the metadata contains the ids of the chunks.
|
||||
- Chunks may be referenced from multiple metadata entries. They should be efficiently managed to avoid redundancy.
|
||||
|
||||
### Metadata Design
|
||||
|
||||
The metadata contains the following information:
|
||||
|
||||
| Field    | Type                 | Description                  | Note                                                                                                |
| -------- | -------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------- |
| _id      | string               | The id of the metadata       | Created from the file path                                                                            |
| _rev     | string               | The revision of the metadata | Created by PouchDB                                                                                    |
| children | [string]             | The ids of the chunks        |                                                                                                       |
| path     | string               | The path of the file         | If `Obfuscate path` has been enabled, it has been encrypted                                           |
| size     | number               | The size of the metadata     | Not respected; for troubleshooting                                                                    |
| ctime    | string               | The creation timestamp       | Not used to compare files, but used when writing to storage                                           |
| mtime    | string               | The modification timestamp   | Used to compare files, and written to storage                                                         |
| type     | `plain` \| `newnote` | The type of the file         | Children of type `plain` are not base64 encoded, while those of `newnote` are                         |
| e_       | boolean              | The file is encrypted        | Encryption is applied during transfer to the remote; in local storage, this property does not exist   |
|
||||
|
||||
#### Decision Rule for `_id` of Metadata
|
||||
|
||||
```ts
// Note: This is pseudo code. The original used `OBFUSCATE_PATH` both as a
// flag and as a function; they are split apart here for clarity.
let _id = PATH;
if (!HANDLE_FILES_AS_CASE_SENSITIVE) {
    _id = _id.toLowerCase();
}
if (_id.startsWith("_")) {
    _id = "/" + _id;
}
if (SHOULD_OBFUSCATE_PATH) {
    _id = `f:${obfuscatePath(_id, E2EE_PASSPHRASE)}`;
}
return _id;
```
|
||||
|
||||
#### Expected Questions
|
||||
|
||||
- Why do we need to handle files as case-insensitive?
|
||||
- Some filesystems are case-sensitive, while others are not. For example, Windows is not case-sensitive, while Linux is. Therefore, we need to handle files as case-insensitive to manage conflicts.
|
||||
- The trade-off is that you will not be able to manage files whose paths differ only in case, so this handling can be disabled if all of your devices are case-sensitive.
|
||||
- Why obfuscate the path?
|
||||
- E2EE only encrypts the content of the file, not metadata. Hence, E2EE alone is not enough to protect the vault completely. The path is also part of the metadata, so it should be obfuscated. This is a trade-off between security and performance. However, if you title a note with sensitive information, you should obfuscate the path.
|
||||
- What is `f:`?
|
||||
- It is a prefix indicating that the path is obfuscated, used to distinguish obfuscated paths from normal ones. For file enumeration, Self-hosted LiveSync has to scan documents to find the metadata while excluding chunks and other information, and this prefix makes that distinction possible.
|
||||
- Why does an unobfuscated path not start with `f:`?
|
||||
- For compatibility. Self-hosted LiveSync, by its nature, must also be able to handle files created with newer versions as far as possible.
|
||||
|
||||
### Chunk Design
|
||||
|
||||
#### Chunk Structure
|
||||
|
||||
The chunk contains the following information:
|
||||
|
||||
| Field | Type         | Description               | Note                                                                                                |
| ----- | ------------ | ------------------------- | --------------------------------------------------------------------------------------------------- |
| _id   | `h:{string}` | The id of the chunk       | Created from the hash of the chunk content                                                            |
| _rev  | string       | The revision of the chunk | Created by PouchDB                                                                                    |
| data  | string       | The content of the chunk  |                                                                                                       |
| type  | `leaf`       | Fixed                     |                                                                                                       |
| e_    | boolean      | The chunk is encrypted    | Encryption is applied during transfer to the remote; in local storage, this property does not exist   |
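
For reference, the chunk shape implied by the table can be paraphrased as a TypeScript type; this is a restatement of the table, not the actual source definition.

```ts
// Paraphrase of the table above; not the actual source definition.
type ChunkDoc = {
    _id: `h:${string}`; // derived from the hash of the chunk content
    _rev: string;       // assigned by PouchDB
    data: string;       // the chunk content
    type: "leaf";       // fixed
    e_?: boolean;       // present only on the remote, when encrypted
};
```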
|
||||
|
||||
**SORRY, TO BE WRITTEN, BUT WE HAVE IMPLEMENTED `v2`, WHICH REQUIRES MORE INFORMATION.**
|
||||
|
||||
### How they are unified
|
||||
|
||||
## Deduplication and Optimisation
|
||||
|
||||
## Synchronisation Strategy
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
## Security and Privacy
|
||||
|
||||
## Edge Cases
|
||||
117
docs/design_docs/tired_chunk_pack.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# [IN DESIGN] Tiered Chunk Storage with Live Compaction
|
||||
|
||||
**VERY IMPORTANT NOTE:** This design must be used with the new journal synchronisation method; the previous Journal Sync is **NOT** compatible. Otherwise, we risk introducing the bloat of changes from Hot-Packs into the bucket (CouchDB/PouchDB can synchronise only the most recent changes, or resolve conflicts). Please proceed with caution.
|
||||
|
||||
## Goal
|
||||
|
||||
To establish a highly efficient, robust, and scalable synchronisation architecture by introducing a tiered storage system inspired by Log-Structured Merge-Trees (LSM-Trees). This design aims to address the challenges of real-time synchronisation, specifically the massive generation of transient data, while minimising storage bloat and ensuring high performance.
|
||||
|
||||
## Motivation
|
||||
|
||||
Our previous designs, including "Chunk Aggregation by Prefix", successfully addressed the "document explosion" problem. However, the introduction of real-time editor synchronisation exposed a new, critical challenge: the constant generation of short-lived "garbage" chunks during user input. This "garbage storm" places immense pressure on storage, I/O, and the Garbage Collection (GC) process.
|
||||
|
||||
A simple aggregation strategy is insufficient because it treats all data equally, mixing valuable, stable chunks with transient, garbage chunks in permanent storage. This leads to storage bloat and inefficient compaction. We require a system that can intelligently distinguish between "hot" (volatile) and "cold" (stable) data, processing them in the most efficient manner possible.
|
||||
|
||||
## Outlined Methods and Implementation Plans
|
||||
|
||||
### Abstract
|
||||
|
||||
This design implements a two-tiered storage system within CouchDB.
|
||||
1. **Level 0 – Hot Storage:** A set of "Hot-Packs", one for each active client. These act as fast, append-only logs for all newly created chunks. They serve as a temporary staging area, absorbing the "garbage storm" of real-time editing.
|
||||
2. **Level 1 – Cold Storage:** The permanent, immutable storage for stable chunks, consisting of **Index Documents** for fast lookups and **Data Documents (Cold-Packs)** for storing chunk data.
|
||||
|
||||
A background "Compaction" process continuously promotes stable chunks from Hot Storage to Cold Storage, while automatically discarding garbage. This keeps the permanent storage clean and highly optimised.
|
||||
|
||||
### Detailed Implementation
|
||||
|
||||
**1. Document Structure:**
|
||||
|
||||
- **Hot-Pack Document (Level 0):** A per-client, append-only log.
|
||||
- `_id`: `hotpack:{client_id}` (`client_id` could be the same as the `deviceNodeID` used in the `accepted_nodes` in MILESTONE_DOC; enables database 'lockout' for safe synchronisation)
|
||||
- Content: A log of chunk creation events.
|
||||
```json
{
    "_id": "hotpack:a9f1b12...",
    "_rev": "...",
    "log": [
        { "hash": "abc...", "data": "...", "ts": ..., "file_id": "file1" },
        { "hash": "def...", "data": "...", "ts": ..., "file_id": "file2" }
    ]
}
```
|
||||
|
||||
- **Index Document (Level 1):** A fast, prefix-based lookup table for stable chunks.
|
||||
- `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
|
||||
- Content: Maps a chunk hash to the ID of the Cold-Pack it resides in.
|
||||
```json
{
    "_id": "idx:a9f1b",
    "chunks": { "a9f1b12...": "dat:1678886400" }
}
```
|
||||
|
||||
- **Cold-Pack Document (Level 1):** An immutable data block created by the compaction process.
|
||||
- `_id`: `dat:{timestamp_or_uuid}` (e.g., `dat:1678886400123`)
|
||||
- Content: A collection of stable chunks.
|
||||
```json
{
    "_id": "dat:1678886400123",
    "chunks": { "a9f1b12...": "...", "c3d4e5f...": "..." }
}
```
|
||||
|
||||
- **Hot-Pack List Document:** A central registry of all active Hot-Packs. This might be a computed document that clients maintain in memory on startup.
|
||||
- `_id`: `hotpack_list`
|
||||
- Content: `{"active_clients": ["hotpack:a9f1b12...", "hotpack:c3d4e5f..."]}`
|
||||
|
||||
**2. Write/Save Operation Flow (Real-time Editing):**
|
||||
|
||||
1. A client generates a new chunk.
|
||||
2. It **immediately appends** the chunk object (`{hash, data, ts, file_id}`) to its **own** Hot-Pack document's `log` array within its local PouchDB. This operation is extremely fast.
|
||||
3. The PouchDB synchronisation process replicates this change to the remote CouchDB and other clients in the background. No other Hot-Packs are consulted during this write operation.
|
||||
|
||||
**3. Read/Load Operation Flow:**
|
||||
|
||||
To find a chunk's data:
|
||||
1. The client first consults its in-memory list of active Hot-Pack IDs (see section 5).
|
||||
2. It searches for the chunk hash in all **Hot-Pack documents**, starting from its own, then others. It reads them in reverse log order (newest first).
|
||||
3. If not found, it consults the appropriate **Index Document (`idx:...`)** to get the ID of the Cold-Pack.
|
||||
4. It then reads the chunk data from the corresponding **Cold-Pack document (`dat:...`)**.
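
A sketch of this read path under the document shapes above; the five-character prefix and the missing-document handling are simplifying assumptions.

```ts
import PouchDB from "pouchdb";

type HotPack = { log: { hash: string; data: string }[] };
type IndexDoc = { chunks: Record<string, string> };
type ColdPack = { chunks: Record<string, string> };

async function readChunk(db: PouchDB.Database, hash: string, hotPackIds: string[]): Promise<string | undefined> {
    // 1.-2. Search all Hot-Packs, newest log entries first.
    for (const id of hotPackIds) {
        const hot = await db.get<HotPack>(id).catch(() => undefined);
        const entry = hot?.log.slice().reverse().find((e) => e.hash === hash);
        if (entry) return entry.data;
    }
    // 3. Consult the Index Document for this prefix...
    const idx = await db.get<IndexDoc>(`idx:${hash.slice(0, 5)}`).catch(() => undefined);
    const coldPackId = idx?.chunks[hash];
    if (!coldPackId) return undefined;
    // 4. ...then read the chunk data from the Cold-Pack it points to.
    const cold = await db.get<ColdPack>(coldPackId);
    return cold.chunks[hash];
}
```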
|
||||
|
||||
**4. Compaction & Promotion Process (The "GC"):**
|
||||
|
||||
This is a background task run periodically by clients, or triggered when the number of unprocessed log entries exceeds a threshold (to maintain the ability to synchronise with the remote database, which has a limited document size).
|
||||
1. The client takes its own Hot-Pack (`hotpack:{client_id}`) and scans its `log` array from the beginning (oldest first).
|
||||
2. For each chunk in the log, it checks if the chunk is still referenced in the latest revision of any file.
|
||||
- **If not referenced (Garbage):** The log entry is simply discarded.
|
||||
- **If referenced (Stable):** The chunk is added to a "promotion batch".
|
||||
3. After scanning a certain number of log entries, the client takes the "promotion batch".
|
||||
4. It creates one or more new, immutable **Cold-Pack (`dat:...`)** documents to store the chunk data from the batch.
|
||||
5. It updates the corresponding **Index (`idx:...`)** documents to point to the new Cold-Pack(s).
|
||||
6. Once the promotion is successfully saved to the database, it **removes the processed entries from its Hot-Pack's `log` array**. This is a critical step to prevent reprocessing and keep the Hot-Pack small.
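
The whole cycle can be condensed into a sketch like the one below. `isReferenced` (a check against the latest file metadata), the five-character prefix, and the single-batch processing are assumptions; size limits, batching thresholds, and conflict handling are omitted.

```ts
import PouchDB from "pouchdb";

type HotPack = { _id: string; _rev: string; log: { hash: string; data: string }[] };

async function compactHotPack(db: PouchDB.Database, clientId: string, isReferenced: (hash: string) => boolean): Promise<void> {
    const hot = await db.get<HotPack>(`hotpack:${clientId}`);
    // 1.-2. Scan the log: keep stable chunks, silently drop garbage.
    const batch = hot.log.filter((e) => isReferenced(e.hash));
    if (batch.length > 0) {
        // 3.-4. Write one new, immutable Cold-Pack for the promotion batch.
        const coldId = `dat:${Date.now()}`;
        await db.put({ _id: coldId, chunks: Object.fromEntries(batch.map((e) => [e.hash, e.data])) });
        // 5. Point the Index Documents at the new Cold-Pack.
        for (const e of batch) {
            const idxId = `idx:${e.hash.slice(0, 5)}`;
            const idx = await db.get<any>(idxId).catch(() => ({ _id: idxId, chunks: {} }));
            idx.chunks[e.hash] = coldId;
            await db.put(idx);
        }
    }
    // 6. Remove the processed entries from the Hot-Pack's log.
    await db.put({ ...hot, log: [] });
}
```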
|
||||
|
||||
**5. Hot-Pack List Management:**
|
||||
|
||||
To know which Hot-Packs to read, clients will:
|
||||
1. On startup, load the `hotpack_list` document into memory.
|
||||
2. Use PouchDB's live `changes` feed to monitor the creation of new `hotpack:*` documents.
|
||||
3. Upon detecting an unknown Hot-Pack, the client updates its in-memory list and attempts to update the central `hotpack_list` document (on a best-effort basis, with conflict resolution).
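
A sketch of steps 2 and 3, using PouchDB's live changes feed; updating the central `hotpack_list` document is indicated only as a comment.

```ts
import PouchDB from "pouchdb";

const db = new PouchDB("livesync");
const knownHotPacks = new Set<string>(); // 1. seeded from `hotpack_list` on startup

// 2. Watch for newly created Hot-Pack documents.
db.changes({ live: true, since: "now" }).on("change", (change) => {
    if (change.id.startsWith("hotpack:") && !knownHotPacks.has(change.id)) {
        knownHotPacks.add(change.id);
        // 3. Best-effort: merge the new id into `hotpack_list`,
        // resolving conflicts with a retry-and-merge loop.
    }
});
```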
|
||||
|
||||
## Planned Test Strategy
|
||||
|
||||
1. **Unit Tests:** Test the Compaction/Promotion logic extensively. Ensure garbage is correctly identified and stable chunks are promoted correctly.
|
||||
2. **Integration Tests:** Simulate a multi-client real-time editing session.
|
||||
- Verify that writes are fast and responsive.
|
||||
- Confirm that transient garbage chunks do not pollute the Cold Storage.
|
||||
- Confirm that after a period of inactivity, compaction runs and the Hot-Packs shrink.
|
||||
3. **Stress Tests:** Simulate many clients joining and leaving to test the robustness of the `hotpack_list` management.
|
||||
|
||||
## Documentation Strategy
|
||||
|
||||
- This design document will serve as the core architectural reference.
|
||||
- The roles of each document type (Hot-Pack, Index, Cold-Pack, List) will be clearly explained for future developers.
|
||||
- The logic of the Compaction/Promotion process will be detailed.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
This tiered storage design is a direct evolution, born from the lessons of previous architectures. It embraces the ephemeral nature of data in real-time applications. By creating a "staging area" (Hot-Packs) for volatile data, it protects the integrity and performance of the permanent "cold" storage. The Compaction process acts as a self-cleaning mechanism, ensuring that only valuable, stable data is retained long-term. This is not just an optimisation; it is a fundamental shift that enables robust, high-performance, and scalable real-time synchronisation on top of CouchDB.
|
||||
97
docs/design_docs/tired_chunk_pack_bucket.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# [IN DESIGN] Tiered Chunk Storage for Bucket Sync
|
||||
|
||||
## Goal
|
||||
|
||||
To evolve the "Journal Sync" mechanism by integrating the Tiered Storage architecture. This design aims to drastically reduce the size and number of sync packs, minimise storage consumption on the backend bucket, and establish a clear, efficient process for Garbage Collection, all while remaining protocol-agnostic.
|
||||
|
||||
## Motivation
|
||||
|
||||
The original "Journal Sync" liberates us from CouchDB's protocol, but it still packages and transfers entire document changes, including bulky and often transient chunk data. In a real-time or frequent-editing scenario, this results in:
|
||||
1. **Bloated Sync Packs:** Packs become large with redundant or short-lived chunk data, increasing upload and download times.
|
||||
2. **Inefficient Storage:** The backend bucket stores numerous packs containing overlapping and obsolete chunk data, wasting space.
|
||||
3. **Impractical Garbage Collection:** Identifying and purging obsolete *chunk data* from within the pack-based journal history is extremely difficult.
|
||||
|
||||
This new design addresses these problems by fundamentally changing *what* is synchronised in the journal packs. We will synchronise lightweight metadata and logs, while handling bulk data separately.
|
||||
|
||||
## Outlined methods and implementation plans
|
||||
|
||||
### Abstract
|
||||
|
||||
This design adapts the Tiered Storage model for a bucket-based backend. The backend bucket is partitioned into distinct areas for different data types. The "Journal Sync" process is now responsible for synchronising only the "hot" volatile data and lightweight metadata. A separate, asynchronous "Compaction" process, which can be run by any client, is responsible for migrating stable data into permanent, deduplicated "cold" storage.
|
||||
|
||||
### Detailed Implementation
|
||||
|
||||
**1. Bucket Structure:**
|
||||
|
||||
The backend bucket will have four distinct logical areas (prefixes):
|
||||
- `packs/`: For "Journal Sync" packs, containing the journal of metadata and Hot-Log changes.
|
||||
- `hot_logs/`: A dedicated area for each client's "Hot-Log," containing newly created, volatile chunks.
|
||||
- `indices/`: For prefix-based Index files, mapping chunk hashes to their permanent location in Cold Storage.
|
||||
- `cold_chunks/`: For deduplicated, stable chunk data, stored by content hash.
|
||||
|
||||
**2. Data Structures (Client-side PouchDB & Backend Bucket):**
|
||||
|
||||
- **Client Metadata:** Standard file metadata documents, kept in the client's PouchDB.
|
||||
- **Hot-Log (in `hot_logs/`):** A per-client, append-only log file on the bucket.
|
||||
- Path: `hot_logs/{client_id}.jsonlog`
|
||||
- Content: A sequence of JSON objects, one per line, representing chunk creation events. `{"hash": "...", "data": "...", "ts": ..., "file_id": "..."}`
|
||||
|
||||
- **Index File (in `indices/`):** A JSON file for a given hash prefix.
|
||||
- Path: `indices/{prefix}.json`
|
||||
- Content: Records each chunk hash that already exists in Cold Storage; the hash itself is the chunk's key in `cold_chunks/`. `{"hash_abc...": true, "hash_def...": true}`
|
||||
|
||||
- **Cold Chunk (in `cold_chunks/`):** The raw, immutable, deduplicated chunk data.
|
||||
- Path: `cold_chunks/{chunk_hash}`
|
||||
|
||||
**3. "Journal Sync" - Send/Receive Operation (Not Live):**
|
||||
|
||||
This process is now extremely lightweight.
|
||||
1. **Send:**
|
||||
a. The client takes all newly generated chunks and **appends them to its own Hot-Log file (`hot_logs/{client_id}.jsonlog`)** on the bucket.
|
||||
b. The client updates its local file metadata in PouchDB.
|
||||
c. It then creates a "Journal Sync" pack containing **only the PouchDB journal of the file metadata changes.** This pack is very small as it contains no chunk data.
|
||||
d. The pack is uploaded to `packs/`.
|
||||
|
||||
2. **Receive:**
|
||||
a. The client downloads new packs from `packs/` and applies the metadata journal to its local PouchDB.
|
||||
b. It downloads the latest versions of all **other clients' Hot-Log files** from `hot_logs/`.
|
||||
c. Now the client has a complete, up-to-date view of all metadata and all "hot" chunks.
|
||||
|
||||
**4. Read/Load Operation Flow:**
|
||||
|
||||
To find a chunk's data:
|
||||
1. The client searches for the chunk hash in its local copy of all **Hot-Logs**.
|
||||
2. If not found, it downloads and consults the appropriate **Index file (`indices/{prefix}.json`)**.
|
||||
3. If the index confirms existence, it downloads the data from **`cold_chunks/{chunk_hash}`**.
|
||||
|
||||
**5. Compaction & Promotion Process (Asynchronous "GC"):**
|
||||
|
||||
This is a deliberate, offline-capable process that any client can choose to run.
|
||||
1. The client "leases" its own Hot-Log for compaction.
|
||||
2. It reads its entire `hot_logs/{client_id}.jsonlog`.
|
||||
3. For each chunk in the log, it checks if the chunk is referenced in the *current, latest state* of the file metadata.
|
||||
- **If not referenced (Garbage):** The log entry is discarded.
|
||||
- **If referenced (Stable):** The chunk is added to a "promotion batch."
|
||||
4. For each chunk in the promotion batch:
|
||||
a. It checks the corresponding `indices/{prefix}.json` to see if the chunk already exists in Cold Storage.
|
||||
b. If it does not exist, it **uploads the chunk data to `cold_chunks/{chunk_hash}`** and updates the `indices/{prefix}.json` file.
|
||||
5. Once the entire Hot-Log has been processed, the client **deletes its `hot_logs/{client_id}.jsonlog` file** (or truncates it to empty), effectively completing the cycle.
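
A sketch of this process against an abstract bucket API follows; the `Bucket` interface, the five-character prefix, and `isReferenced` are assumptions standing in for the real storage client and metadata check.

```ts
interface Bucket { // stand-in for the real storage client (S3, WebDAV, ...)
    get(path: string): Promise<string | undefined>;
    put(path: string, body: string): Promise<void>;
    delete(path: string): Promise<void>;
}

async function compactHotLog(bucket: Bucket, clientId: string, isReferenced: (hash: string) => boolean): Promise<void> {
    // 1.-2. Lease and read the client's own Hot-Log.
    const raw = (await bucket.get(`hot_logs/${clientId}.jsonlog`)) ?? "";
    const entries = raw.split("\n").filter((line) => line.trim()).map((line) => JSON.parse(line));
    for (const entry of entries) {
        if (!isReferenced(entry.hash)) continue; // 3. garbage: discard
        // 4. Promote: upload only if not already in Cold Storage.
        const idxPath = `indices/${entry.hash.slice(0, 5)}.json`;
        const idx = JSON.parse((await bucket.get(idxPath)) ?? "{}");
        if (!idx[entry.hash]) {
            await bucket.put(`cold_chunks/${entry.hash}`, entry.data);
            idx[entry.hash] = true;
            await bucket.put(idxPath, JSON.stringify(idx));
        }
    }
    // 5. Complete the cycle by clearing the Hot-Log.
    await bucket.delete(`hot_logs/${clientId}.jsonlog`);
}
```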
|
||||
|
||||
## Test strategy
|
||||
|
||||
1. **Component Tests:** Test the Compaction process independently. Ensure it correctly identifies stable versus garbage chunks and populates the `cold_chunks/` and `indices/` areas correctly.
|
||||
2. **Integration Tests:**
|
||||
- Simulate a multi-client sync cycle. Verify that sync packs in `packs/` are small.
|
||||
- Confirm that `hot_logs/` are correctly created and updated.
|
||||
- Run the Compaction process and verify that data migrates correctly to cold storage and the hot log is cleared.
|
||||
3. **Conflict Tests:** Simulate two clients trying to compact the same index file simultaneously and ensure the outcome is consistent (for example, via a locking mechanism or last-write-wins).
|
||||
|
||||
## Documentation strategy
|
||||
|
||||
- This design document will be the primary reference for the bucket-based architecture.
|
||||
- The structure of the backend bucket (`packs/`, `hot_logs/`, etc.) will be clearly defined.
|
||||
- A detailed description of how to run the Compaction process will be provided to users.
|
||||
|
||||
## Consideration and Conclusion
|
||||
|
||||
By applying the Tiered Storage model to "Journal Sync", we transform it into a remarkably efficient system. The synchronisation of everyday changes becomes extremely fast and lightweight, as only metadata journals are exchanged. The heavy lifting of data deduplication and permanent storage is offloaded to a separate, asynchronous Compaction process. This clear separation of concerns makes the system highly scalable, minimises storage costs, and finally provides a practical, robust solution for Garbage Collection in a protocol-agnostic, bucket-based environment.
|
||||
50
docs/design_docs_of_journalsync.md
Normal file
@@ -0,0 +1,50 @@
|
||||
## The design document of the journal sync
|
||||
|
||||
Original title: Synchronise without CouchDB
|
||||
|
||||
### Goal
|
||||
- Synchronise vaults without CouchDB
|
||||
|
||||
### Motivation
|
||||
- Serving CouchDB is not particularly easy.
|
||||
- A full-spec DBaaS (paid IBM Cloudant) is a bit expensive, and alternatives are lacking.
|
||||
- Securing alternatives, rather than depending on just one protocol.
|
||||
|
||||
### Prerequisite
|
||||
- We should have multiple implementations of the server software.
|
||||
- We should also be able to use SaaS, with a choice of options.
|
||||
- They should come at a reasonable cost, ideally free of charge for trials.
|
||||
- We should be able to serve an instance of the server software as OSS, with transparency, the ability to audit, and evidence that audits have actually taken place.
|
||||
|
||||
### Methods and implementations
|
||||
|
||||
Ordinarily, the local PouchDB and the remote CouchDB are synchronised by sending each missing document through several round trips of their replication protocol. However, to achieve this plan, we cannot rely on CouchDB and its protocols. This limitation is harsh, but overcoming it means gaining new possibilities. After some trials, it was concluded that synchronisation could be completed even if the available actions were limited to uploading, downloading, and retrieving the file list. This means we can use any old-fashioned WebDAV server, or sophisticated object storage such as self-hosted MinIO, S3, and R2, or anything else we like. This is realised by each client sharing and complementing the differences in the journal. The focus is therefore on how to identify those differences and send them without dynamic communication.
|
||||
|
||||
All clients manage their data in PouchDB. As is probably well known, PouchDB keeps its own journal.
|
||||
|
||||
First, each client records the point in the journal up to which it last sent. When sending, the client packs everything from that point to the latest entry, and then updates its record. This pack is uploaded to the server under a name starting with the timestamp of its creation. This is the send operation.
|
||||
|
||||
Conversely, when receiving, the client fetches, in order, the packs on the server that it has not yet received. This is easy, as the pack names are in date order. When the process completes successfully, the names of the received files are recorded, and the journal entries from the packs are reflected into the client's own database. Conflict resolution is left to PouchDB, so the client only needs to apply the differences. And here is the key: the client records the ID and revision of every document that appeared in the journal and was applied.
|
||||
|
||||
This record comes into play when creating a pack: the client omits any document recorded as received and applied. Having been received and applied means it was already sent by another client and exists on the server. This ensures that unnecessary transmissions do not take place.
|
||||
|
||||
Synchronisation is then always started by receiving. This is a little trick to avoid including unnecessary documents in the pack.
|
||||
|
||||
These behaviours allow clients to voluntarily send and receive only the missing parts of the journal that are not stored on the server, without having to communicate with each other, and still keep a single, consistent journal on the server.
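
The send side of this scheme can be sketched as follows; `upload` stands in for whichever storage backend is chosen, and `received` holds the 'id@rev' pairs recorded while applying other clients' packs. This is an illustration of the behaviour described above, not the committed implementation.

```ts
import PouchDB from "pouchdb";

async function sendPack(
    db: PouchDB.Database,
    upload: (name: string, body: string) => Promise<void>,
    lastSeq: string | number,
    received: Set<string>
): Promise<string | number> {
    const changes = await db.changes({ since: lastSeq, include_docs: true });
    // Omit documents we received from other clients; they already exist on the server.
    const docs = changes.results.map((r) => r.doc!).filter((d) => !received.has(`${d._id}@${d._rev}`));
    if (docs.length > 0) {
        // Packs are named by creation timestamp, so receivers can fetch them in order.
        await upload(`${Date.now()}.pack.json`, JSON.stringify(docs));
    }
    return changes.last_seq; // record this as the new "sent up to here" position
}
```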
|
||||
|
||||
The source code implementing this has already been committed to the repository.
|
||||
|
||||
### Test strategy
|
||||
|
||||
This implementation replaces the synchronisation performed by CouchDB. Therefore, testing was simply done by comparing the same changes to the same vault, replicated in CouchDB, with those done by this implementation.
|
||||
|
||||
### Documentation strategy
|
||||
|
||||
- Documentation should cover at least a quick setup.
|
||||
- As several server implementations can be selected, specific configuration values are omitted from the description.
|
||||
- A MinIO set-up might be nice to have. However, it is not considered essential.
|
||||
- It would be a good opportunity to also publish these design documents.
|
||||
|
||||
### Consideration and Conclusion
|
||||
|
||||
This design offers a novel approach to journal synchronisation without relying on CouchDB. It leverages PouchDB's journaling capabilities and uses simple server-side storage for efficient data exchange. Hence, the new design could be said to have gained a broader outlook.
|
||||
106
docs/design_docs_of_keep_newborn_chunks.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# Keep newborn chunks in Eden
|
||||
|
||||
Notice: Deprecated. Please refer to the results section of this document.
|
||||
|
||||
## Goal
|
||||
|
||||
Reduce the number of volatile chunks, and reduce the storage usage of the remote database over the middle to long term.
|
||||
|
||||
## Motivation
|
||||
|
||||
- In the current implementation, Self-hosted LiveSync splits documents into metadata and multiple chunks. In particular, chunks are split so that they do not exceed a certain length.
|
||||
- This is to optimise the transfer and take advantage of the properties of CouchDB. This also complies with the restriction of IBM Cloudant on the size of a single document.
|
||||
- However, creating chunks halfway through each editing operation increases the number of unnecessary chunks.
|
||||
- Chunks are shared by several documents. For this reason, it is not clear whether these chunks are needed or not unless all revisions of all documents are checked. This makes it difficult to remove unnecessary data.
|
||||
- On the other hand, chunks are split at boundaries that divide Markdown neatly, to ensure relatively accurate de-duplication even when they are created simultaneously on multiple devices. Therefore, it is unlikely that data from a half-finished edit will be reused.
|
||||
- For this reason, we have made features such as Batch save available, but they are not a fundamental solution.
|
||||
- As a result, there is a large amount of data that cannot be erased and is probably unused. Therefore, `Fetch chunks on demand` is currently performed for optimal communication.
|
||||
- If the generation of unnecessary chunks is sufficiently reduced, this function will become unnecessary.
|
||||
- The problem is that this unnecessary chunking slows down both local and remote operations.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- The implementation must be able to control the size of the document appropriately so that it does not become non-transferable (1).
|
||||
- The implementation must be such that data corruption can be avoided even if forward compatibility is not maintained; by the nature of Self-hosted LiveSync, connections from older versions are expected.
|
||||
- Viewed as a feature:
|
||||
- This feature should be disabled for migration users.
|
||||
- This feature should be enabled for new users and after rebuilds of migrated users.
|
||||
- Therefore, returning to the implementation view: ideally, the implementation should be such that data recovery can be achieved by immediately upgrading after replication.
|
||||
|
||||
## Outlined methods and implementation plans
|
||||
|
||||
### Abstract
|
||||
|
||||
To store and transfer only stable chunks independently, and to share them between multiple documents once stabilised, new chunks (i.e. chunks considered not yet stable) are instead stored within the parent document and transferred together with it. In this case, care should be taken not to exceed prerequisite (1).
|
||||
|
||||
If this is achieved, unstable chunks will not be transferred as standalone documents; and even when they are transferred, the chunk data is stored inside the document, so the space can later be reclaimed by compaction.
|
||||
|
||||
Details are given below.
|
||||
|
||||
1. The document will henceforth have the property eden.
|
||||
```typescript
// Partial type
type EntryWithEden = {
    eden: {
        [key: DocumentID]: {
            data: string;
            epoch: number; // The document revision at which this chunk was born.
        };
    };
};
```
|
||||
2. The following configuration items are added:
|
||||
Note: These configurations should be shared between clients as `Tweak values`.
|
||||
- useEden : boolean
|
||||
- Max chunks in eden : number
|
||||
- Max Total chunk lengths in eden: number
|
||||
- Max age while in eden: number
|
||||
3. In the document saving operation, chunks are added to the Eden within each document, tagged with the revision number of the existing document. If some chunks in the Eden are no longer used in the operating revision, they are removed.
|
||||
Then, some chunks are chosen to graduate as independent `chunk` documents according to the following rules, and they leave the Eden (a sketch follows the list below):
|
||||
- Those that have already been confirmed to exist as independent chunks.
|
||||
- Ideally, this confirmation of existence is made by a fast first-pass check, e.g. a Bloom filter.
|
||||
- Those whose length exceeds the configured maximum length.
|
||||
- Those that have aged beyond the configured value, counted from their epoch to the operating revision.
|
||||
- Those beyond the point at which the cumulative length exceeds the configured maximum, when the chunks are arranged in reverse order of the revision in which they were generated; likewise, those beyond the configured maximum number of items.
|
||||
4. In the document loading operation, chunks are first read from the Eden.
|
||||
5. In End-to-End Encryption, property `eden` of documents will also be encrypted.
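
The graduation rules in step 3 can be sketched as below. This is a loose interpretation: the configuration names, the newest-first ordering, and the threshold checks are assumptions, and the real implementation may differ.

```ts
// A hedged sketch of the graduation rules; names are illustrative only.
type EdenChunk = { id: string; data: string; epoch: number };

function selectGraduates(
    eden: EdenChunk[],
    currentRev: number,
    conf: { maxChunks: number; maxTotalLength: number; maxAge: number; maxChunkLength: number },
    existsAsChunk: (id: string) => boolean // ideally a fast first-pass check, e.g. a Bloom filter
): EdenChunk[] {
    const graduates: EdenChunk[] = [];
    // Newest first, so the oldest entries fall past the count/length limits.
    const sorted = [...eden].sort((a, b) => b.epoch - a.epoch);
    let total = 0;
    sorted.forEach((c, i) => {
        total += c.data.length;
        if (
            existsAsChunk(c.id) ||                 // already exists as an independent chunk
            c.data.length > conf.maxChunkLength || // too long to stay in the Eden
            currentRev - c.epoch > conf.maxAge ||  // aged beyond the configured value
            i >= conf.maxChunks ||                 // beyond the maximum item count
            total > conf.maxTotalLength            // beyond the cumulative length limit
        ) {
            graduates.push(c);
        }
    });
    return graduates;
}
```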
|
||||
|
||||
### Note
|
||||
|
||||
- When this feature is enabled, forward compatibility is temporarily lost. However, the old version detects the situation as missing chunks, and the affected data is simply not reflected in its storage. Therefore, no data loss will occur.
|
||||
|
||||
## Test strategy
|
||||
|
||||
1. Confirm that synchronisation with the previous version is possible with this feature disabled.
|
||||
2. With this feature enabled, connect from the previous version and confirm that errors are detected in the previous version but the files are not corrupted.
|
||||
3. Ensure that the two versions with this feature enabled can withstand normal use.
|
||||
|
||||
## Documentation strategy
|
||||
|
||||
- This document is published, and will be referred from the release note.
|
||||
- Indeed, we lack a fulfilled configuration table. Efforts will be made and, if they can be produced, this document will then be referenced. But not required while in the experimental or beta feature.
|
||||
- However, this might be an essential feature. Further efforts are desired.
|
||||
|
||||
## Results from actual operation
|
||||
|
||||
After implementing this feature, we have been using it for a while. The following results were obtained.
|
||||
|
||||
- The drawbacks were expected to be negligible, but they turned out to be real problems:
|
||||
- A document with `Eden` has a considerably larger history than a document without `Eden`.
|
||||
- Self-hosted LiveSync does not perform compaction aggressively, which results in the remote database becoming partially bloated.
|
||||
- Compaction of the remote database (CouchDB) requires as much free space as the size of the database itself. Therefore, compaction is no longer possible once we have reached the maximum size of the database; by the time we detect the problem, it is too late.
|
||||
- We mentioned that `We need compaction` in previous sections. However, it was very hard to determine whether compaction was required until the database had already bloated. (Of course, compacting takes time and, literally, costs some documents their history; it is not a good idea to perform it frequently and meaninglessly. A manual decision is needed, but that is genuinely difficult for ordinary users.)
|
||||
|
||||
### Consideration and Conclusion
|
||||
|
||||
This feature results in two aspects:
|
||||
|
||||
- For users who are familiar with CouchDB, this feature is somewhat useful. They can watch and handle the database by themselves.
|
||||
- For users who are not familiar with CouchDB, i.e., ordinary users, this feature is not very useful. They are not familiar with the database and do not know how to handle it; therefore, they cannot decide whether compaction is required.
|
||||
|
||||
Hence, this feature will be kept as an experimental feature, but it is not enabled by default. In addition, it is marked as deprecated. A detailed notice would be noisy for users who are not familiar with CouchDB, so the details are kept in this document for the future.
|
||||
It is not recommended to use this feature unless you are familiar with CouchDB and database management.
|
||||
|
||||
Vorotamoroz has written this document. Bias: I am the first author of this plug-in, and I am familiar with CouchDB.
|
||||
|
||||
Research and development was frozen on 2025-04-11, but bugs will be fixed if they are found. Please feel free to report them.
|
||||
55
docs/design_docs_of_sharing_tweak_value.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# Sharing `Tweak values`
|
||||
|
||||
NOTE: This is the design document of a planned feature. It is planned but not yet implemented as of v0.23.3. It has not reached design freeze and will be amended from time to time.
|
||||
|
||||
## Goal
|
||||
|
||||
Share `Tweak values` between clients to match the chunk lengths, and match per-server configurations for better performance.
|
||||
|
||||
## Motivation
|
||||
|
||||
- In the current implementation, Self-hosted LiveSync splits documents into metadata and multiple chunks. In particular, chunks are split so that they do not exceed a certain length.
|
||||
- This is to optimise the transfer and take advantage of the properties of CouchDB. This also complies with the restriction of IBM Cloudant on the size of a single document.
|
||||
- The length of this chunk is adjusted according to a configured factor. Therefore, if this is inconsistent between clients, de-duplication will not work. This is because, in fact, they point to the same content in total, but are split in different places. This results in unnecessary transfers or storage consumption.
|
||||
- The same applies to hash algorithms.
|
||||
- There are more configurations that are `preferred to be matched`, even if not strictly required, such as the maximum size of files to be handled and the interval between requests to the remote database, unless there are specific circumstances.
|
||||
- To avoid the tragedies of "too many toggles", "unexpected transfer amounts", or "poor performance" all at once, the plug-in should know about these problems, or potential problems, and be able to let us know.
|
||||
|
||||
## Prerequisite
|
||||
- We must be informed of a discrepancy in a configured value that is required to be absolutely consistent and be able to make a decision on the spot.
|
||||
- We should be able to see on the configuration dialogue, that there is a discrepancy between configured values that should be matched, and it should be possible to adjust them to a specific one of them (or default).
|
||||
- We must not be exposed to the unexpected, such as leaking credentials or other secrets.
|
||||
|
||||
## Outlined methods and implementation plans
|
||||
### Abstract
|
||||
- In the current implementation, each client checks the remote database for the existence of their node information, to detect whether the remote database accepts them.
|
||||
- This is what 'Lock' is all about.
|
||||
- To achieve this feature, the client will also send each configuration value. However, the configuration contains credentials and/or secret values. Hence we cannot send all of them.
|
||||
- With a favourable prediction, Self-hosted LiveSync will continue to grow in features. Each time this happens, the number of configuration values to be kept secret will also increase. Therefore, they must be handled by an allow-list.
|
||||
- These allow-listed configuration values are the `Tweak values`.
|
||||
- If the plug-in detects mismatched `Tweak values` when checking the remote database, it will ask us to decide which wins (mine or theirs).
|
||||
- Node information is an ordinary document; therefore, it is replicated and saved locally. While showing the dialogue, a notice is shown for each `Match preferred` configuration.
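
As an illustration of the allow-list idea, the sketch below filters a settings object down to shareable values; the key names here are hypothetical.

```ts
// Illustrative only: extracting shareable `Tweak values` via an allow-list.
const TWEAK_VALUE_KEYS = ["customChunkSize", "hashAlg", "maxFileSize"] as const; // hypothetical names

function extractTweakValues(settings: Record<string, unknown>): Record<string, unknown> {
    // Only allow-listed keys are copied; credentials and other secrets
    // never leave the client, because they are simply not listed.
    return Object.fromEntries(
        TWEAK_VALUE_KEYS.filter((key) => key in settings).map((key) => [key, settings[key]])
    );
}
```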
|
||||
|
||||
## Note
|
||||
This feature should be mostly harmless. It will not be possible to disable it.
|
||||
|
||||
## Test strategy
|
||||
|
||||
A: During synchronisation.
|
||||
1. No message shall be displayed with all settings matched.
|
||||
2. A message shall be displayed when there are mismatched items that are required to match.
|
||||
1. The setting values can be changed according to the message.
|
||||
2. The message can be ignored.
|
||||
3. The message shall not be displayed even if there are mismatched items which are only recommended to be matched.
|
||||
|
||||
B: On the setting dialogue.
|
||||
1. All mismatched items shall be highlighted in some way.
|
||||
|
||||
## Documentation strategy
|
||||
|
||||
- This document is published, and will be referred from the release note.
|
||||
- Indeed, we lack a fulfilled configuration table. Efforts will be made and, if they can be produced, this document will then be referenced. But not required while in the experimental or beta feature.
|
||||
- However, this might be an essential feature. Further efforts are desired.
|
||||
|
||||
### Consideration and Conclusion
|
||||
To be described after implementation, testing, and release.
|
||||
@@ -1,97 +1,129 @@
|
||||
# Quick setup
|
||||
The Setup wizard has been implemented since v0.15.0. This simplifies the initial set-up.
|
||||
|
||||
Note: The subsequent devices should be set up using the `Copy setup URI` and `Open setup URI`.
|
||||
[Japanese docs](./quick_setup_ja.md) - [Chinese docs](./quick_setup_cn.md).
|
||||
|
||||
## How to open and use wizard
|
||||
Open from `🪄 Setup wizard` in the setting dialogue. If there is no configuration or no synchronisation settings have been activated, it should already be open.
|
||||
The plugin has many configuration options to deal with different circumstances. However, only a few settings are required in normal cases. Therefore, `The Setup wizard` has been implemented to simplify the setup.
|
||||
|
||||

|
||||
|
||||
### Discard the existing configuration and set up
|
||||
If you have made any settings, this button allows you to discard them all before setting up.
|
||||
There are three methods to set up Self-hosted LiveSync.
|
||||
|
||||
### Do not discard the existing configuration and set up
|
||||
Simply reconfigure. Be careful: in wizard mode, you cannot see all configuration items, even those already configured.
|
||||
1. [Using setup URIs](#1-using-setup-uris) *(Recommended)*
|
||||
2. [Minimal setup](#2-minimal-setup)
|
||||
3. [Full manual setup and enable on this dialogue](#3-manually-setup)
|
||||
|
||||
Pressing `Next` on any of these will put the configuration dialog into wizard mode.
|
||||
## At the first device
|
||||
|
||||
### Wizard mode
|
||||
### 1. Using setup URIs
|
||||
|
||||
> [!TIP]
|
||||
> What is the setup URI? Why is it required?
|
||||
> The setup URI is an encrypted representation of the Self-hosted LiveSync configuration as a URI, starting with `obsidian://setuplivesync?settings=`. It is encrypted with a passphrase so that it can be shared relatively securely between devices. It is a bit long, but it is one line, and it allows a whole series of settings to be applied at once without any inconsistencies.
|
||||
>
|
||||
> If you have configured the remote database by [Automated setup on Fly.io](./setup_flyio.md#a-very-automated-setup) or [set up your server with the tool](./setup_own_server.md#1-generate-the-setup-uri-on-a-desktop-device-or-server), **you should have one of them**
|
||||
|
||||
In this procedure, [this video](https://youtu.be/7sa_I1832Xc?t=146) may help us.
|
||||
|
||||
1. Click `Use` button (Or launch `Use the copied setup URI` from Command palette).
|
||||
2. Paste the Setup URI into the dialogue
|
||||
3. Type the passphrase of the Setup URI
|
||||
4. Answer `yes` for `Importing LiveSync's conf, OK?`.
|
||||
5. Answer `Set it up as secondary or subsequent device` for `How would you like to set it up?`.
|
||||
6. Initialisation will begin; please wait a while.
|
||||
7. You will be asked about hidden file synchronisation; answer as you like.
|
||||
1. If you are new to Self-hosted LiveSync, we can configure it later, so skip it for now.
|
||||
8. Synchronisation has been started! `Reload app without saving` is recommended after the indicators of Self-hosted LiveSync disappear.
|
||||
|
||||
OK, we can proceed to the [next step](#).
|
||||
|
||||
### 2. Minimal setup
|
||||
|
||||
If you do not have any setup URI, press the `Start` button. The setting dialogue turns into wizard mode and will display only minimal items.
|
||||
|
||||
>[!TIP]
|
||||
> We can generate the setup URI with the tool at any time. Please use [this tool](./setup_own_server.md#1-generate-the-setup-uri-on-a-desktop-device-or-server).
|
||||
|
||||

|
||||
|
||||
We can set it up step by step.
|
||||
|
||||
## Remote Database configuration
|
||||
#### Select the remote type
|
||||
|
||||
### Remote database configuration
|
||||
1. Select the Remote Type from dropdown list.
|
||||
We now have a choice between CouchDB (and its compatibles) and Object Storage (MinIO, S3, R2). CouchDB is the first choice and is recommended; Object Storage support is an experimental feature.
|
||||
|
||||
Enter the information in the database we have set up.
|
||||
#### Remote configuration
|
||||
|
||||
##### CouchDB
|
||||
|
||||
Enter the information for the database we have set up.
|
||||
|
||||

|
||||
|
||||
### End to End Encryption
|
||||
##### Object Storage
|
||||
|
||||

|
||||
1. Enter the information for the S3 API and bucket.
|
||||
|
||||
If End-to-End encryption is enabled, the chance that a third party who does not know the passphrase can read the contents of the remote database, in the event that it is leaked, is reduced. So we strongly recommend enabling it.
|
||||
Encryption is based on 256-bit AES-GCM.
|
||||
This setting can be disabled if you are inside a closed network and it is clear that you will not be accessed by third parties.
|
||||

|
||||
|
||||
### Test database connection and Check database configuration
|
||||
Note 1: if you use S3, you can leave the Endpoint URL empty.
|
||||
Note 2: if your Object Storage cannot be configured fully for CORS, you may be able to connect to the server by enabling the `Use Custom HTTP Handler` toggle.
|
||||
|
||||
Here we can check the status of the connection to the database and the database settings.
|
||||
2. Press `Test` of `Test Connection` once and ensure you can connect to the Object Storage.
|
||||
|
||||
#### Only CouchDB: Test database connection and Check database configuration
|
||||
|
||||
We can check the connectivity to the database, and the database settings.
|
||||
|
||||

|
||||
|
||||
#### Test Database Connection
|
||||
Check whether we can connect to the database at all. If it fails, there are a number of possible reasons, but run `Check database configuration` once and check whether it fails there too.
|
||||
#### Only CouchDB: Check and Fix database configuration
|
||||
|
||||
#### Check database configuration
|
||||
|
||||
Check the database settings and fix any deficiencies on the spot.
|
||||
Check the database settings and fix any problems on the spot.
|
||||
|
||||

|
||||
|
||||
This item may vary depending on the connection. In the above case, press all three Fix buttons.
|
||||
If the Fix buttons disappear and all become check marks, we are done.
|
||||
|
||||

|
||||
#### Confidentiality configuration (Optional but very preferred)
|
||||
|
||||
### Next
|
||||
Go to the Local Database configuration.
|
||||

|
||||
|
||||
### Discard exist database and proceed
|
||||
Discard the contents of the Remote database and go to the Local Database configuration.
|
||||
Enable End-to-End encryption and the contents of your notes will be encrypted the moment they leave the device. We strongly recommend enabling it. `Path Obfuscation` also obfuscates filenames; it is now stable and recommended.
|
||||
|
||||
## Local Database configuration
|
||||
These settings can be disabled if you are inside a closed network and it is clear that you will not be accessed by third parties.
|
||||
|
||||

|
||||
> [!TIP]
|
||||
> Encryption is based on 256-bit AES-GCM.
|
||||
|
||||
Configure the local database. If we already have a Vault with Self-hosted LiveSync installed whose directory name is the same as the one we are currently setting up, please specify a suffix different from the Vault already set up.
|
||||
We should proceed to the Next step.
|
||||
|
||||
## Miscellaneous
|
||||
Finally, finish the miscellaneous configurations and select a preset for synchronisation.
|
||||
#### Sync Settings
|
||||
Finally, finish the wizard by selecting a preset for synchronisation.
|
||||
|
||||
Note: If you are going to use Object Storage, you cannot select `LiveSync`.
|
||||
|
||||

|
||||
|
||||
The `Show status inside editor` can be enabled to your liking. If enabled, the status is displayed in the top right-hand corner of the editor.
|
||||
Select any synchronisation methods we want to use and `Apply`. If database initialisation is required, it will be performed at this time. When `All done!` is displayed, we are ready to synchronise.
|
||||
|
||||

|
||||
|
||||
From Presets, select the synchronisation method we want to use and `Apply` to initialise and build the local and remote databases as required.
|
||||
If `All done!` is displayed, we are done. `Copy setup URI` will open automatically, and we will be asked for a passphrase to encrypt the `Setup URI`.
|
||||
The dialogue `Copy settings as a new setup URI` will open automatically. Please input a passphrase to encrypt the new `Setup URI`. (This passphrase is to encrypt the setup URI, not the vault.)
|
||||
|
||||

|
||||
|
||||
Set the passphrase as you like.
|
||||
The Setup URI will be copied to the clipboard, which you can then transfer to the second and subsequent devices in some way.
|
||||
The Setup URI will be copied to the clipboard; please make a note of it (not in Obsidian).
|
||||
|
||||
# How to set up the second and subsequent units
|
||||
After installing Self-hosted LiveSync on the device, select `Open setup URI` from the command palette and enter the setup URI you transferred. Afterwards, enter your passphrase and a setup wizard will open.
|
||||
Answer the following.
|
||||
>[!TIP]
|
||||
We can copy this at any time via `Copy current settings as a new setup URI`.
|
||||
|
||||
- `Yes` to `Importing LiveSync's conf, OK?`
|
||||
- `Set it up as secondary or subsequent device` to `How would you like to set it up?`.
|
||||
### 3. Manually setup
|
||||
|
||||
Then the configuration will take effect and replication will start. Your files will be synchronised soon!
|
||||
It is strongly recommended to perform the "minimal setup" first and configure the remaining items after making sure synchronisation works.
|
||||
|
||||
However, if you have some specific reasons to configure it manually, please click the `Enable` button of `Enable LiveSync on this device as the set-up was completed manually`.
|
||||
Also, please copy the setup URI via `Copy current settings as a new setup URI` and make a note of it (not in Obsidian).
|
||||
|
||||
## At the subsequent device
|
||||
After installing Self-hosted LiveSync on the first device, we should have a setup URI. **The first choice is to use it**. Please share it with the device you want to set up.
|
||||
|
||||
It is exactly the same as [Using setup URIs](#1-using-setup-uris) on the first device. Please refer to it.
|
||||
|
||||
93
docs/quick_setup_cn.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# 快速配置 (Quick setup)
|
||||
|
||||
该插件有较多配置项, 可以应对不同的情况. 不过, 实际使用的设置并不多. 因此, 我们采用了 "设置向导 (The Setup wizard)" 来简化初始设置.
|
||||
|
||||
Note: 建议使用 `Copy setup URI` and `Open setup URI` 来设置后续设备.
|
||||
|
||||
## 设置向导 (The Setup wizard)
|
||||
|
||||
在设置对话框中打开 `🧙♂️ Setup wizard`. 如果之前未配置插件, 则会自动打开该页面.
|
||||
|
||||

|
||||
|
||||
- 放弃现有配置并进行设置
|
||||
如果您先前有过任何设置, 此按钮允许您在设置前放弃所有更改.
|
||||
|
||||
- 保留现有配置和设置
|
||||
快速重新配置. 请注意, 在向导模式下, 您无法看到所有已经配置过的配置项.
|
||||
|
||||
在上述选项中按下 `Next`, 配置对话框将进入向导模式 (wizard mode).
|
||||
|
||||
### 向导模式 (Wizard mode)
|
||||
|
||||

|
||||
|
||||
接下来将介绍如何逐步使用向导模式.
|
||||
|
||||
## 配置远程数据库
|
||||
|
||||
### 开始配置远程数据库
|
||||
|
||||
输入已部署好的数据库的信息.
|
||||
|
||||

|
||||
|
||||
#### 测试数据库连接并检查数据库配置
|
||||
|
||||
我们可以检查数据库的连接性和数据库设置.
|
||||
|
||||

|
||||
|
||||
#### 测试数据库连接
|
||||
|
||||
检查是否能成功连接数据库. 如果连接失败, 可能是多种原因导致的, 但请先点击 `Check database configuration` 来检查数据库配置是否有问题.
|
||||
|
||||
#### 检查数据库配置
|
||||
|
||||
检查数据库设置并修复问题.
|
||||
|
||||

|
||||
|
||||
Config check 的显示内容可能因不同连接而异. 在上图情况下, 按下所有三个修复按钮.
|
||||
如果修复按钮消失, 全部变为复选标记, 则表示修复完成.
|
||||
|
||||
### 加密配置
|
||||
|
||||

|
||||
|
||||
为您的数据库加密, 以防数据库意外曝光; 启用端到端加密后, 笔记内容在离开设备时就会被加密. 我们强烈建议启用该功能. `路径混淆 (Path Obfuscation)` 还能混淆文件名. 现已稳定并推荐使用.
|
||||
加密基于 256 位 AES-GCM.
|
||||
如果你在一个封闭的网络中, 而且很明显第三方不会访问你的文件, 则可以禁用这些设置.
|
||||
|
||||

|
||||
|
||||
#### Next
|
||||
|
||||
转到同步设置.
|
||||
|
||||
#### 放弃现有数据库并继续
|
||||
|
||||
清除远程数据库的内容, 然后转到同步设置.
|
||||
|
||||
### 同步设置
|
||||
|
||||
最后, 选择一个同步预设完成向导.
|
||||
|
||||

|
||||
|
||||
选择我们要使用的任何同步方法, 然后 `Apply` 初始化并按要求建立本地和远程数据库. 如果显示 `All done!`, 我们就完成了. `Copy setup URI` 将自动打开, 并要求我们输入密码以加密 `Setup URI`.
|
||||
|
||||

|
||||
|
||||
根据需要设置密码.
|
||||
设置 URI (Setup URI) 将被复制到剪贴板, 然后您可以通过某种方式将其传输到第二个及后续设备.
|
||||
|
||||
## 如何设置第二单元和后续单元 (the second and subsequent units)
|
||||
|
||||
在第一台设备上安装 Self-hosted LiveSync 后, 从命令面板上选择 `Open setup URI`, 然后输入您传输的设置 URI (Setup URI). 然后输入密码, 安装向导就会打开.
|
||||
在弹窗中选择以下内容.
|
||||
|
||||
- `Importing LiveSync's conf, OK?` 选择 `Yes`
|
||||
- `How would you like to set it up?`. 选择 `Set it up as secondary or subsequent device`
|
||||
|
||||
然后, 配置将生效并开始复制. 您的文件很快就会同步! 您可能需要关闭设置对话框并重新打开, 才能看到设置字段正确填充, 但它们都将设置好.
|
||||
@@ -1,10 +1,10 @@
|
||||
# Quick setup
|
||||
v0.15.0からSetup wizardが実装されました。これで、初回セットアップがシンプルになります。
|
||||
このプラグインには、いろいろな状況に対応するための非常に多くの設定オプションがあります。しかし、実際に使用する設定項目はそれほど多くはありません。そこで、初期設定を簡略化するために、「セットアップウィザード」を実装しています。
|
||||
※なお、次のデバイスからは、`Copy setup URI`と`Open setup URI`を使ってセットアップしてください。
|
||||
|
||||
|
||||
## Wizardの使い方
|
||||
`🪄 Setup wizard` から開きます。もしセットアップされていなかったり、同期設定が何も有効になっていない場合はデフォルトで開いています。
|
||||
`🧙‍♂️ Setup wizard` から開きます。もしセットアップされていなかったり、同期設定が何も有効になっていない場合はデフォルトで開いています。
|
||||
|
||||

|
||||
|
||||
@@ -32,20 +32,12 @@ v0.15.0からSetup wizardが実装されました。これで、初回セット
|
||||
|
||||
これらはデータベースをセットアップした際に決めた情報です。
|
||||
|
||||
### End to End暗号化の設定
|
||||
|
||||

|
||||
|
||||
End to End暗号化を有効にした場合、万が一Remote databaseの内容が流出してもPassphraseを知らない第三者にそれを読まれる可能性が低くなります。そのため、有効化を強く推奨します。
|
||||
暗号化は256bitのAES-GCMを採用しています。
|
||||
この設定は、あなたが閉じたネットワークの内側にいて、かつ第三者からアクセスされない事が明確な場合には無効にできます。
|
||||
|
||||
### Test database connectionとCheck database configuraion
|
||||
### Test database connectionとCheck database configuration
|
||||
ここで、データベースへの接続状況と、データベース設定を確認します。
|
||||

|
||||
|
||||
#### Test Database Connection
|
||||
データベースに接続出来るか自体を確認します。失敗する場合はいくつか理由がありますが、一度Check database configurationを行ってそちらでも失敗するか確認してください。
|
||||
データベースに接続できるか自体を確認します。失敗する場合はいくつか理由がありますが、一度Check database configurationを行ってそちらでも失敗するか確認してください。
|
||||
|
||||
#### Check database configuration
|
||||
データベースの設定を確認し、不備がある場合はその場で修正します。
|
||||
@@ -55,6 +47,15 @@ End to End暗号化を有効にした場合、万が一Remote databaseの内容
|
||||
この項目は接続先によって異なる場合があります。上記の場合、みっつのFixボタンを順にすべて押してください。
|
||||
Fixボタンがなくなり、すべてチェックマークになれば完了です。
|
||||
|
||||
### 機密性設定
|
||||
|
||||

|
||||
|
||||
意図しないデータベースの暴露に備えて、End to End Encryptionを有効にします。この項目を有効にした場合、デバイスを出る瞬間にノートの内容が暗号化されます。`Path Obfuscation`を有効にすると、ファイル名も難読化されます。現在は安定しているため、こちらも推奨されます。
|
||||
暗号化には256bitのAES-GCMを採用しています。
|
||||
これらの設定は、あなたが閉じたネットワークの内側にいて、かつ第三者からアクセスされない事が明確な場合には無効にできます。
|
||||
|
||||
|
||||

|
||||
|
||||
### Next
|
||||
@@ -63,20 +64,13 @@ Fixボタンがなくなり、すべてチェックマークになれば完了
|
||||
### Discard exist database and proceed
|
||||
すでにRemote databaseがある場合、Remote databaseの内容を破棄してから次へ進みます
|
||||
|
||||
## Local Database configuration
|
||||

|
||||
ローカルのデータベースを設定します。もし、すでにSelf-hosted LiveSyncをインストールしたVaultがあり、そのVaultと同じデータベース名を使用している場合は、ここですでに設定したVaultとは異なるsuffixを指定してください。
|
||||
|
||||
## Miscellaneous
|
||||
最後にその他の設定を行います。
|
||||
## Sync Settings
|
||||
最後に同期方法の設定を行います。
|
||||
|
||||

|
||||
|
||||
`Show status inside editor`はお好みで有効化してください。有効にするとエディターの右上にステータスが表示されます。
|
||||
|
||||

|
||||
|
||||
Presetsから、使用する同期方法を選び`Apply`を行うと、必要に応じてローカル・リモートのデータベースを初期化・構築します。
|
||||
Presetsから、いずれかの同期方法を選び`Apply`を行うと、必要に応じてローカル・リモートのデータベースを初期化・構築します。
|
||||
All done! と表示されれば完了です。自動的に、`Copy setup URI`が開き、`Setup URI`を暗号化するパスフレーズを聞かれます。
|
||||
|
||||

|
||||
|
||||
854
docs/settings.md
@@ -1,267 +1,759 @@
|
||||
NOTE: This document surely became outdated. I'll improve this doc in a while. but your contributions are always welcome.
|
||||
NOTE: This document is not complete. I'll improve this doc in a while, but your contributions are always welcome.
|
||||
|
||||
# Settings of this plugin
|
||||
# Settings of Self-hosted LiveSync
|
||||
|
||||
The settings dialog has been quite long, so I split each configuration into tabs.
|
||||
If you feel something, please feel free to inform me.
|
||||
There are many settings in Self-hosted LiveSync. This document describes each setting in detail (it is a reference, not a how-to). Configuration and settings are divided into several categories, each indicated by an icon. The icons are as follows:
|
||||
|
||||
| icon | description |
|
||||
| :---: | ----------------------------------------------------------------- |
|
||||
| 🛰️ | [Remote Database Configurations](#remote-database-configurations) |
|
||||
| 📦 | [Local Database Configurations](#local-database-configurations) |
|
||||
| ⚙️ | [General Settings](#general-settings) |
|
||||
| 🔁 | [Sync Settings](#sync-settings) |
|
||||
| 🔧 | [Miscellaneous](#miscellaneous) |
|
||||
| 🧰 | [Hatch](#miscellaneous) |
|
||||
| 🔌 | [Plugin and its settings](#plugin-and-its-settings) |
|
||||
| 🚑 | [Corrupted data](#corrupted-data) |
|
||||
| Icon | Description |
|
||||
| :--: | ------------------------------------------------------------------ |
|
||||
| 💬 | [0. Change Log](#0-change-log) |
|
||||
| 🧙‍♂️ | [1. Setup](#1-setup) |
|
||||
| ⚙️ | [2. General Settings](#2-general-settings) |
|
||||
| 🛰️ | [3. Remote Configuration](#3-remote-configuration) |
|
||||
| 🔄 | [4. Sync Settings](#4-sync-settings) |
|
||||
| 🚦 | [5. Selector (Advanced)](#5-selector-advanced) |
|
||||
| 🔌 | [6. Customization sync (Advanced)](#6-customization-sync-advanced) |
|
||||
| 🧰 | [7. Hatch](#7-hatch) |
|
||||
| 🔧 | [8. Advanced (Advanced)](#8-advanced-advanced) |
|
||||
| 💪 | [9. Power users (Power User)](#9-power-users-power-user) |
|
||||
| 🩹 | [10. Patches (Edge Case)](#10-patches-edge-case) |
|
||||
| 🎛️ | [11. Maintenance](#11-maintenance) |
|
||||
|
||||
## Remote Database Configurations
|
||||
Configure the settings of the synchronization server. If any synchronization is enabled, you can't edit this section; please disable all synchronization before changing it.
|
||||
## 0. Change Log
|
||||
|
||||
### URI
|
||||
The URI of CouchDB. In the case of Cloudant, it's the "External Endpoint (preferred)".
|
||||
**Do not end it with a slash** when it doesn't contain the database name.
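For example (hypothetical hostnames):

```
https://couch.example.net            <- OK: no trailing slash
https://couch.example.net/obsidian   <- OK: ends with the database name
https://couch.example.net/           <- NG: trailing slash without a database name
```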
|
||||
This pane shows version-up information. You can check what has changed in recent versions.
|
||||
|
||||
### Username
|
||||
Your CouchDB username. One with administrator privileges is preferred.
|
||||
## 1. Setup
|
||||
|
||||
### Password
|
||||
Your CouchDB's Password.
|
||||
Note: This password is saved into your Obsidian's vault in plain text.
|
||||
This pane is used for setting up Self-hosted LiveSync. There are several ways to set it up.
|
||||
|
||||
### Database Name
|
||||
The Database name to synchronize.
|
||||
⚠️ If it does not exist, it will be created automatically.
|
||||
### 1. Quick Setup
|
||||
|
||||
The most preferred method to set up Self-hosted LiveSync. You can set it up with a few clicks.
|
||||
|
||||
### End to End Encryption
|
||||
Encrypt your database. It affects only the database; your files are left as plain text.
|
||||
#### Connect with Setup URI
|
||||
|
||||
The encryption algorithm is AES-GCM.
|
||||
Set up Self-hosted LiveSync with the `setup URI` which has been [copied from another device](#copy-current-settings-as-a-new-setup-uri) or produced by the setup script.
|
||||
|
||||
Note: If you want to use "Plugins and their settings", you have to enable this.
|
||||
#### Manual setup
|
||||
|
||||
### Passphrase
|
||||
The passphrase used as the encryption key. Please use a long text.
|
||||
Step-by-step setup for Self-hosted LiveSync. You can set up Self-hosted LiveSync manually with minimal setting items.
|
||||
|
||||
### Apply
|
||||
Enable End-to-End encryption and set its passphrase for use in replication.
|
||||
If you change the passphrase of an existing database, overwriting the remote database is strongly recommended.
|
||||
#### Enable LiveSync
|
||||
|
||||
This button only appears when the setup has not been completed. If you have completed the setup manually, you can enable LiveSync on this device with this button.
|
||||
|
||||
### Overwrite remote database
|
||||
Overwrite the remote database with the local database, using the passphrase you applied.
|
||||
### 2. To setup other devices
|
||||
|
||||
#### Copy the current settings to a Setup URI
|
||||
|
||||
### Rebuild
|
||||
Rebuild remote and local databases with local files. It will delete all document history and retained chunks, and shrink the database.
|
||||
You can copy the current settings as a new setup URI. This URI can be used to set up other devices, as described in [Use the copied setup URI](#use-the-copied-setup-uri).
|
||||
|
||||
### Test Database connection
|
||||
You can check the connection by clicking this button.
|
||||
### 3. Reset
|
||||
|
||||
### Check database configuration
|
||||
You can check and modify your CouchDB's configuration from here directly.
|
||||
#### Discard existing settings and databases
|
||||
|
||||
### Lock remote database
|
||||
Other devices are banned from the database while you have it locked.
|
||||
If something has gone wrong on other devices, you can protect the vault and the remote database from your device.
|
||||
Reset the Self-hosted LiveSync settings and databases.
|
||||
**Hazardous operation. Please be careful when using this.**
|
||||
|
||||
## Local Database Configurations
|
||||
"Local Database" is created inside your obsidian.
|
||||
### 4. Enable extra and advanced features
|
||||
|
||||
### Batch database update
|
||||
Delay database updates until replication starts, another file is opened, window visibility changes, or a file event other than modification occurs.
|
||||
This option cannot be used together with LiveSync.
|
||||
To keep the set-up dialogue simple, some panes are hidden by default. You can enable them here.
|
||||
|
||||
#### Enable advanced features
|
||||
|
||||
### Fetch rebuilt DB.
|
||||
If one device rebuilds or locks the remote database, every other device will be locked out from the remote database until it fetches the rebuilt DB.
|
||||
Setting key: useAdvancedMode
|
||||
|
||||
### Minimum chunk size and LongLine threshold
|
||||
The configuration of chunk splitting.
|
||||
The following panes will be shown when you enable this setting.
|
||||
| Icon | Description |
|
||||
| :--: | ------------------------------------------------------------------ |
|
||||
| 🚦 | [5. Selector (Advanced)](#5-selector-advanced) |
|
||||
| 🔌 | [6. Customization sync (Advanced)](#6-customization-sync-advanced) |
|
||||
| 🔧 | [8. Advanced (Advanced)](#8-advanced-advanced) |
|
||||
|
||||
Self-hosted LiveSync splits each note into chunks for efficient synchronization. Each chunk should be longer than the "Minimum chunk size".
|
||||
#### Enable poweruser features
|
||||
|
||||
Specifically, the length of a chunk is determined in the following order.
|
||||
Setting key: usePowerUserMode
|
||||
|
||||
1. Find the nearest newline character, and if it is farther than LongLineThreshold, this piece becomes an independent chunk.
|
||||
The following panes will be shown when you enable this setting.
|
||||
| Icon | Description |
|
||||
| :--: | ------------------------------------------------------------------ |
|
||||
| 💪 | [9. Power users (Power User)](#9-power-users-power-user) |
|
||||
|
||||
2. If not, find the nearest of these items:
|
||||
1. Newline character
|
||||
2. Empty line (Windows style)
|
||||
3. Empty line (non-Windows style)
|
||||
3. Compare the farthest of these three positions with the next "\[newline\]#" position, and pick the shorter piece as the chunk.
|
||||
#### Enable edge case treatment features
|
||||
|
||||
This rule was made empirically from my dataset. If it behaves badly on your data, please give me the information.
|
||||
Setting key: useEdgeCaseMode
|
||||
|
||||
You can dump the saved note structure with `Dump informations of this doc`. Replace every character with "x" except newlines and "#" when sending the information to me.
|
||||
The following panes will be shown when you enable this setting.
|
||||
| Icon | Description |
|
||||
| :--: | ------------------------------------------------------------------ |
|
||||
| 🩹 | [10. Patches (Edge Case)](#10-patches-edge-case) |
|
||||
|
||||
Default values are 20 letters and 250 letters.
|
||||
## 2. General Settings
|
||||
|
||||
## General Settings
|
||||
### 1. Appearance
|
||||
|
||||
### Do not show low-priority log
|
||||
If you enable this option, only the entries shown as popups are logged.
|
||||
#### Display Language
|
||||
|
||||
### Verbose log
|
||||
Setting key: displayLanguage
|
||||
|
||||
## Sync Settings
|
||||
You can change the display language. It is independent of the system language and/or Obsidian's language.
|
||||
Note: Not all messages have been translated. Please revert to "Default" when reporting errors. Of course, your contribution to translation is always welcome!
|
||||
|
||||
### LiveSync
|
||||
Do LiveSync.
|
||||
#### Show status inside the editor
|
||||
|
||||
It is one of the raisons d'être of this plugin.
|
||||
Setting key: showStatusOnEditor
|
||||
|
||||
Useful, but this method drains the battery on mobile devices and uses a non-negligible amount of data transfer.
|
||||
We can show the status of synchronisation inside the editor.
|
||||
|
||||
This method is mutually exclusive with the other synchronization methods.
|
||||
Reflected after a reboot.
|
||||
|
||||
### Periodic Sync
|
||||
Synchronize periodically.
|
||||
#### Show status as icons only
|
||||
|
||||
### Periodic Sync Interval
|
||||
Unit is seconds.
|
||||
Setting key: showOnlyIconsOnEditor
|
||||
|
||||
### Sync on Save
|
||||
Synchronize when the note has been modified or created.
|
||||
Show status as icons only. This is useful when you want to save space on the status bar.
|
||||
|
||||
### Sync on File Open
|
||||
Synchronize when the note is opened.
|
||||
#### Show status on the status bar
|
||||
|
||||
### Sync on Start
|
||||
Synchronize when Obsidian started.
|
||||
Setting key: showStatusOnStatusbar
|
||||
|
||||
### Use Trash for deleted files
|
||||
When the file has been deleted on remote devices, deletion will be replicated to the local device and the file will be deleted.
|
||||
We can show the status of synchronisation on the status bar. (Default: On)
|
||||
|
||||
If this option is enabled, deleted files are moved into the trash instead of being deleted outright.
|
||||
### 2. Logging
|
||||
|
||||
### Do not delete empty folder
|
||||
Self-hosted LiveSync deletes a folder when it becomes empty. If this option is enabled, the folder is left empty instead.
|
||||
#### Show only notifications
|
||||
|
||||
### Use newer file if conflicted (beta)
|
||||
Always use the newer file to resolve the conflict and overwrite when a conflict has occurred.
|
||||
Setting key: lessInformationInLog
|
||||
|
||||
Prevents logging and shows only notifications. Please disable this when reporting logs.
|
||||
|
||||
### Experimental.
|
||||
### Sync hidden files
|
||||
#### Verbose Log
|
||||
|
||||
Synchronize hidden files.
|
||||
Setting key: showVerboseLog
|
||||
|
||||
- Scan hidden files before replication.
|
||||
If you enable this option, all hidden files are scanned once before replication.
|
||||
Show the verbose log. Please enable this when reporting logs.
|
||||
|
||||
- Scan hidden files periodically.
|
||||
If you enable this option, all hidden files will be scanned every [n] seconds.
|
||||
## 3. Remote Configuration
|
||||
|
||||
Hidden files are not actively detected, so we need scanning.
|
||||
### 1. Remote Server
|
||||
|
||||
Each scan stores the file with its modification time. If the file has disappeared, that fact is also stored. Then, when the entry of a hidden file has been replicated, it will be reflected in the storage if the entry is newer than the one in storage.
|
||||
#### Remote Type
|
||||
|
||||
Therefore, the clocks must be synchronised. If the modification time is determined to be older, the changeset will be skipped or cancelled (that means **deleted**), even if the file appeared in a hidden folder.
|
||||
Setting key: remoteType
|
||||
|
||||
### Advanced settings
|
||||
Self-hosted LiveSync uses PouchDB and synchronizes with the remote via [this protocol](https://docs.couchdb.org/en/stable/replication/protocol.html).
|
||||
So, it splits every entry into chunks to make them acceptable to a database with limited payload and document sizes.
|
||||
Remote server type
|
||||
|
||||
However, it was not enough.
|
||||
According to [2.4.2.5.2. Upload Batch of Changed Documents](https://docs.couchdb.org/en/stable/replication/protocol.html#upload-batch-of-changed-documents) in [Replicate Changes](https://docs.couchdb.org/en/stable/replication/protocol.html#replicate-changes), it might become a bigger request.
|
||||
### 2. Notification
|
||||
|
||||
Unfortunately, there is no way to deal with this automatically by size for every request.
|
||||
Therefore, I made it possible to configure this.
|
||||
#### Notify when the estimated remote storage size exceeds on start up
|
||||
|
||||
Note: If you set these values to lower numbers, the number of requests will increase.
|
||||
Therefore, if you are far from the server, the total throughput will be lower and the traffic will increase.
|
||||
Setting key: notifyThresholdOfRemoteStorageSize
|
||||
|
||||
### Batch size
|
||||
Number of change feed items to process at a time. Defaults to 250.
|
||||
MB (0 to disable). We can get a notification when the estimated remote storage size exceeds this value.
|
||||
|
||||
### Batch limit
|
||||
Number of batches to process at a time. Defaults to 40. This along with batch size controls how many docs are kept in memory at a time.
|
||||
### 3. Privacy & Encryption
|
||||
|
||||
## Miscellaneous
|
||||
#### End-to-End Encryption
|
||||
|
||||
### Show status inside editor
|
||||
Show information inside the editor pane.
|
||||
It would be useful for mobile.
|
||||
Setting key: encrypt
|
||||
|
||||
### Check integrity on saving
|
||||
Check all chunks are correctly saved on saving.
|
||||
Enable end-to-end encryption. Enabling this is recommended. If you change the passphrase, you need to rebuild the databases (you will be informed).
|
||||
|
||||
### Presets
|
||||
You can set the synchronization methods at once using these patterns:
|
||||
- LiveSync
|
||||
- LiveSync : enabled
|
||||
- Batch database update : disabled
|
||||
- Periodic Sync : disabled
|
||||
- Sync on Save : disabled
|
||||
- Sync on File Open : disabled
|
||||
- Sync on Start : disabled
|
||||
- Periodic w/ batch
|
||||
- LiveSync : disabled
|
||||
- Batch database update : enabled
|
||||
- Periodic Sync : enabled
|
||||
- Sync on Save : disabled
|
||||
- Sync on File Open : enabled
|
||||
- Sync on Start : enabled
|
||||
- Disable all sync
|
||||
- LiveSync : disabled
|
||||
- Batch database update : disabled
|
||||
- Periodic Sync : disabled
|
||||
- Sync on Save : disabled
|
||||
- Sync on File Open : disabled
|
||||
- Sync on Start : disabled
|
||||
#### Passphrase
|
||||
|
||||
Setting key: passphrase
|
||||
|
||||
## Hatch
|
||||
From here, everything is under the hood. Please handle it with care.
|
||||
The encryption passphrase. If you change the passphrase, you need to rebuild the databases (you will be informed).
|
||||
|
||||
When there are problems with synchronization, a warning message is shown under this section header.
|
||||
#### Path Obfuscation
|
||||
|
||||
- Pattern 1
|
||||

|
||||
This message is shown when the remote database is locked and your device is not marked as "resolved".
|
||||
Mostly this happens when End-to-End encryption has been enabled or the history has been dropped.
|
||||
If you enabled End-to-End encryption, you can unlock the remote database automatically with "Apply and receive", or with "Drop and receive" if you dropped the history. If you want to unlock manually, click "mark this device as resolved".
|
||||
Setting key: usePathObfuscation
|
||||
|
||||
- Pattern 2
|
||||

|
||||
The remote database indicates that it has been unlocked from Pattern 1.
|
||||
When you mark all devices as resolved, you can unlock the database.
|
||||
However, there is no problem even if you leave it as it is.
|
||||
By default, file paths are not obfuscated, to improve performance. If you enable this, file paths will be obfuscated. This is useful when you want to hide them.
|
||||
|
||||
### Verify and repair all files
|
||||
Read all files in the vault, and update them in the database if there is a difference or they could not be read from the database.
|
||||
#### Use dynamic iteration count (Experimental)
|
||||
|
||||
### Suspend file watching
|
||||
If you enable this option, Self-hosted LiveSync dismisses every file change or deletion event.
|
||||
Setting key: useDynamicIterationCount
|
||||
|
||||
From here on, these commands are used internally when applying encryption passphrases or dropping histories.
|
||||
This is an experimental feature and not recommended. If you enable this, the iteration count of the encryption will be dynamically determined. This is useful when you want to improve the performance.
|
||||
|
||||
Usually, you won't use them much, but sometimes they can be handy.
|
||||
---
|
||||
|
||||
## Plugins and settings (beta)
|
||||
**now writing from here onwards, sorry**
|
||||
|
||||
### Enable plugin synchronization
|
||||
If you want to use this feature, you have to activate it with this switch.
|
||||
---
|
||||
|
||||
### Sweep plugins automatically
|
||||
Plugin sweep will run before replication automatically.
|
||||
### 4. Fetch settings
|
||||
|
||||
### Sweep plugins periodically
|
||||
Plugin sweep will run every minute.
|
||||
#### Fetch config from remote server
|
||||
|
||||
### Notify updates
|
||||
When replication is complete, a message will be shown if a newer version of a plugin applied to this device is configured on another device.
|
||||
Fetch the necessary settings from an already-configured remote server.
|
||||
|
||||
### Device and Vault name
|
||||
To save the plugins, you have to set a unique name for each device.
|
||||
### 5. Minio,S3,R2
|
||||
|
||||
### Open
|
||||
Open the "Plugins and their settings" dialog.
|
||||
#### Endpoint URL
|
||||
|
||||
### Corrupted or missing data
|
||||

|
||||
Setting key: endpoint
|
||||
|
||||
When Self-hosted LiveSync could not write a file to the storage, the file is shown here. If you have the old data in your vault, change it once and it will be cured. Or you can use the "File History" plugin.
|
||||
#### Access Key
|
||||
|
||||
Setting key: accessKey
|
||||
|
||||
#### Secret Key
|
||||
|
||||
Setting key: secretKey
|
||||
|
||||
#### Region
|
||||
|
||||
Setting key: region
|
||||
|
||||
#### Bucket Name
|
||||
|
||||
Setting key: bucket
|
||||
|
||||
#### Use Custom HTTP Handler
|
||||
|
||||
Setting key: useCustomRequestHandler
|
||||
Enable this if your Object Storage doesn't support CORS.
|
||||
|
||||
#### Test Connection
|
||||
|
||||
#### Apply Settings
|
||||
|
||||
### 6. CouchDB
|
||||
|
||||
#### Server URI
|
||||
|
||||
Setting key: couchDB_URI
|
||||
|
||||
#### Username
|
||||
|
||||
Setting key: couchDB_USER
|
||||
username
|
||||
|
||||
#### Password
|
||||
|
||||
Setting key: couchDB_PASSWORD
|
||||
password
|
||||
|
||||
#### Database Name
|
||||
|
||||
Setting key: couchDB_DBNAME
|
||||
|
||||
#### Test Database Connection
|
||||
|
||||
Open database connection. If the remote database is not found and you have permission to create a database, the database will be created.
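If you prefer to check the connection outside Obsidian first, a plain HTTP request against the server root gives a quick answer. This is a sketch; the hostname and credentials below are the example values used elsewhere in this documentation, not defaults:

```bash
# A JSON "Welcome" response means the server is reachable
# and the credentials are accepted.
curl --user "campanella:dfusiuada9suy" https://billowing-dawn-6619.fly.dev/
```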
|
||||
|
||||
#### Validate Database Configuration
|
||||
|
||||
Checks and fixes any potential issues with the database config.
|
||||
|
||||
#### Apply Settings
|
||||
|
||||
## 4. Sync Settings
|
||||
|
||||
### 1. Synchronization Preset
|
||||
|
||||
#### Presets
|
||||
|
||||
Setting key: preset
|
||||
Apply preset configuration
|
||||
|
||||
### 2. Synchronization Method
|
||||
|
||||
#### Sync Mode
|
||||
|
||||
Setting key: syncMode
|
||||
|
||||
#### Periodic Sync interval
|
||||
|
||||
Setting key: periodicReplicationInterval
|
||||
Interval (sec)
|
||||
|
||||
#### Sync on Save
|
||||
|
||||
Setting key: syncOnSave
|
||||
Starts synchronisation when a file is saved.
|
||||
|
||||
#### Sync on Editor Save
|
||||
|
||||
Setting key: syncOnEditorSave
|
||||
When you save a file in the editor, start a sync automatically
|
||||
|
||||
#### Sync on File Open
|
||||
|
||||
Setting key: syncOnFileOpen
|
||||
Forces the file to be synced when opened.
|
||||
|
||||
#### Sync on Startup
|
||||
|
||||
Setting key: syncOnStart
|
||||
Automatically Sync all files when opening Obsidian.
|
||||
|
||||
#### Sync after merging file
|
||||
|
||||
Setting key: syncAfterMerge
|
||||
Sync automatically after merging files
|
||||
|
||||
### 3. Update thinning
|
||||
|
||||
#### Batch database update
|
||||
|
||||
Setting key: batchSave
|
||||
Reducing the frequency with which on-disk changes are reflected into the DB
|
||||
|
||||
#### Minimum delay for batch database updating
|
||||
|
||||
Setting key: batchSaveMinimumDelay
|
||||
Seconds. Saving to the local database will be delayed by up to this value after we stop typing or saving.
|
||||
|
||||
#### Maximum delay for batch database updating
|
||||
|
||||
Setting key: batchSaveMaximumDelay
|
||||
Saving will be performed forcefully after this number of seconds.
|
||||
|
||||
### 4. Deletion Propagation (Advanced)
|
||||
|
||||
#### Use the trash bin
|
||||
|
||||
Setting key: trashInsteadDelete
|
||||
Move remotely deleted files to the trash, instead of deleting.
|
||||
|
||||
#### Keep empty folder
|
||||
|
||||
Setting key: doNotDeleteFolder
|
||||
Should we keep folders that don't have any files inside?
|
||||
|
||||
### 5. Conflict resolution (Advanced)
|
||||
|
||||
#### (BETA) Always overwrite with a newer file
|
||||
|
||||
Setting key: resolveConflictsByNewerFile
|
||||
Testing only. Resolve file conflicts by syncing the newer copy of the file; this can overwrite modified files. Be warned.
|
||||
|
||||
#### Delay conflict resolution of inactive files
|
||||
|
||||
Setting key: checkConflictOnlyOnOpen
|
||||
Should we only check for conflicts when a file is opened?
|
||||
|
||||
#### Delay merge conflict prompt for inactive files.
|
||||
|
||||
Setting key: showMergeDialogOnlyOnActive
|
||||
Should we prompt you about conflicting files when a file is opened?
|
||||
|
||||
### 6. Sync settings via markdown (Advanced)
|
||||
|
||||
#### Filename
|
||||
|
||||
Setting key: settingSyncFile
|
||||
Save settings to a markdown file. You will be notified when new settings arrive. You can set different files by the platform.
|
||||
|
||||
#### Write credentials in the file
|
||||
|
||||
Setting key: writeCredentialsForSettingSync
|
||||
(Not recommended) If set, credentials will be stored in the file.
|
||||
|
||||
#### Notify all setting files
|
||||
|
||||
Setting key: notifyAllSettingSyncFile
|
||||
|
||||
### 7. Hidden Files (Advanced)
|
||||
|
||||
#### Hidden file synchronization
|
||||
|
||||
#### Enable Hidden files sync
|
||||
|
||||
#### Scan for hidden files before replication
|
||||
|
||||
Setting key: syncInternalFilesBeforeReplication
|
||||
|
||||
#### Scan hidden files periodically
|
||||
|
||||
Setting key: syncInternalFilesInterval
|
||||
Seconds, 0 to disable
|
||||
|
||||
## 5. Selector (Advanced)
|
||||
|
||||
### 1. Normal Files
|
||||
|
||||
#### Synchronising files
|
||||
|
||||
(RegExp) Leave empty to sync all files. Set a regular expression filter to limit which files are synchronised.
|
||||
|
||||
#### Non-Synchronising files
|
||||
|
||||
(RegExp) If this is set, any changes to local and remote files that match this will be skipped.
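As an illustration (hypothetical patterns, not defaults), the two fields above might be filled like this:

```
\.(md|canvas)$     <- Synchronising files: only Markdown and Canvas files
^wip/              <- Non-Synchronising files: skip anything under a top-level "wip" folder
```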
|
||||
|
||||
#### Maximum file size
|
||||
|
||||
Setting key: syncMaxSizeInMB
|
||||
(MB) If this is set, changes to local and remote files that are larger than this will be skipped. If the file becomes smaller again, a newer one will be used.
|
||||
|
||||
#### (Beta) Use ignore files
|
||||
|
||||
Setting key: useIgnoreFiles
|
||||
If this is set, changes to local files which are matched by the ignore files will be skipped. Remote changes are determined using local ignore files.
|
||||
|
||||
#### Ignore files
|
||||
|
||||
Setting key: ignoreFiles
|
||||
A comma-separated list, e.g. `.gitignore, .dockerignore`.
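As a sketch (hypothetical patterns), an ignore file listed here uses the familiar `.gitignore`-style syntax:

```
# .gitignore — hypothetical example
*.tmp
cache/
```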
|
||||
|
||||
### 2. Hidden Files (Advanced)
|
||||
|
||||
#### Ignore patterns
|
||||
|
||||
#### Add default patterns
|
||||
|
||||
## 6. Customization sync (Advanced)
|
||||
|
||||
### 1. Customization Sync
|
||||
|
||||
#### Device name
|
||||
|
||||
Setting key: deviceAndVaultName
|
||||
Unique name between all synchronized devices. To edit this setting, please disable customization sync once.
|
||||
|
||||
#### Per-file-saved customization sync
|
||||
|
||||
Setting key: usePluginSyncV2
|
||||
If enabled, per-file efficient customization sync will be used. A small migration is needed when enabling this, and all devices should be updated to v0.23.18. Once this is enabled, compatibility with old versions is lost.
|
||||
|
||||
#### Enable customization sync
|
||||
|
||||
Setting key: usePluginSync
|
||||
|
||||
#### Scan customization automatically
|
||||
|
||||
Setting key: autoSweepPlugins
|
||||
Scan customization before replicating.
|
||||
|
||||
#### Scan customization periodically
|
||||
|
||||
Setting key: autoSweepPluginsPeriodic
|
||||
Scan customization every 1 minute.
|
||||
|
||||
#### Notify customized
|
||||
|
||||
Setting key: notifyPluginOrSettingUpdated
|
||||
Notify when another device has new customizations.
|
||||
|
||||
#### Open
|
||||
|
||||
Open the dialog
|
||||
|
||||
## 7. Hatch
|
||||
|
||||
### 1. Reporting Issue
|
||||
|
||||
#### Make report to inform the issue
|
||||
|
||||
#### Write logs into the file
|
||||
|
||||
Setting key: writeLogToTheFile
|
||||
Warning! This will have a serious impact on performance. And the logs will not be synchronised under the default name. Please be careful with logs; they often contain your confidential information.
|
||||
|
||||
### 2. Scram Switches
|
||||
|
||||
#### Suspend file watching
|
||||
|
||||
Setting key: suspendFileWatching
|
||||
Stop watching for file changes.
|
||||
|
||||
#### Suspend database reflecting
|
||||
|
||||
Setting key: suspendParseReplicationResult
|
||||
Stop reflecting database changes to storage files.
|
||||
|
||||
### 3. Recovery and Repair
|
||||
|
||||
#### Recreate missing chunks for all files
|
||||
|
||||
This will recreate chunks for all files. If there were missing chunks, this may fix the errors.
|
||||
|
||||
#### Resolve All conflicted files by the newer one
|
||||
|
||||
Resolve all conflicted files by the newer one. Caution: This will overwrite the older one, and cannot resurrect the overwritten one.
|
||||
|
||||
#### Verify and repair all files
|
||||
|
||||
Compare the content of files between the local database and storage. If they do not match, you will be asked which one you want to keep.
|
||||
|
||||
#### Check and convert non-path-obfuscated files
|
||||
|
||||
### 4. Reset
|
||||
|
||||
#### Back to non-configured
|
||||
|
||||
#### Delete all customization sync data
|
||||
|
||||
## 8. Advanced (Advanced)
|
||||
|
||||
### 1. Memory cache
|
||||
|
||||
#### Memory cache size (by total items)
|
||||
|
||||
Setting key: hashCacheMaxCount
|
||||
|
||||
#### Memory cache size (by total characters)
|
||||
|
||||
Setting key: hashCacheMaxAmount
|
||||
(Mega chars)
|
||||
|
||||
### 2. Local Database Tweak
|
||||
|
||||
#### Enhance chunk size
|
||||
|
||||
Setting key: customChunkSize
|
||||
|
||||
#### Use splitting-limit-capped chunk splitter
|
||||
|
||||
Setting key: enableChunkSplitterV2
|
||||
If enabled, chunks will be split into no more than 100 items. However, dedupe is slightly weaker.
|
||||
|
||||
#### Use Segmented-splitter
|
||||
|
||||
Setting key: useSegmenter
|
||||
If this is enabled, chunks will be split into semantically meaningful segments. Not all platforms support this feature.
|
||||
|
||||
### 3. Transfer Tweak
|
||||
|
||||
#### Fetch chunks on demand
|
||||
|
||||
Setting key: readChunksOnline
|
||||
(Formerly "Read chunks online".) If this option is enabled, LiveSync reads chunks online directly instead of replicating them locally. Increasing the custom chunk size is recommended.
|
||||
|
||||
#### Batch size of on-demand fetching
|
||||
|
||||
Setting key: concurrencyOfReadChunksOnline
|
||||
|
||||
#### The delay for consecutive on-demand fetches
|
||||
|
||||
Setting key: minimumIntervalOfReadChunksOnline
|
||||
|
||||
## 9. Power users (Power User)
|
||||
|
||||
### 1. Remote Database Tweak
|
||||
|
||||
#### Incubate Chunks in Document (Beta)
|
||||
|
||||
Setting key: useEden
|
||||
If enabled, newly created chunks are temporarily kept within the document, and graduated to become independent chunks once stabilised.
|
||||
|
||||
#### Maximum Incubating Chunks
|
||||
|
||||
Setting key: maxChunksInEden
|
||||
The maximum number of chunks that can be incubated within the document. Chunks exceeding this number will immediately graduate to independent chunks.
|
||||
|
||||
#### Maximum Incubating Chunk Size
|
||||
|
||||
Setting key: maxTotalLengthInEden
|
||||
The maximum total size of chunks that can be incubated within the document. Chunks exceeding this size will immediately graduate to independent chunks.
|
||||
|
||||
#### Maximum Incubation Period
|
||||
|
||||
Setting key: maxAgeInEden
|
||||
The maximum duration for which chunks can be incubated within the document. Chunks exceeding this period will graduate to independent chunks.
|
||||
|
||||
#### Data Compression (Experimental)
|
||||
|
||||
Setting key: enableCompression
|
||||
|
||||
### 2. CouchDB Connection Tweak
|
||||
|
||||
#### Batch size
|
||||
|
||||
Setting key: batch_size
|
||||
Number of changes to sync at a time. Defaults to 50. Minimum is 2.
|
||||
|
||||
#### Batch limit
|
||||
|
||||
Setting key: batches_limit
|
||||
Number of batches to process at a time. Defaults to 40. Minimum is 2. This along with batch size controls how many docs are kept in memory at a time.
|
||||
|
||||
#### Use timeouts instead of heartbeats
|
||||
|
||||
Setting key: useTimeouts
|
||||
If this option is enabled, PouchDB will hold the connection open for 60 seconds, and if no change arrives in that time, close and reopen the socket, instead of holding it open indefinitely. Useful when a proxy limits request duration but can increase resource usage.
|
||||
|
||||
### 3. Configuration Encryption
|
||||
|
||||
#### Encrypting sensitive configuration items
|
||||
|
||||
Setting key: configPassphraseStore
|
||||
|
||||
#### Passphrase of sensitive configuration items
|
||||
|
||||
Setting key: configPassphrase
|
||||
This passphrase will not be copied to another device. It will be set to `Default` until you configure it again.
|
||||
|
||||
### 4. Developer
|
||||
|
||||
#### Enable Developers' Debug Tools.
|
||||
|
||||
Setting key: enableDebugTools
|
||||
Requires restart of Obsidian
|
||||
|
||||
## 10. Patches (Edge Case)
|
||||
|
||||
### 1. Compatibility (Metadata)
|
||||
|
||||
#### Do not keep metadata of deleted files.
|
||||
|
||||
Setting key: deleteMetadataOfDeletedFiles
|
||||
|
||||
#### Delete old metadata of deleted files on start-up
|
||||
|
||||
Setting key: automaticallyDeleteMetadataOfDeletedFiles
|
||||
(Days passed, 0 to disable automatic-deletion)
|
||||
|
||||
### 2. Compatibility (Conflict Behaviour)
|
||||
|
||||
#### Always prompt merge conflicts
|
||||
|
||||
Setting key: disableMarkdownAutoMerge
|
||||
Should we prompt you for every single merge, even if we can safely merge automatically?
|
||||
|
||||
#### Apply Latest Change if Conflicting
|
||||
|
||||
Setting key: writeDocumentsIfConflicted
|
||||
Enable this option to automatically apply the most recent change to documents even when it conflicts.
|
||||
|
||||
### 3. Compatibility (Database structure)
|
||||
|
||||
#### (Obsolete) Use an old adapter for compatibility (obsolete)
|
||||
|
||||
Setting key: useIndexedDBAdapter
|
||||
Before v0.17.16, we used an old adapter for the local database. Now the new adapter is preferred. However, it requires rebuilding the local database. Please disable this toggle when you have enough time. If you leave it enabled, you will also be asked to disable it while fetching from the remote database.
|
||||
|
||||
#### Compute revisions for chunks (Previous behaviour)
|
||||
|
||||
Setting key: doNotUseFixedRevisionForChunks
|
||||
If this is enabled, all chunks will be stored with a revision made from their content (previous behaviour).
|
||||
|
||||
#### Handle files as Case-Sensitive
|
||||
|
||||
Setting key: handleFilenameCaseSensitive
|
||||
If this is enabled, all files are handled as case-sensitive (previous behaviour).
|
||||
|
||||
### 4. Compatibility (Internal API Usage)
|
||||
|
||||
#### Scan changes on customization sync
|
||||
|
||||
Setting key: watchInternalFileChanges
|
||||
Do not use internal API
|
||||
|
||||
### 5. Edge case addressing (Database)
|
||||
|
||||
#### Database suffix
|
||||
|
||||
Setting key: additionalSuffixOfDatabaseName
|
||||
LiveSync cannot handle multiple vaults that have the same name without different suffixes. This should be configured automatically.
|
||||
|
||||
#### The Hash algorithm for chunk IDs (Experimental)
|
||||
|
||||
Setting key: hashAlg
|
||||
|
||||
### 6. Edge case addressing (Behaviour)
|
||||
|
||||
#### Fetch database with previous behaviour
|
||||
|
||||
Setting key: doNotSuspendOnFetching
|
||||
|
||||
#### Keep empty folder
|
||||
|
||||
Setting key: doNotDeleteFolder
|
||||
Should we keep folders that don't have any files inside?
|
||||
|
||||
### 7. Edge case addressing (Processing)
|
||||
|
||||
#### Do not split chunks in the background
|
||||
|
||||
Setting key: disableWorkerForGeneratingChunks
|
||||
If disabled (toggled), chunks will be split on the UI thread (previous behaviour).
|
||||
|
||||
#### Process small files in the foreground
|
||||
|
||||
Setting key: processSmallFilesInUIThread
|
||||
If enabled, files under 1 KB will be processed in the UI thread.
|
||||
|
||||
### 8. Compatibility (Trouble addressed)
|
||||
|
||||
#### Do not check configuration mismatch before replication
|
||||
|
||||
Setting key: disableCheckingConfigMismatch
|
||||
|
||||
## 11. Maintenance
|
||||
|
||||
### 1. Scram!
|
||||
|
||||
#### Lock Server
|
||||
|
||||
Lock the remote server to prevent synchronization with other devices.
|
||||
|
||||
#### Emergency restart
|
||||
|
||||
Disables all synchronization and restarts.
|
||||
|
||||
### 2. Syncing
|
||||
|
||||
#### Resend
|
||||
|
||||
Resend all chunks to the remote.
|
||||
|
||||
#### Reset journal received history
|
||||
|
||||
Initialise the journal's received history. On the next sync, every item except those sent by this device will be downloaded again.
|
||||
|
||||
#### Reset journal sent history
|
||||
|
||||
Initialise the journal's sent history. On the next sync, every item except those received by this device will be sent again.
|
||||
|
||||
### 3. Rebuilding Operations (Local)
|
||||
|
||||
#### Fetch from remote
|
||||
|
||||
Restore or reconstruct local database from remote.
|
||||
|
||||
#### Fetch rebuilt DB (Save local documents before)
|
||||
|
||||
Restore or reconstruct local database from remote database but use local chunks.
|
||||
|
||||
### 4. Total Overhaul
|
||||
|
||||
#### Rebuild everything
|
||||
|
||||
Rebuild local and remote database with local files.
|
||||
|
||||
### 5. Rebuilding Operations (Remote Only)
|
||||
|
||||
#### Perform cleanup
|
||||
|
||||
Reduces storage space by discarding all non-latest revisions. This requires the same amount of free space on the remote server and the local client.
|
||||
|
||||
#### Overwrite remote
|
||||
|
||||
Overwrite remote with local DB and passphrase.
|
||||
|
||||
#### Reset all journal counter
|
||||
|
||||
Initialise all journal history. On the next sync, every item will be received and sent.
|
||||
|
||||
#### Purge all journal counter
|
||||
|
||||
Purge all download/upload cache.
|
||||
|
||||
#### Fresh Start Wipe
|
||||
|
||||
Delete all data on the remote server.
|
||||
|
||||
### 6. Deprecated
|
||||
|
||||
#### Run database cleanup
|
||||
|
||||
Attempt to shrink the database by deleting unused chunks. This may not work consistently. Use 'Rebuild everything' under Total Overhaul instead.
|
||||
|
||||
### 7. Reset
|
||||
|
||||
#### Delete local database to reset or uninstall Self-hosted LiveSync
|
||||
|
||||
252
docs/setup_flyio.md
Normal file
@@ -0,0 +1,252 @@
|
||||
<!-- For translation: 20240209r0 -->
|
||||
# Setup CouchDB on fly.io
|
||||
|
||||
This is how to configure fly.io and CouchDB on it for Self-hosted LiveSync.
|
||||
|
||||
> [!WARNING]
|
||||
> It is **your** instance. In Obsidian, we have the files locally. Hence, do not hesitate to destroy the remote database if you feel something has gone weird. We can launch and switch to a new CouchDB instance at any time[^1].
|
||||
>
|
||||
[^1]: Actually, I am always rebuilding the database like this to reproduce reported issues.
|
||||
|
||||
> [!NOTE]
|
||||
> **What is Fly.io, and why use it?**
|
||||
> At some point, we started to experience problems related to our IBM Cloudant account. At the same time, Self-hosted LiveSync started to improve its functionality, requiring CouchDB in a more natural state to use all its features.
|
||||
>
|
||||
> Then we found Fly.io. Fly.io is a PaaS platform, usable at a very reasonable price. In most cases it falls within the `Free Allowances` range.
|
||||
|
||||
## Required materials
|
||||
|
||||
- A valid credit or debit card.
|
||||
|
||||
## Setup CouchDB instance
|
||||
|
||||
### A. Very automated setup
|
||||
|
||||
[](https://www.youtube.com/watch?v=7sa_I1832Xc)
|
||||
|
||||
1. Open [setup-flyio-on-the-fly-v2.ipynb](../setup-flyio-on-the-fly-v2.ipynb).
|
||||
2. Press the `Open in Colab` button.
|
||||
3. Choose a region and run all blocks (Refer to video).
|
||||
1. If you do not have an account yet, the sign-up page will be shown; please follow the instructions. The [official document is here](https://fly.io/docs/hands-on/sign-up/).
|
||||
4. Copy the Setup-URI and use it in Obsidian.
|
||||
5. You are now synchronised. Use the Setup-URI on the subsequent devices.
|
||||
|
||||
Steps 4 and 5 are detailed in the [Quick Setup](./quick_setup.md#1-using-setup-uris).
|
||||
|
||||
> [!NOTE]
|
||||
> Your automatically generated configuration will be shown in the result of the Colab notebook like below, and **it will not be saved**. Please make a note of it somewhere.
|
||||
> ```
|
||||
> -- YOUR CONFIGURATION --
|
||||
> URL : https://billowing-dawn-6619.fly.dev
|
||||
> username: billowing-cherry-22580
|
||||
> password: misty-dew-13571
|
||||
> region : nrt
|
||||
> ```
|
||||
|
||||
### B. Scripted Setup
|
||||
|
||||
Please refer to the document of [deploy-server.sh](../utils/readme.md#deploy-serversh).
|
||||
|
||||
### C. Manual Setup
|
||||
|
||||
| Used in the text | Meaning and where to use | Memo |
|
||||
| ---------------- | --------------------------- | ------------------------------------------------------------------------ |
|
||||
| campanella | Username | It is less likely to fail if it consists only of letters and numbers. |
|
||||
| dfusiuada9suy | Password | |
|
||||
| nrt | Region to make the instance | We can use any [region](https://fly.io/docs/reference/regions/) near us. |
|
||||
|
||||
#### 1. Install flyctl
|
||||
|
||||
- Mac or Linux
|
||||
|
||||
```sh
|
||||
$ curl -L https://fly.io/install.sh | sh
|
||||
```
|
||||
|
||||
- Windows
|
||||
|
||||
```powershell
|
||||
$ iwr https://fly.io/install.ps1 -useb | iex
|
||||
```
|
||||
|
||||
#### 2. Sign up or Sign in to fly.io
|
||||
|
||||
- Sign up
|
||||
|
||||
```bash
|
||||
$ fly auth signup
|
||||
```
|
||||
|
||||
- Sign in
|
||||
|
||||
```bash
|
||||
$ fly auth login
|
||||
```
|
||||
|
||||
For more information, please refer to [Sign up](https://fly.io/docs/hands-on/sign-up/) and [Sign in](https://fly.io/docs/hands-on/sign-in/).
|
||||
|
||||
#### 3. Make a configuration file
|
||||
|
||||
1. Make `fly.toml` from template `fly.template.toml`.
|
||||
We can simply copy and rename the file (see the copy command after this list). The template is at [utils/flyio/fly.template.toml](../utils/flyio/fly.template.toml).
|
||||
2. Decide the instance name, initialize the App, and set credentials.
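For step 1, copying the template might look like this (a sketch, assuming the repository has been cloned and you are in the `utils/flyio` directory):

```bash
cp fly.template.toml fly.toml
```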
|
||||
|
||||
>[!TIP]
|
||||
> - The name `billowing-dawn-6619` is a randomly chosen name, and it will be part of the CouchDB URL. It should be globally unique, so it is recommended to use something random for this name.
|
||||
> - Explicit naming is very good for humans. However, we do not often get the chance to actually enter this manually (it has been designed that way). This database may contain important information for you; the needle should be hidden in the haystack.
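As a sketch, a random-ish name can be generated like this (any other source of randomness works just as well):

```bash
echo "couch-$(openssl rand -hex 4)"   # e.g. couch-3fa94c21
```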
|
||||
|
||||
|
||||
```bash
|
||||
$ fly launch --name=billowing-dawn-6619 --env="COUCHDB_USER=campanella" --copy-config=true --detach --no-deploy --region nrt --yes
|
||||
$ fly secrets set COUCHDB_PASSWORD=dfusiuada9suy
|
||||
```
|
||||
|
||||
#### 4. Deploy
|
||||
|
||||
```
|
||||
$ flyctl deploy
|
||||
An existing fly.toml file was found
|
||||
Using build strategies '[the "couchdb:latest" docker image]'. Remove [build] from fly.toml to force a rescan
|
||||
Creating app in /home/vorotamoroz/dev/obsidian-livesync/utils/flyio
|
||||
We're about to launch your app on Fly.io. Here's what you're getting:
|
||||
|
||||
Organization: vorotamoroz (fly launch defaults to the personal org)
|
||||
Name: billowing-dawn-6619 (specified on the command line)
|
||||
Region: Tokyo, Japan (specified on the command line)
|
||||
App Machines: shared-cpu-1x, 256MB RAM (specified on the command line)
|
||||
Postgres: <none> (not requested)
|
||||
Redis: <none> (not requested)
|
||||
|
||||
Created app 'billowing-dawn-6619' in organization 'personal'
|
||||
Admin URL: https://fly.io/apps/billowing-dawn-6619
|
||||
Hostname: billowing-dawn-6619.fly.dev
|
||||
Wrote config file fly.toml
|
||||
Validating /home/vorotamoroz/dev/obsidian-livesync/utils/flyio/fly.toml
|
||||
Platform: machines
|
||||
✓ Configuration is valid
|
||||
Your app is ready! Deploy with `flyctl deploy`
|
||||
Secrets are staged for the first deployment
|
||||
==> Verifying app config
|
||||
Validating /home/vorotamoroz/dev/obsidian-livesync/utils/flyio/fly.toml
|
||||
Platform: machines
|
||||
✓ Configuration is valid
|
||||
--> Verified app config
|
||||
==> Building image
|
||||
Searching for image 'couchdb:latest' remotely...
|
||||
image found: img_ox20prk63084j1zq
|
||||
|
||||
Watch your deployment at https://fly.io/apps/billowing-dawn-6619/monitoring
|
||||
|
||||
Provisioning ips for billowing-dawn-6619
|
||||
Dedicated ipv6: 2a09:8280:1::37:fde9
|
||||
Shared ipv4: 66.241.124.163
|
||||
Add a dedicated ipv4 with: fly ips allocate-v4
|
||||
|
||||
Creating a 1 GB volume named 'couchdata' for process group 'app'. Use 'fly vol extend' to increase its size
|
||||
This deployment will:
|
||||
* create 1 "app" machine
|
||||
|
||||
No machines in group app, launching a new machine
|
||||
|
||||
WARNING The app is not listening on the expected address and will not be reachable by fly-proxy.
|
||||
You can fix this by configuring your app to listen on the following addresses:
|
||||
- 0.0.0.0:5984
|
||||
Found these processes inside the machine with open listening sockets:
|
||||
PROCESS | ADDRESSES
|
||||
-----------------*---------------------------------------
|
||||
/.fly/hallpass | [fdaa:0:73b9:a7b:22e:3851:7f28:2]:22
|
||||
|
||||
Finished launching new machines
|
||||
|
||||
NOTE: The machines for [app] have services with 'auto_stop_machines = true' that will be stopped when idling
|
||||
|
||||
-------
|
||||
Checking DNS configuration for billowing-dawn-6619.fly.dev
|
||||
|
||||
Visit your newly deployed app at https://billowing-dawn-6619.fly.dev/
|
||||
```
|
||||
|
||||
#### 5. Apply CouchDB configuration
|
||||
|
||||
After the initial setup, CouchDB needs some more customisation to be usable from Self-hosted LiveSync. It can be configured in a browser (e.g., Fauxton at `${couchHost}/_utils`) or via the HTTP REST API.
|
||||
|
||||
This section performs the configuration using the REST API.
|
||||
|
||||
1. Prepare environment variables.
|
||||
|
||||
- Mac or Linux:
|
||||
|
||||
```bash
|
||||
export couchHost=https://billowing-dawn-6619.fly.dev
|
||||
export couchUser=campanella
|
||||
export couchPwd=dfusiuada9suy
|
||||
```
|
||||
|
||||
- Windows
|
||||
|
||||
```powershell
|
||||
set couchHost https://billowing-dawn-6619.fly.dev
|
||||
set couchUser campanella
|
||||
set couchPwd dfusiuada9suy
|
||||
$creds = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes("${couchUser}:${couchPwd}"))
|
||||
```
|
||||
|
||||
2. Perform cluster setup
|
||||
|
||||
- Mac or Linux
|
||||
|
||||
```bash
|
||||
curl -X POST "${couchHost}/_cluster_setup" -H "Content-Type: application/json" -d "{\"action\":\"enable_single_node\",\"username\":\"${couchUser}\",\"password\":\"${couchPwd}\",\"bind_address\":\"0.0.0.0\",\"port\":5984,\"singlenode\":true}" --user "${couchUser}:${couchPwd}"
|
||||
```
|
||||
|
||||
- Windows
|
||||
|
||||
```powershell
|
||||
iwr -UseBasicParsing -Method 'POST' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_cluster_setup" -Body "{""action"":""enable_single_node"",""username"":""${couchUser}"",""password"":""${couchPwd}"",""bind_address"":""0.0.0.0"",""port"":5984,""singlenode"":true}"
|
||||
```
|
||||
|
||||
Note: if the response code is not 200, we have to retry the request.
|
||||
If you run the request several times and it still does not result in 200, something is wrong. Please report it.
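On Mac or Linux, the retry can be automated with a small loop. This is a sketch using the environment variables from step 1:

```bash
# Retry the cluster-setup request until it returns HTTP 200 (up to 5 attempts).
for i in 1 2 3 4 5; do
  code=$(curl -s -o /dev/null -w '%{http_code}' -X POST "${couchHost}/_cluster_setup" \
    -H "Content-Type: application/json" \
    -d "{\"action\":\"enable_single_node\",\"username\":\"${couchUser}\",\"password\":\"${couchPwd}\",\"bind_address\":\"0.0.0.0\",\"port\":5984,\"singlenode\":true}" \
    --user "${couchUser}:${couchPwd}")
  [ "$code" = "200" ] && break
  echo "Attempt $i returned $code; retrying..."
  sleep 1
done
```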
|
||||
|
||||
3. Configure parameters
|
||||
|
||||
- Mac or Linux
|
||||
|
||||
```bash
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/chttpd/require_valid_user" -H "Content-Type: application/json" -d '"true"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/chttpd_auth/require_valid_user" -H "Content-Type: application/json" -d '"true"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/httpd/WWW-Authenticate" -H "Content-Type: application/json" -d '"Basic realm=\"couchdb\""' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/httpd/enable_cors" -H "Content-Type: application/json" -d '"true"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/chttpd/enable_cors" -H "Content-Type: application/json" -d '"true"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/chttpd/max_http_request_size" -H "Content-Type: application/json" -d '"4294967296"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/couchdb/max_document_size" -H "Content-Type: application/json" -d '"50000000"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/cors/credentials" -H "Content-Type: application/json" -d '"true"' --user "${couchUser}:${couchPwd}"
|
||||
curl -X PUT "${couchHost}/_node/nonode@nohost/_config/cors/origins" -H "Content-Type: application/json" -d '"app://obsidian.md,capacitor://localhost,http://localhost"' --user "${couchUser}:${couchPwd}"
|
||||
```
|
||||
|
||||
- Windows
|
||||
|
||||
```powershell
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/chttpd/require_valid_user" -Body '"true"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/chttpd_auth/require_valid_user" -Body '"true"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/httpd/WWW-Authenticate" -Body '"Basic realm=\"couchdb\""'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/httpd/enable_cors" -Body '"true"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/chttpd/enable_cors" -Body '"true"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/chttpd/max_http_request_size" -Body '"4294967296"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/couchdb/max_document_size" -Body '"50000000"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/cors/credentials" -Body '"true"'
|
||||
iwr -UseBasicParsing -Method 'PUT' -ContentType 'application/json; charset=utf-8' -Headers @{ 'Authorization' = 'Basic ' + $creds } "${couchHost}/_node/nonode@nohost/_config/cors/origins" -Body '"app://obsidian.md,capacitor://localhost,http://localhost"'
|
||||
```
|
||||
|
||||
Note: Each of these should likewise be repeated until it finishes with 200.
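To double-check a value afterwards, the same configuration endpoint can be read back with GET (Mac or Linux, using the variables above):

```bash
curl "${couchHost}/_node/nonode@nohost/_config/chttpd/require_valid_user" --user "${couchUser}:${couchPwd}"
# Expected output: "true"
```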
|
||||
|
||||
#### 6. Use it from Self-hosted LiveSync
|
||||
|
||||
Now the CouchDB is ready to use from Self-hosted LiveSync. We can use `https://billowing-dawn-6619.fly.dev` in URI, `campanella` in `Username` and `dfusiuada9suy` in `Password` on Self-hosted LiveSync. The `Database name` could be anything you want.
|
||||
Please refer to the [Minimal Setup of the Quick Setup](./quick_setup.md#2-minimal-setup).
|
||||
|
||||
## Delete the Instance
|
||||
|
||||
If you want to delete the CouchDB instance, you can do that in [fly.io Dashboard](https://fly.io/dashboard/personal)
|
||||
|
||||
If you have done with [B. Scripted Setup](#b-scripted-setup), we can use [delete-server.sh](../utils/readme.md#delete-serversh).
|
||||
@@ -1,90 +1,212 @@
|
||||
# Setup CouchDB to your server
|
||||
# Setup a CouchDB server
|
||||
|
||||
## Table of Contents
|
||||
|
||||
## Install CouchDB and access from PC or Mac
|
||||
- [Setup a CouchDB server](#setup-a-couchdb-server)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [1. Prepare CouchDB](#1-prepare-couchdb)
|
||||
- [A. Using Docker](#a-using-docker)
|
||||
- [1. Prepare](#1-prepare)
|
||||
- [2. Run docker container](#2-run-docker-container)
|
||||
- [B. Using Docker Compose](#b-using-docker-compose)
|
||||
- [1. Prepare](#1-prepare-1)
|
||||
- [2. Creating Compose file](#2-create-a-docker-composeyml-file-with-the-following-added-to-it)
|
||||
- [3. Boot check](#3-run-the-docker-compose-file-to-boot-check)
|
||||
- [4. Starting Docker Compose in background](#4-run-the-docker-compose-file-in-the-background)
|
||||
- [C. Install CouchDB directly](#c-install-couchdb-directly)
|
||||
- [2. Run couchdb-init.sh for initialise](#2-run-couchdb-initsh-for-initialise)
|
||||
- [3. Expose CouchDB to the Internet](#3-expose-couchdb-to-the-internet)
|
||||
- [4. Client Setup](#4-client-setup)
|
||||
- [1. Generate the setup URI on a desktop device or server](#1-generate-the-setup-uri-on-a-desktop-device-or-server)
|
||||
- [2. Setup Self-hosted LiveSync to Obsidian](#2-setup-self-hosted-livesync-to-obsidian)
|
||||
- [Manual setup information](#manual-setup-information)
|
||||
- [Setting up your domain](#setting-up-your-domain)
|
||||
- [Reverse Proxies](#reverse-proxies)
|
||||
- [Traefik](#traefik)
|
||||
---
|
||||
|
||||
The easiest way to set up CouchDB is to use the [docker image](https://hub.docker.com/_/couchdb).
|
||||
## 1. Prepare CouchDB
|
||||
### A. Using Docker
|
||||
|
||||
But some additional configuration is required in `local.ini` to use it from Self-hosted LiveSync, like below:
|
||||
#### 1. Prepare
|
||||
```bash
# Adding environment variables.
export hostname=localhost:5984
export username=goojdasjdas # Please change as you like.
export password=kpkdasdosakpdsa # Please change as you like.

# Creating the save data & configuration directories.
mkdir couchdb-data
mkdir couchdb-etc
```

```
[couchdb]
single_node=true

[chttpd]
require_valid_user = true

[chttpd_auth]
require_valid_user = true
authentication_redirect = /_utils/session.html

[httpd]
WWW-Authenticate = Basic realm="couchdb"
enable_cors = true

[cors]
origins = app://obsidian.md,capacitor://localhost,http://localhost
credentials = true
headers = accept, authorization, content-type, origin, referer
methods = GET, PUT, POST, HEAD, DELETE
max_age = 3600
```
|
||||
|
||||
Create `local.ini` and run docker like this; then you can launch CouchDB.

#### 2. Run docker container

1. Boot Check.

```
$ docker run --rm -it -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v /path/to/local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
$ docker run --name couchdb-for-ols --rm -it -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
```

*Remember to replace the path with the path to your local.ini*

Note: At this point, the file owner of local.ini becomes 5984:5984. This is a limitation of the docker image; please change the owner before editing local.ini again.

> [!WARNING]
> If your container threw an error or exited unexpectedly, please check the permissions of couchdb-data and couchdb-etc.
> Once CouchDB starts, these directories will be owned by uid `5984`. Please chown them to that uid again (e.g., `chown -R 5984:5984 couchdb-data couchdb-etc`).
Once you have confirmed that Self-hosted LiveSync can sync with the server, launch the docker image in the background as you like.

Example to run docker in detached mode:

2. Enable it in the background

```
$ docker run -d --restart always -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v /path/to/local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
$ docker run --name couchdb-for-ols -d --restart always -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
```

*Remember to replace the path with the path to your local.ini*

## Access from mobile device

If you want to access Self-hosted LiveSync from mobile devices, you need a valid SSL certificate.

### Testing from mobile

In the testing phase, [localhost.run](http://localhost.run/) or similar services are very useful.

Example of using localhost.run:

```
$ ssh -R 80:localhost:5984 nokey@localhost.run
Warning: Permanently added the RSA host key for IP address '35.171.254.69' to the list of known hosts.

===============================================================================
Welcome to localhost.run!

Follow your favourite reverse tunnel at [https://twitter.com/localhost_run].

**You need a SSH key to access this service.**
If you get a permission denied follow Gitlab's most excellent howto:
https://docs.gitlab.com/ee/ssh/
*Only rsa and ed25519 keys are supported*

To set up and manage custom domains go to https://admin.localhost.run/

More details on custom domains (and how to enable subdomains of your custom
domain) at https://localhost.run/docs/custom-domains

To explore using localhost.run visit the documentation site:
https://localhost.run/docs/

===============================================================================


** your connection id is xxxxxxxxxxxxxxxxxxxxxxxxxxxx, please mention it if you send me a message about an issue. **

xxxxxxxx.localhost.run tunneled with tls termination, https://xxxxxxxx.localhost.run
Connection to localhost.run closed by remote host.
Connection to localhost.run closed.
```

https://xxxxxxxx.localhost.run is the temporary server address.

Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise).
### B. Using Docker Compose

#### 1. Prepare

```
# Creating the save data & configuration directories.
mkdir couchdb-data
mkdir couchdb-etc
```

#### 2. Create a `docker-compose.yml` file with the following added to it

```
services:
  couchdb:
    image: couchdb:latest
    container_name: couchdb-for-ols
    user: 5984:5984
    environment:
      - COUCHDB_USER=<INSERT USERNAME HERE> #Please change as you like.
      - COUCHDB_PASSWORD=<INSERT PASSWORD HERE> #Please change as you like.
    volumes:
      - ./couchdb-data:/opt/couchdb/data
      - ./couchdb-etc:/opt/couchdb/etc/local.d
    ports:
      - 5984:5984
    restart: unless-stopped
```

#### 3. Run the Docker Compose file to boot check

```
docker compose up
# Or if using the old version
docker-compose up
```

> [!WARNING]
> If your container threw an error or exited unexpectedly, please check the permissions of couchdb-data and couchdb-etc.
> Once CouchDB starts, these directories will be owned by uid `5984`. Please chown them to that uid again (e.g., `chown -R 5984:5984 couchdb-data couchdb-etc`).

#### 4. Run the Docker Compose file in the background

If everything went well and no errors were thrown, press `CTRL+C` to stop it, and then run this command:

```
docker compose up -d
# Or if using the old version
docker-compose up -d
```

Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise).
### C. Install CouchDB directly

Please refer to the [official document](https://docs.couchdb.org/en/stable/install/index.html). However, we do not have to configure it fully. Only the administrator needs to be configured.

## 2. Run couchdb-init.sh for initialise

```
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | bash
```
If it results like the following:

```
-- Configuring CouchDB by REST APIs... -->
{"ok":true}
""
""
""
""
""
""
""
""
""
<-- Configuring CouchDB by REST APIs Done!
```

Your CouchDB has been initialised successfully. If you want to do this manually, please read the script; a sketch of the idea follows below.
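For reference, the following is a minimal TypeScript (Deno) sketch of the kind of initialisation such a script performs, based on the configuration keys listed in this document. It is an illustration, not the script itself, and the host and credentials are placeholders.

```typescript
// Illustration only: applies the CouchDB settings listed in this document
// through the _config REST API. Host and credentials are placeholders.
const hostname = "http://localhost:5984";
const auth = "Basic " + btoa("goojdasjdas:kpkdasdosakpdsa");

const settings: Record<string, string> = {
    "chttpd/require_valid_user": "true",
    "chttpd_auth/require_valid_user": "true",
    "httpd/WWW-Authenticate": 'Basic realm="couchdb"',
    "httpd/enable_cors": "true",
    "cors/origins": "app://obsidian.md,capacitor://localhost,http://localhost",
    "cors/credentials": "true",
    "cors/methods": "GET, PUT, POST, HEAD, DELETE",
    "cors/headers": "accept, authorization, content-type, origin, referer",
    "cors/max_age": "3600",
};

for (const [key, value] of Object.entries(settings)) {
    const res = await fetch(`${hostname}/_node/nonode@nohost/_config/${key}`, {
        method: "PUT",
        headers: { "Authorization": auth, "Content-Type": "application/json" },
        body: JSON.stringify(value), // CouchDB expects a JSON string as the body
    });
    console.log(key, res.status, await res.text());
}
```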
If you are using Docker Compose and the above command does not work or displays `ERROR: Hostname missing`, you can try running the following command, replacing the placeholders with your own values:

```
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | hostname=http://<YOUR SERVER IP>:5984 username=<INSERT USERNAME HERE> password=<INSERT PASSWORD HERE> bash
```
## 3. Expose CouchDB to the Internet

- You can skip this step if you are using CouchDB only on an intranet and only with desktop devices.
- For mobile devices, Obsidian requires a valid SSL certificate. Usually, this means exposing the server to the internet.

We can use whatever solution works. For simplicity, the following sample uses Cloudflare Zero Trust for testing.

```
cloudflared tunnel --url http://localhost:5984
```

You will then get the following output:

```
2024-02-14T10:35:25Z INF Thank you for trying Cloudflare Tunnel. Doing so, without a Cloudflare account, is a quick way to experiment and try it out. However, be aware that these account-less Tunnels have no uptime guarantee. If you intend to use Tunnels in production you should use a pre-created named tunnel by following: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps
2024-02-14T10:35:25Z INF Requesting new quick Tunnel on trycloudflare.com...
2024-02-14T10:35:26Z INF +--------------------------------------------------------------------------------------------+
2024-02-14T10:35:26Z INF |  Your quick Tunnel has been created! Visit it at (it may take some time to be reachable):  |
2024-02-14T10:35:26Z INF |  https://tiles-photograph-routine-groundwater.trycloudflare.com                            |
2024-02-14T10:35:26Z INF +--------------------------------------------------------------------------------------------+
:
:
:
```

Now `https://tiles-photograph-routine-groundwater.trycloudflare.com` is our server. Please send the process to the background for now.
## 4. Client Setup

> [!TIP]
> Manual configuration is now not recommended, for several reasons. However, if you want to configure manually, please use the `Setup wizard`. The recommended extra configurations will also be set.

### 1. Generate the setup URI on a desktop device or server

```bash
export hostname=https://tiles-photograph-routine-groundwater.trycloudflare.com #Point to your server
export database=obsidiannotes #Please change as you like
export passphrase=dfsapkdjaskdjasdas #Please change as you like
export username=johndoe
export password=abc123
deno run -A https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/flyio/generate_setupuri.ts
```
> [!TIP]
> What is the `passphrase`? Is it different from `uri_passphrase`?
> Yes. The `passphrase` we have exported now is the End-to-End Encryption passphrase.
> The `uri_passphrase` used in `generate_setupuri.ts` is a different one; it is for decrypting the setup URI when using it.
> Why: I (vorotamoroz) think that the passphrase of the setup URI should be different from the E2EE passphrase, to prevent exposure caused by operational errors or the possibility of evil in our environment. On top of that, I believe that it is desirable for the setup URI to be random. The setup URI is inevitably long, so it goes through the clipboard. Its passphrase should not go through the same path, so it should essentially be typed manually.
> Hence, if we leave `uri_passphrase` empty, `generate_setupuri.ts` generates an adjective-noun-randomnumber passphrase so that we can remember it without going through the clipboard.
You will then get the following output:

```bash
obsidian://setuplivesync?settings=%5B%22tm2DpsOE74nJAryprZO2M93wF%2Fvg.......4b26ed33230729%22%5D

Your passphrase of Setup-URI is: patient-haze
This passphrase is never shown again, so please note it in a safe place.
```

Please keep your setup-URI passphrase in a safe place.
### 2. Setup Self-hosted LiveSync to Obsidian

[This video](https://youtu.be/7sa_I1832Xc?t=146) may help us.

1. Install Self-hosted LiveSync.
2. Choose `Use the copied setup URI` from the command palette and paste the setup URI (obsidian://setuplivesync?settings=.....).
3. Type the previously displayed passphrase (`patient-haze`) as the setup-URI passphrase.
4. Answer `yes` and `Set it up...`, and finish the first dialogue with `Keep them disabled`.
5. `Reload app without save` once.
---

## Manual setup information

### Setting up your domain

@@ -92,6 +214,77 @@ Set the A record of your domain to point to your server, and host reverse proxy

Note: Mounting CouchDB on the top directory is not recommended.
Using Caddy is a handy way to serve the server with SSL automatically.

I have published [docker-compose.yml and ini files](https://github.com/vrtmrz/self-hosted-livesync-server) that launches Caddy and CouchDB at once. Please try it out.
I have published [docker-compose.yml and ini files](https://github.com/vrtmrz/self-hosted-livesync-server) that launch Caddy and CouchDB at once. If you are using Traefik you can check the [Reverse Proxies](#reverse-proxies) section below.

And, be sure to check the server log and be careful of malicious access.
## Reverse Proxies

### Traefik

If you are using Traefik, this [docker-compose.yml](https://github.com/vrtmrz/obsidian-livesync/blob/main/docker-compose.traefik.yml) file (also pasted below) has all the right CORS parameters set. It assumes you have an external network called `proxy`.
```yaml
version: "2.1"
services:
  couchdb:
    image: couchdb:latest
    container_name: obsidian-livesync
    user: 1000:1000
    environment:
      - COUCHDB_USER=username
      - COUCHDB_PASSWORD=password
    volumes:
      - ./data:/opt/couchdb/data
      - ./local.ini:/opt/couchdb/etc/local.ini
    # Ports not needed when already passed to Traefik
    #ports:
    #  - 5984:5984
    restart: unless-stopped
    networks:
      - proxy
    labels:
      - "traefik.enable=true"
      # The Traefik Network
      - "traefik.docker.network=proxy"
      # Don't forget to replace 'obsidian-livesync.example.org' with your own domain
      - "traefik.http.routers.obsidian-livesync.rule=Host(`obsidian-livesync.example.org`)"
      # The 'websecure' entryPoint is basically your HTTPS entrypoint. Check the next code snippet only if you are encountering problems; if this is not the first container you are reverse proxying, you probably already have a working Traefik configuration.
      - "traefik.http.routers.obsidian-livesync.entrypoints=websecure"
      - "traefik.http.routers.obsidian-livesync.service=obsidian-livesync"
      - "traefik.http.services.obsidian-livesync.loadbalancer.server.port=5984"
      - "traefik.http.routers.obsidian-livesync.tls=true"
      # Replace the string 'letsencrypt' with your own certificate resolver
      - "traefik.http.routers.obsidian-livesync.tls.certresolver=letsencrypt"
      - "traefik.http.routers.obsidian-livesync.middlewares=obsidiancors"
      # The part needed for CORS to work on Traefik 2.x starts here
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolallowmethods=GET,PUT,POST,HEAD,DELETE"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolallowheaders=accept,authorization,content-type,origin,referer"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolalloworiginlist=app://obsidian.md,capacitor://localhost,http://localhost"
      - "traefik.http.middlewares.obsidiancors.headers.accesscontrolmaxage=3600"
      - "traefik.http.middlewares.obsidiancors.headers.addvaryheader=true"
      - "traefik.http.middlewares.obsidiancors.headers.accessControlAllowCredentials=true"

networks:
  proxy:
    external: true
```
Partial `traefik.yml` config file mentioned above:

```yml
...

entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entryPoint:
          to: "websecure"
          scheme: "https"
  websecure:
    address: ":443"

...
```
@@ -1,8 +1,19 @@
# Setup CouchDB on your own server

## Table of Contents
- [Configure CouchDB](#配置-CouchDB)
- [Run CouchDB](#运行-CouchDB)
  - [Docker CLI](#docker-cli)
  - [Docker Compose](#docker-compose)
- [Create the database](#创建数据库)
- [Access from mobile devices](#从移动设备访问)
  - [Testing from mobile devices](#移动设备测试)
- [Setting up your domain](#设置你的域名)
---

> Note: [docker-compose.yml and ini files](https://github.com/vrtmrz/self-hosted-livesync-server) that launch Caddy and CouchDB at once are provided. Using that docker-compose configuration directly is recommended for the setup. (If you use it, please refer to the documentation in that link instead of this document.)

## Install CouchDB and access it from a PC or Mac
## Configure CouchDB

The easiest way to set up CouchDB is to use the [CouchDB docker image](https://hub.docker.com/_/couchdb).
@@ -11,9 +22,11 @@
```
[couchdb]
single_node=true
max_document_size = 50000000

[chttpd]
require_valid_user = true
max_http_request_size = 4294967296

[chttpd_auth]
require_valid_user = true
@@ -31,17 +44,62 @@ methods = GET, PUT, POST, HEAD, DELETE
max_age = 3600
```
Create `local.ini` and start CouchDB with the following command:
```
$ docker run --rm -it -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v .local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
```
Note: At this point the file owner of local.ini becomes 5984:5984. This is a limitation of the docker image; please change the file owner before editing local.ini again.
## Run CouchDB

After confirming that Self-hosted LiveSync can synchronise with the server, you can start the docker image in the background:
### Docker CLI

You can run CouchDB with your `local.ini` configuration specified:

```
$ docker run -d --restart always -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v .local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
$ docker run --rm -it -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v /path/to/local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
```
*Remember to replace the local.ini mount path in the command above with its actual location*

Run in the background:
```
$ docker run -d --restart always -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password -v /path/to/local.ini:/opt/couchdb/etc/local.ini -p 5984:5984 couchdb
```
*Remember to replace the local.ini mount path in the command above with its actual location*
### Docker Compose
Create a directory, place your `local.ini` inside it, and then create `docker-compose.yml` in the same directory. Make sure you have read and write permission on `local.ini`, and that the `data` directory can be created after the container runs. The directory structure looks roughly like this:
```
obsidian-livesync
├── docker-compose.yml
└── local.ini
```

Edit `docker-compose.yml` with reference to the following:
```yaml
version: "2.1"
services:
  couchdb:
    image: couchdb
    container_name: obsidian-livesync
    user: 1000:1000
    environment:
      - COUCHDB_USER=admin
      - COUCHDB_PASSWORD=password
    volumes:
      - ./data:/opt/couchdb/data
      - ./local.ini:/opt/couchdb/etc/local.ini
    ports:
      - 5984:5984
    restart: unless-stopped
```

Finally, create and start the container:
```
# -d will launch detached so the container runs in background
docker-compose up -d
```
## Create the database

After CouchDB has been deployed successfully, a database needs to be created manually for the plug-in to connect to and synchronise with.

1. Visit `http://localhost:5984/_utils`, and enter the username and password to open the admin page
2. Click Create Database, and create a database named as you like

## Access from mobile devices
If you want to access Self-hosted LiveSync from mobile devices, you need a valid SSL certificate.
@@ -92,4 +150,4 @@ Note: Mounting CouchDB at the root directory is not recommended
[docker-compose.yml and ini files](https://github.com/vrtmrz/self-hosted-livesync-server) that launch Caddy and CouchDB at once are provided.

Be sure to check the server log and watch out for malicious access.
@@ -8,9 +8,11 @@ To build CouchDB, use the [Docker image](https://hub.docker.com/_/
```
[couchdb]
single_node=true
max_document_size = 50000000

[chttpd]
require_valid_user = true
max_http_request_size = 4294967296

[chttpd_auth]
require_valid_user = true
```
24 docs/terms.md (new file)
@@ -0,0 +1,24 @@
# Notes on Terminology, Spelling, and Vocabulary Conventions

## Spelling and Vocabulary conventions

1. Almost all of the English words are written in British English. For example, "organisation" instead of "organization", "synchronisation" instead of "synchronization", etc. This convention originated from the author's personal preference but is now maintained for consistency.

2. Idiomatic terms, such as those used in HTML, CSS, and JavaScript, are usually aligned with the language used in the technology. For example, "color" instead of "colour", "program" instead of "programme", etc. This applies especially to terms used for attributes, properties, and methods.

3. We use `dialogue` in documentation for consistency. While `dialog` may appear in source code, particularly in class names, method names, and attributes (following the technical conventions in No. 2), we consistently use `dialogue` for user-facing messages and general documentation text. This approach balances No. 1 with No. 2.

4. Contractions are not used. For example, "do not" instead of "don't", "cannot" instead of "can't", etc., especially `'d`.
   - We may encounter difficulties with tenses.

5. However, try to use affirmative forms: `Discard` instead of `Do not keep`, `Continue` instead of `Do not stop`, etc.
   - Some languages, such as Japanese, have a different meaning for `yes` and `no` between affirmative and negative questions.

## Terminology

- Self-hosted LiveSync
  - This plug-in's name. `Self-hosted` is one word.
- LiveSync
  - A very confusing term.
  - As the shortened form of `Self-hosted LiveSync`.
  - As the name of a synchronisation mode. This should be changed to `Continuous`, in contrast to `Periodic`.
81 docs/tips/jwt-on-couchdb.md (new file)
@@ -0,0 +1,81 @@
---
title: "JWT Authentication on CouchDB"
livesync-version: 0.25.24
tags:
- tips
- CouchDB
- JWT
authors:
- vorotamoroz
---

# JWT Authentication on CouchDB

When using CouchDB as a backend for Self-hosted LiveSync, it is possible to enhance security by employing JWT (JSON Web Token) Authentication. In particular, using asymmetric keys (ES256 and ES512) provides greater security against token interception.

## Setting up JWT Authentication (Asymmetrical Key Example)

### 1. Generate a key pair

We can use `openssl` to generate an EC key pair as follows:

```bash
# Generate private key
# ES512 uses the secp521r1 curve; we can also use ES256 with the prime256v1 curve
openssl ecparam -name secp521r1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
# openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
# Generate public key in SPKI format
openssl ec -in private_key.pem -pubout -outform PEM -out public_key.pem
```

> [!TIP]
> A key generator will be provided again in a future version of the user interface.

### 2. Configure CouchDB to accept JWT tokens

The following configuration is required:

| Key | Value | Note |
| ------------------------------ | ----------------------------------------- | ---- |
| chttpd/authentication_handlers | {chttpd_auth, jwt_authentication_handler} | In total, it may be `{chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}`, or something similar. |
| jwt_auth/required_claims | "exp" | |
| jwt_keys/ec:your_key_id | Your public key in PEM (SPKI) format | Replace `your_key_id` with your actual key ID; you can decide it as you like. Note that you can add multiple keys if needed. If you want to use HSxxx, you should set `jwt_keys/hmac:your_key_id` with your HMAC secret. |
Note: When configuring CouchDB via the web interface (Fauxton), the new-lines adjacent to the header and footer lines of the public key should be replaced with a literal `\n` (so weird, but true; I have tested it), as follows:
```
-----BEGIN PUBLIC KEY-----
\nMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBq0irb/+K0Qzo7ayIHj0Xtthcntjz
r665J5UYdEQMiTtku5rnp95RuN97uA2pPOJOacMBAoiVUnZ1pqEBz9xH9yoAixji
Ju...........................................................gTt
/xtqrJRwrEy986oRZRQ=
\n-----END PUBLIC KEY-----
```
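If preparing that string by hand is fiddly, the following tiny TypeScript (Deno) sketch, which is not part of the plug-in, performs the same substitution on `public_key.pem`:

```typescript
// Illustration only: replaces the new-lines adjacent to the header and footer
// lines of the public key with a literal "\n", as the note above describes.
const pem = (await Deno.readTextFile("public_key.pem")).trim();
const forFauxton = pem
    .replace("-----BEGIN PUBLIC KEY-----\n", "-----BEGIN PUBLIC KEY-----\\n")
    .replace("\n-----END PUBLIC KEY-----", "\\n-----END PUBLIC KEY-----");
console.log(forFauxton);
```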
For detailed information, please refer to the [CouchDB JWT Authentication Documentation](https://docs.couchdb.org/en/stable/api/server/authn.html#jwt-authentication).

### 3. Configure Self-hosted LiveSync to use JWT Authentication

| Setting | Description |
| ----------------------- | ----------- |
| Use JWT Authentication | Enable this option to use JWT Authentication. |
| JWT Algorithm | Select the JWT signing algorithm (e.g., ES256, ES512) that matches your key pair. |
| JWT Key | Paste your private key in PEM (pkcs8) format. |
| JWT Expiration Duration | Set the token expiration time in minutes. Locally cached tokens are also invalidated after this duration. |
| JWT Key ID (kid) | Enter the key ID that you used when configuring CouchDB, i.e., the one that replaced `your_key_id`. |
| JWT Subject (sub) | Set your user ID; this overrides the original `Username` setting. If you have detected access with `Username`, you have failed to authorise with JWT. |

> [!IMPORTANT]
> Self-hosted LiveSync's requests to CouchDB treat the user as `_admin`. If you want to restrict access, configure `jwt_auth/roles_claim_name` to a custom claim name. (Self-hosted LiveSync always sets `_couchdb.roles` with the value `["_admin"]`.)
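For reference, here is a minimal TypeScript (Deno) sketch of how such a token can be minted with the widely used `jose` library. It is an illustration, not part of the plug-in; the key ID (`your_key_id`), the subject, and the file and host names are the placeholder values from above:

```typescript
// Illustration only: signs a JWT matching the settings described above.
import * as jose from "npm:jose";

const pem = await Deno.readTextFile("private_key.pem"); // PKCS#8 private key
const privateKey = await jose.importPKCS8(pem, "ES512");

const token = await new jose.SignJWT({ "_couchdb.roles": ["_admin"] })
    .setProtectedHeader({ alg: "ES512", kid: "your_key_id" }) // JWT Key ID (kid)
    .setSubject("johndoe") // JWT Subject (sub): your user ID
    .setIssuedAt()
    .setExpirationTime("5m") // satisfies the required `exp` claim
    .sign(privateKey);

// CouchDB accepts the token as a Bearer token.
const res = await fetch("http://localhost:5984/_session", {
    headers: { Authorization: `Bearer ${token}` },
});
console.log(await res.json());
```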
### 4. Test the configuration

Just try `Test Settings and Continue` in the remote setup dialogue. If you have successfully authenticated, you are all set.

## Additional Notes

This feature is still experimental. Please ensure that you test it thoroughly in your environment before deploying to production.

However, we think that this is a great step towards enhancing security when using CouchDB with Self-hosted LiveSync. We shall enable this setting by default in future releases.

We would love to hear your feedback and any issues you encounter.
29 docs/tips/p2p-sync-tips.md (new file)
@@ -0,0 +1,29 @@
---
title: "Peer-to-Peer Synchronisation Tips"
livesync-version: 0.25.24
tags:
- tips
- p2p
authors:
- vorotamoroz
---

# Peer-to-Peer Synchronisation Tips

> [!IMPORTANT]
> Peer-to-peer synchronisation is still an experimental feature. Although we have made every effort to ensure its reliability, it may not function correctly in all environments.

## Difficulties with Peer-to-Peer Synchronisation

It is often the case that peer-to-peer connections do not function correctly, for instance, when using mobile data services.
In such circumstances, we recommend connecting all devices to a single Virtual Private Network (VPN). It is advisable to select a service, such as Tailscale, which facilitates direct communication between peers wherever possible.
Should one be in an environment where even Tailscale is unable to connect, or where it cannot be lawfully installed, please continue reading.

## A More Detailed Explanation

The failure of a Peer-to-Peer connection via WebRTC can be attributed to several factors. These may include an unsuccessful UDP hole-punching attempt, or an intermediary gateway intentionally terminating the connection. Troubleshooting this matter is not a simple undertaking. Furthermore, and rather unfortunately, gateway administrators are typically aware of this type of network behaviour. Whilst a legitimate purpose for such traffic can be cited, such as for web conferencing, this is often insufficient to prevent it from being blocked.

This situation, however, is the primary reason that our project does not provide a TURN server. Although it is said that a TURN server within WebRTC does not decrypt communications, the project holds the view that the risk of a malicious party impersonating a TURN server must be avoided. Consequently, configuring a TURN server for relay communication is not currently possible through the user interface. Furthermore, there is no official project TURN server, which is to say, one that could be monitored by a third party.

We request that you provide your own server, using your own Fully Qualified Domain Name (FQDN), and subsequently enter its details into the advanced settings.
For testing purposes, Cloudflare's Real-Time TURN Service is exceedingly convenient and offers a generous amount of free data. However, it must be noted that because it is a well-known destination, such traffic is highly conspicuous. There is also a significant possibility that it may be blocked by default. We advise proceeding with caution.
364 docs/troubleshooting.md (new file)
@@ -0,0 +1,364 @@
# Tips and Troubleshooting

- [Tips and Troubleshooting](#tips-and-troubleshooting)
  - [Tips](#tips)
    - [CORS avoidance](#cors-avoidance)
    - [CORS configuration with reverse proxy](#cors-configuration-with-reverse-proxy)
      - [Nginx](#nginx)
      - [Nginx and subdirectory](#nginx-and-subdirectory)
      - [Caddy](#caddy)
      - [Caddy and subdirectory](#caddy-and-subdirectory)
      - [Apache](#apache)
    - [Show all setting panes](#show-all-setting-panes)
    - [How to resolve `Tweaks Mismatched of Changed`](#how-to-resolve-tweaks-mismatched-of-changed)
  - [Notable bugs and fixes](#notable-bugs-and-fixes)
    - [Binary files get bigger on iOS](#binary-files-get-bigger-on-ios)
    - [Some setting name has been changed](#some-setting-name-has-been-changed)
  - [FAQ](#faq)
    - [Why `Use an old adapter for compatibility` is somehow enabled in my vault?](#why-use-an-old-adapter-for-compatibility-is-somehow-enabled-in-my-vault)
    - [ZIP (or any extensions) files were not synchronised. Why?](#zip-or-any-extensions-files-were-not-synchronised-why)
    - [I hope to report the issue, but you said you need `Report`. How to make it?](#i-hope-to-report-the-issue-but-you-said-you-need-report-how-to-make-it)
    - [Where can I check the log?](#where-can-i-check-the-log)
    - [Why are the logs volatile and ephemeral?](#why-are-the-logs-volatile-and-ephemeral)
    - [Some network logs are not written into the file.](#some-network-logs-are-not-written-into-the-file)
    - [If a file were deleted or trimmed, the capacity of the database should be reduced, right?](#if-a-file-were-deleted-or-trimmed-the-capacity-of-the-database-should-be-reduced-right)
    - [How to launch the DevTools](#how-to-launch-the-devtools)
      - [On Desktop Devices](#on-desktop-devices)
      - [On Android](#on-android)
      - [On iOS, iPadOS devices](#on-ios-ipados-devices)
    - [How can I use the DevTools?](#how-can-i-use-the-devtools)
      - [Checking the network log](#checking-the-network-log)
  - [Troubleshooting](#troubleshooting)
    - [While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.](#while-using-cloudflare-tunnels-often-obsidian-api-fallback-and-524-error-occurs)
    - [On the mobile device, cannot synchronise on the local network!](#on-the-mobile-device-cannot-synchronise-on-the-local-network)
    - [I think that something bad happening on the vault...](#i-think-that-something-bad-happening-on-the-vault)
    - [Old tips](#old-tips)

<!-- - -->
## Tips

### CORS avoidance

If we are unable to configure CORS properly for any reason (for example, if we cannot configure non-administered network devices), we may choose to ignore CORS.
To use the Obsidian API (also known as the Non-Native API) to bypass CORS, we can enable the toggle ``Use Request API to avoid `inevitable` CORS problem``.
<!-- Add **Long explanation of CORS** here for integrity -->

### CORS configuration with reverse proxy

- IMPORTANT: CouchDB handles CORS by itself. Do not process CORS on the reverse proxy.
- Do not process `Option` requests on the reverse proxy!
- Make sure the `host` and `X-Forwarded-For` headers are forwarded to CouchDB.
- If you are using a subdirectory, make sure to handle it properly. More detailed information is in the [CouchDB documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html).

Minimal configurations are as follows (a small probe to verify the result follows after the examples):
#### Nginx

```nginx
location / {
    proxy_pass http://localhost:5984;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```

#### Nginx and subdirectory

```nginx
location /couchdb {
    # Strip the /couchdb prefix before passing the request to CouchDB.
    rewrite ^ $request_uri;
    rewrite ^/couchdb/(.*) /$1 break;
    proxy_pass http://localhost:5984$uri;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}

location /_session {
    proxy_pass http://localhost:5984/_session;
    proxy_redirect off;
    proxy_buffering off;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```

#### Caddy

```caddyfile
domain.com {
    reverse_proxy localhost:5984
}
```

#### Caddy and subdirectory

```caddyfile
domain.com {
    reverse_proxy /couchdb/* localhost:5984
    reverse_proxy /_session/* localhost:5984/_session
}
```

#### Apache

Sorry, Apache is not recommended for CouchDB, so its configuration is omitted here.
Please refer to the [official documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html#reverse-proxying-with-apache-http-server).
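After configuring the proxy, we can check from a terminal that CORS answers really come from CouchDB through the proxy. The following is a minimal TypeScript (Deno) probe; it is an illustration only, and `domain.com` is the placeholder domain from the examples above.

```typescript
// Illustration only: sends a preflight-style OPTIONS request and prints the
// CORS answer. The origin matches the one configured on CouchDB.
const res = await fetch("https://domain.com/", {
    method: "OPTIONS",
    headers: {
        "Origin": "app://obsidian.md",
        "Access-Control-Request-Method": "PUT",
    },
});
console.log(res.status, res.headers.get("access-control-allow-origin"));
// Expected: app://obsidian.md is echoed back when CORS is configured on
// CouchDB as described in this document.
await res.body?.cancel();
```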
### Show all setting panes

The full set of panes is not shown by default. To show all panes, please toggle all in `🧙♂️ Wizard` -> `Enable extra and advanced features`.

For your information, all the panes are as follows:
![All setting panes](all_toggles.png)

### How to resolve `Tweaks Mismatched of Changed`

(Since v0.23.17)

If you have changed some configurations or tweaks which should be unified between the devices, you will be asked at the next synchronisation how to reflect them (or not) on other devices. This also occurs on the device where the changes were made, to prevent unexpected configuration changes from unwanted propagation.\
(We may be thankful for this behaviour if we have synchronised, or backed up and restored, Self-hosted LiveSync. At least, it is so for me).

The following dialogue will be shown: ![Tweaks Mismatched or Changed](tweak_mismatch_dialogue.png)

- If we want to propagate the settings of this device, we should choose `Update with mine`.
- On other devices, we should choose `Use configured` to accept and use the configured configuration.
- `Dismiss` can postpone the decision. However, we cannot synchronise until we have decided.

Rest assured that in most cases we can choose `Use configured`. (Unless you are certain that you have not changed the configuration).

If we see it for the first time, it reflects the settings of the device that has been synchronised with the remote for the first time since the upgrade. Probably, we can accept that.

<!-- Add here -->
## Notable bugs and fixes

### Binary files get bigger on iOS

- Reported at: v0.20.x
- Fixed at: v0.21.2 (Fixed but not reviewed)
- Required action: larger files will not be fixed automatically; please perform `Verify and repair all files`. If our local database and storage do not match, we will be asked which one to apply.

### Some setting name has been changed

- Fixed at: v0.22.6

| Previous name | New name |
| ---------------------------- | ---------------------------------------- |
| Open setup URI | Use the copied setup URI |
| Copy setup URI | Copy current settings as a new setup URI |
| Setup Wizard | Minimal Setup |
| Check database configuration | Check and Fix database configuration |
## FAQ

### Why `Use an old adapter for compatibility` is somehow enabled in my vault?

Because you are a compassionate and experienced user. Before v0.17.16, we used an old adapter for the local database. At that time, the current default adapter was not yet stable. The new adapter has better performance and new features such as purging. Therefore, we should use the new adapter, and it is now the default.

However, when switching from the old adapter to the new one, some conversion or local database rebuilding is required, and that takes some time. It was a long time ago now, but we once inconvenienced everyone in a hurry when we changed the format of our database. For these reasons, this toggle is automatically on if we have upgraded from a vault which was using the old adapter.

When you rebuild everything or fetch from the remote again, you will be asked to switch this.

Therefore, experienced users (especially those stable enough not to have to rebuild the database) may have this toggle enabled in their vault. Please disable it when you have enough time.

### ZIP (or any extensions) files were not synchronised. Why?

It depends on what Obsidian detects. Toggling `Detect all extensions` in `File and links` (a setting of Obsidian) may help us.

### I hope to report the issue, but you said you need `Report`. How to make it?

We can copy the report to the clipboard by pressing the `Make report` button on the `Hatch` pane. ![Hatch pane](../images/hatch.png)

### Where can I check the log?

We can launch the log pane with `Show log` on the command palette. And if something has troubled you, please enable the `Verbose Log` on the `General Setting` pane.

However, the logs are not kept for long and are cleared when restarted. If you want to keep the logs, please enable `Write logs into the file` temporarily.

![Write logs into the file](../images/write_logs_into_the_file.png)

> [!IMPORTANT]
>
> - Writing logs into the file will impact performance.
> - Please make sure that you have erased all your confidential information before reporting an issue.
### Why are the logs volatile and ephemeral?

To avoid unexpected exposure of our confidential things.

### Some network logs are not written into the file.

Especially, CORS errors are reported to the plug-in as general errors for security reasons, so we cannot detect and log them. We are only able to investigate them by [checking the network log](#checking-the-network-log).
### If a file were deleted or trimmed, the capacity of the database should be reduced, right?

No. Even though files have been deleted, chunks are not deleted. Self-hosted LiveSync splits files into multiple chunks and transfers only newly created ones. This behaviour lets us use less traffic. Furthermore, chunks are shared between files to reduce the total usage of the database.

One more thing: we can handle conflicts on any device, even if they happened on other devices. This means that conflicts may surface from the past, after the time we have synchronised. Hence we cannot collect and delete unused chunks, even if they are not currently referenced.

To shrink the database size, only `Rebuild everything` is reliable and effective. But do not worry: if we have synchronised well, we have the actual, real files. It only takes a bit of time and traffic. A sketch of why chunks deduplicate so well follows below.
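To illustrate the idea (this is an illustration, not the plug-in's actual chunking algorithm), here is a minimal TypeScript sketch of content-addressed chunking: pieces with identical content hash to the same ID, so they are stored once and shared between revisions and files.

```typescript
// Illustration only: fixed-size, content-addressed chunking. The real
// splitting strategy differs, but the sharing principle is the same.
async function chunkIds(content: string, chunkSize = 8): Promise<string[]> {
    const ids: string[] = [];
    for (let i = 0; i < content.length; i += chunkSize) {
        const piece = new TextEncoder().encode(content.slice(i, i + chunkSize));
        const digest = await crypto.subtle.digest("SHA-256", piece);
        const hex = [...new Uint8Array(digest)]
            .map((b) => b.toString(16).padStart(2, "0"))
            .join("");
        ids.push(hex.slice(0, 8)); // shortened for display
    }
    return ids;
}

const v1 = await chunkIds("Some long, largely unchanged body text... revision 1");
const v2 = await chunkIds("Some long, largely unchanged body text... revision 2");
// Only the chunk containing the edit gets a new ID; all the other chunks are
// shared, so only the changed chunk needs to be stored and transferred.
console.log(v2.filter((id) => v1.includes(id)).length, "of", v2.length, "chunks shared");
```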
### How to launch the DevTools

#### On Desktop Devices

We can launch the DevTools by pressing `ctrl`+`shift`+`i` (`Command`+`shift`+`i` on Mac).

#### On Android

Please refer to [Remote debug Android devices](https://developer.chrome.com/docs/devtools/remote-debugging/).
Once the DevTools have been launched, everything operates the same as on a PC.

#### On iOS, iPadOS devices

If we have a Mac, we can inspect from Safari on the Mac. Please refer to [Inspecting iOS and iPadOS](https://developer.apple.com/documentation/safari-developer-tools/inspecting-ios).

### How can I use the DevTools?

#### Checking the network log

1. Open the network pane.
2. Find the requests marked in red.\
   ![Network pane](../images/devtools1.png)
3. Capture the `Headers`, `Payload`, and `Response`. **Please be sure to keep important information confidential**. If the `Response` contains secrets, you can omit it. Note: the headers contain some credentials. **The path of the request URL, Remote Address, authority, and authorization must be concealed.**\
   ![Network request details](../images/devtools2.png)
## Troubleshooting

<!-- Add here -->

### While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.

A `524` error occurs when a request to the server is not completed within a `specified time`. This is a timeout error from Cloudflare. From the reported issue, it seems to be 100 seconds (#627).

Therefore, this error is returned from Cloudflare, not from the server. Hence, the result contains no CORS fields, and this response makes the Obsidian API fall back.

However, even if the Obsidian API fallback occurs, the request is still not completed within the `specified time` of 100 seconds.

To solve this issue, we need to configure the timeout settings.

Please enable the toggle in `💪 Power users` -> `CouchDB Connection Tweak` -> `Use timeouts instead of heartbeats`.

### On the mobile device, cannot synchronise on the local network!

Obsidian mobile is not able to connect to non-secure endpoints, such as those starting with `http://`. Please double-check your CouchDB URI. It is also not able to use a self-signed certificate.

### I think that something bad happening on the vault...

Place `redflag.md` at the top of the vault and restart Obsidian. The simplest way is to create a new note and rename it to `redflag`. Of course, we can put it there without Obsidian.

If `redflag.md` exists, Self-hosted LiveSync suspends all database and storage processes.

There are some options when using `redflag.md`:

| Filename | Human-Friendly Name | Description |
| ------------- | ------------------- | ------------------------------------------------------------------------------------ |
| `redflag.md` | - | Suspends all processes. |
| `redflag2.md` | `flag_rebuild.md` | Suspends all processes, and rebuilds both local and remote databases from local files. |
| `redflag3.md` | `flag_fetch.md` | Suspends all processes, discards the local database, and fetches from the remote again. |
When fetching everything from the remote or performing a rebuild, Obsidian is restarted once for safety reasons. At that time, Self-hosted LiveSync uses these files to determine whether the process should be carried out. (The use of normal markdown files is a trick to externally force cancellation in the event of faults in the rebuild or fetch function itself, especially on mobile devices.) This mechanism is also used for set-up. And just for information, these files are also not subject to synchronisation.

However, occasionally the deletion of these files may fail. This should generally work normally after restarting Obsidian (as far as I can observe).
### Old tips

- Rarely, a file in the database could be corrupted. The plugin will not write to local storage when a file looks corrupted. If a local version of the file is on your device, the corruption could be fixed by editing the local file and synchronizing it. But if the file does not exist on any of your devices, then it cannot be rescued. In this case, you can delete these items from the settings dialog.
- To stop the boot-up sequence (e.g. for fixing problems on databases), you can put a `redflag.md` file (or directory) at the root of your vault. Tip for iOS: a redflag directory can be created at the root of the vault using the Files application.
- Also, with `redflag2.md` placed, we can automatically rebuild both the local and the remote databases during the boot-up sequence. With `redflag3.md`, we can discard only the local database and fetch from the remote again.
- Q: The database is growing, how can I shrink it down? A: Each of the docs is saved with its past 100 revisions for detecting and resolving conflicts. Picture that one device has been offline for a while, and comes online again. The device has to compare its notes with the remotely saved ones. If there exists a historic revision in which the note used to be identical, it can be updated safely (like a git fast-forward). Even if that is not in the revision histories, we only have to check the differences after the revision that both devices have in common. This is like git's conflict-resolving method. So, we have to make the database again, like an enlarged git repo, if we want to solve the root of the problem.
- More technical information is in the [Technical Information](tech_info.md).
- If you want to synchronize files without Obsidian, you can use [filesystem-livesync](https://github.com/vrtmrz/filesystem-livesync).
- WebClipper is also available on the Chrome Web Store: [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)

Repo is here: [obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip). (Docs are a work in progress.)
BIN docs/tweak_mismatch_dialogue.png (new file; after: 46 KiB)
@@ -1,44 +1,194 @@
//@ts-check

import esbuild from "esbuild";
import process from "process";
import builtins from "builtin-modules";
import sveltePlugin from "esbuild-svelte";
import sveltePreprocess from "svelte-preprocess";
import { sveltePreprocess } from "svelte-preprocess";
import fs from "node:fs";
const banner = `/*
THIS IS A GENERATED/BUNDLED FILE BY ESBUILD
if you want to view the source, please visit the github repository of this plugin
*/
`;
// import terser from "terser";
import { minify } from "terser";
import inlineWorkerPlugin from "esbuild-plugin-inline-worker";
import { terserOption } from "./terser.config.mjs";
import path from "node:path";

const prod = process.argv[2] === "production";
const manifestJson = JSON.parse(fs.readFileSync("./manifest.json"));
const packageJson = JSON.parse(fs.readFileSync("./package.json"));
const prod = process.argv[2] === "production" || process.env?.BUILD_MODE === "production";
const keepTest = true; //!prod;

const manifestJson = JSON.parse(fs.readFileSync("./manifest.json") + "");
const packageJson = JSON.parse(fs.readFileSync("./package.json") + "");
const updateInfo = JSON.stringify(fs.readFileSync("./updates.md") + "");
esbuild
    .build({
        banner: {
            js: banner,

const PATHS_TEST_INSTALL = process.env?.PATHS_TEST_INSTALL || "";
const PATH_TEST_INSTALL = PATHS_TEST_INSTALL.split(path.delimiter).map(p => p.trim()).filter(p => p.length);
if (!prod) {
    if (PATH_TEST_INSTALL) {
        console.log(`Built files will be copied to ${PATH_TEST_INSTALL}`);
    } else {
        console.log("Development build: You can install the plug-in to Obsidian for testing by exporting the PATHS_TEST_INSTALL environment variable with the paths to your vault plugins directories separated by your system path delimiter (':' on Unix, ';' on Windows).");
    }
} else {
    console.log("Production build");
}

const moduleAliasPlugin = {
    name: "module-alias",
    setup(build) {
        build.onResolve({ filter: /.(dev)(.ts|)$/ }, (args) => {
            // console.log(args.path);
            if (prod) {
                let prodTs = args.path.replace(".dev", ".prod");
                const statFile = prodTs.endsWith(".ts") ? prodTs : prodTs + ".ts";
                const realPath = path.join(args.resolveDir, statFile);
                console.log(`Checking ${statFile}`);
                if (fs.existsSync(realPath)) {
                    console.log(`Replaced ${args.path} with ${prodTs}`);
                    return {
                        path: realPath,
                        namespace: "file",
                    };
                }
            }
            return null;
        });
        build.onResolve({ filter: /.(platform)(.ts|)$/ }, (args) => {
            // console.log(args.path);
            if (prod) {
                let prodTs = args.path.replace(".platform", ".obsidian");
                const statFile = prodTs.endsWith(".ts") ? prodTs : prodTs + ".ts";
                const realPath = path.join(args.resolveDir, statFile);
                console.log(`Checking ${statFile}`);
                if (fs.existsSync(realPath)) {
                    console.log(`Replaced ${args.path} with ${prodTs}`);
                    return {
                        path: realPath,
                        namespace: "file",
                    };
                }
            }
            return null;
        });
    },
};

/** @type esbuild.Plugin[] */
const plugins = [
    {
        name: "my-plugin",
        setup(build) {
            let count = 0;
            build.onEnd(async (result) => {
                if (count++ === 0) {
                    console.log("first build:");
                    if (prod) {
                        console.log("MetaFile:");
                        if (result.metafile) {
                            fs.writeFileSync("meta.json", JSON.stringify(result.metafile));
                            let text = await esbuild.analyzeMetafile(result.metafile, {
                                verbose: true,
                            });
                            // console.log(text);
                        }
                    }
                } else {
                    console.log("subsequent build:");
                }
                const filename = `meta-${prod ? "prod" : "dev"}.json`;
                await fs.promises.writeFile(filename, JSON.stringify(result.metafile, null, 2));
                if (prod) {
                    console.log("Performing terser");
                    const src = fs.readFileSync("./main_org.js").toString();
                    // @ts-ignore
                    const ret = await minify(src, terserOption);
                    if (ret && ret.code) {
                        fs.writeFileSync("./main.js", ret.code);
                    }
                    console.log("Finished terser");
                } else {
                    fs.copyFileSync("./main_org.js", "./main.js");
                }
                if (PATH_TEST_INSTALL) {
                    for (const installPath of PATH_TEST_INSTALL) {
                        const realPath = path.resolve(installPath);
                        console.log(`Copying built files to ${realPath}`);
                        if (!fs.existsSync(realPath)) {
                            console.warn(`Test install path ${installPath} does not exist`);
                            continue;
                        }
                        const manifestX = JSON.parse(fs.readFileSync("./manifest.json") + "");
                        manifestX.version = manifestJson.version + "." + Date.now();
                        fs.writeFileSync(path.join(installPath, "manifest.json"), JSON.stringify(manifestX, null, 2));
                        fs.copyFileSync("./main.js", path.join(installPath, "main.js"));
                        fs.copyFileSync("./styles.css", path.join(installPath, "styles.css"));
                    }
                }
            });
        },
        entryPoints: ["src/main.ts"],
        bundle: true,
        define: {
            "MANIFEST_VERSION": `"${manifestJson.version}"`,
            "PACKAGE_VERSION": `"${packageJson.version}"`,
            "UPDATE_INFO": `${updateInfo}`,
        },
        external: ["obsidian", "electron", ...builtins],
        format: "cjs",
        watch: !prod,
        target: "es2018",
        logLevel: "info",
        sourcemap: prod ? false : "inline",
        treeShaking: true,
        plugins: [
            sveltePlugin({
                preprocess: sveltePreprocess(),
                compilerOptions: { css: true },
            }),
        ],
        outfile: "main.js",
    })
    .catch(() => process.exit(1));
    },
];

const externals = [
    "obsidian",
    "electron",
    "crypto",
    "@codemirror/autocomplete",
    "@codemirror/collab",
    "@codemirror/commands",
    "@codemirror/language",
    "@codemirror/lint",
    "@codemirror/search",
    "@codemirror/state",
    "@codemirror/view",
    "@lezer/common",
    "@lezer/highlight",
    "@lezer/lr",
];
const context = await esbuild.context({
    banner: {
        js: "// Leave it all to terser",
    },
    entryPoints: ["src/main.ts"],
    bundle: true,
    define: {
        MANIFEST_VERSION: `"${manifestJson.version}"`,
        PACKAGE_VERSION: `"${packageJson.version}"`,
        UPDATE_INFO: `${updateInfo}`,
        global: "window",
    },
    external: externals,
    // minifyWhitespace: true,
    format: "cjs",
    target: "es2018",
    logLevel: "info",
    platform: "browser",
    metafile: true,
    sourcemap: prod ? false : "inline",
    treeShaking: false,
    outfile: "main_org.js",
    mainFields: ["browser", "module", "main"],
    minifyWhitespace: false,
    minifySyntax: false,
    minifyIdentifiers: false,
    minify: false,
    dropLabels: prod && !keepTest ? ["TEST", "DEV"] : [],
    // keepNames: true,
    plugins: [
        moduleAliasPlugin,
        inlineWorkerPlugin({
            external: externals,
            treeShaking: true,
        }),
        sveltePlugin({
            preprocess: sveltePreprocess(),
            compilerOptions: { css: "injected", preserveComments: false },
        }),
        ...plugins,
    ],
});

if (prod) {
    await context.rebuild();
    process.exit(0);
} else {
    await context.watch();
}
99 eslint.config.mjs (new file)
@@ -0,0 +1,99 @@
import typescriptEslint from "@typescript-eslint/eslint-plugin";
import svelte from "eslint-plugin-svelte";
import _import from "eslint-plugin-import";
import { fixupPluginRules } from "@eslint/compat";
import tsParser from "@typescript-eslint/parser";
import path from "node:path";
import { fileURLToPath } from "node:url";
import js from "@eslint/js";
import { FlatCompat } from "@eslint/eslintrc";

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
    baseDirectory: __dirname,
    recommendedConfig: js.configs.recommended,
    allConfig: js.configs.all,
});

export default [
    {
        ignores: [
            "**/node_modules/*",
            "**/jest.config.js",
            "src/lib/coverage",
            "src/lib/browsertest",
            "**/test.ts",
            "**/tests.ts",
            "**/**test.ts",
            "**/**.test.ts",
            "**/esbuild.*.mjs",
            "**/terser.*.mjs",
            "**/node_modules",
            "**/build",
            "**/.eslintrc.js.bak",
            "src/lib/src/patches/pouchdb-utils",
            "**/esbuild.config.mjs",
            "**/rollup.config.js",
            "modules/octagonal-wheels/rollup.config.js",
            "modules/octagonal-wheels/dist/**/*",
            "src/lib/test",
            "src/lib/src/cli",
            "**/main.js",
            "src/lib/apps/webpeer/*"
        ],
    },
    ...compat.extends(
        "eslint:recommended",
        "plugin:@typescript-eslint/eslint-recommended",
        "plugin:@typescript-eslint/recommended"
    ),
    {
        plugins: {
            "@typescript-eslint": typescriptEslint,
            svelte,
            import: fixupPluginRules(_import),
        },

        languageOptions: {
            parser: tsParser,
            ecmaVersion: 5,
            sourceType: "module",

            parserOptions: {
                project: ["tsconfig.json"],
            },
        },

        rules: {
            "no-unused-vars": "off",

            "@typescript-eslint/no-unused-vars": [
                "error",
                {
                    args: "none",
                },
            ],

            "no-unused-labels": "off",
            "@typescript-eslint/ban-ts-comment": "off",
            "no-prototype-builtins": "off",
            "@typescript-eslint/no-empty-function": "off",
            "require-await": "error",
            "@typescript-eslint/require-await": "warn",
            "@typescript-eslint/no-misused-promises": "warn",
            "@typescript-eslint/no-floating-promises": "warn",
            "no-async-promise-executor": "warn",
            "@typescript-eslint/no-explicit-any": "off",
            "@typescript-eslint/no-unnecessary-type-assertion": "error",

            "no-constant-condition": [
                "error",
                {
                    checkLoops: false,
                },
            ],
        },
    },
];
1  example.env  Normal file
@@ -0,0 +1 @@
PATHS_TEST_INSTALL=your-vault-plugin-path:and-another-path
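The new `dev` script in package.json (further below) runs `node --env-file=.env esbuild.config.mjs`, so entries in a local `.env` based on this example become ordinary environment variables. A hedged sketch of a consumer; the copy step and the colon-separated convention are read off the example value, not confirmed elsewhere in this diff:

    // Illustrative consumer for PATHS_TEST_INSTALL after `node --env-file=.env`.
    const raw = process.env.PATHS_TEST_INSTALL ?? "";
    // The example value suggests multiple plugin directories separated by ":".
    const installPaths = raw.split(":").filter((p) => p.length > 0);
    for (const p of installPaths) {
        console.log(`would install the built plugin into: ${p}`);
    }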
BIN  images/devtools1.png  Normal file  (After: 11 KiB)
BIN  images/devtools2.png  Normal file  (After: 41 KiB)
BIN  images/hatch.png  Normal file  (After: 13 KiB)
(changed image)  Before: 39 KiB, After: 20 KiB
(changed image)  Before: 16 KiB, After: 6.2 KiB
BIN  images/quick_setup_3b.png  Normal file  (After: 74 KiB)
(changed image)  Before: 31 KiB, After: 20 KiB
(changed image)  Before: 36 KiB, After: 5.7 KiB
(removed image)  Before: 10 KiB
(changed image)  Before: 35 KiB, After: 6.4 KiB
BIN  images/write_logs_into_the_file.png  Normal file  (After: 17 KiB)
10  manifest-beta.json  Normal file
@@ -0,0 +1,10 @@
{
    "id": "obsidian-livesync",
    "name": "Self-hosted LiveSync",
    "version": "0.25.24.beta3",
    "minAppVersion": "0.9.12",
    "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
    "author": "vorotamoroz",
    "authorUrl": "https://github.com/vrtmrz",
    "isDesktopOnly": false
}
manifest.json
@@ -1,10 +1,10 @@
 {
     "id": "obsidian-livesync",
     "name": "Self-hosted LiveSync",
-    "version": "0.15.5",
+    "version": "0.25.32",
     "minAppVersion": "0.9.12",
     "description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "author": "vorotamoroz",
     "authorUrl": "https://github.com/vrtmrz",
     "isDesktopOnly": false
 }
21146  package-lock.json  generated
116  package.json
@@ -1,46 +1,102 @@
 {
     "name": "obsidian-livesync",
-    "version": "0.15.5",
+    "version": "0.25.32",
     "description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
     "main": "main.js",
     "type": "module",
     "scripts": {
-        "dev": "node esbuild.config.mjs",
+        "bakei18n": "npx tsx ./src/lib/_tools/bakei18n.ts",
+        "i18n:bakejson": "npx tsx ./src/lib/_tools/bakei18n.ts",
+        "i18n:yaml2json": "npx tsx ./src/lib/_tools/yaml2json.ts",
+        "i18n:json2yaml": "npx tsx ./src/lib/_tools/json2yaml.ts",
+        "prettyjson": "prettier --config ./.prettierrc ./src/lib/src/common/messagesJson/*.json --write --log-level error",
+        "postbakei18n": "prettier --config ./.prettierrc ./src/lib/src/common/messages/*.ts --write --log-level error",
+        "posti18n:yaml2json": "npm run prettyjson",
+        "predev": "npm run bakei18n",
+        "dev": "node --env-file=.env esbuild.config.mjs",
+        "prebuild": "npm run bakei18n",
         "build": "node esbuild.config.mjs production",
-        "lint": "eslint src"
+        "buildDev": "node esbuild.config.mjs dev",
+        "lint": "eslint src",
+        "svelte-check": "svelte-check --tsconfig ./tsconfig.json",
+        "tsc-check": "tsc --noEmit",
+        "pretty": "npm run prettyNoWrite -- --write --log-level error",
+        "prettyCheck": "npm run prettyNoWrite -- --check",
+        "prettyNoWrite": "prettier --config ./.prettierrc \"**/*.js\" \"**/*.ts\" \"**/*.json\" ",
+        "check": "npm run lint && npm run svelte-check",
+        "unittest": "deno test -A --no-check --coverage=cov_profile --v8-flags=--expose-gc --trace-leaks ./src/"
     },
     "keywords": [],
     "author": "vorotamoroz",
     "license": "MIT",
     "devDependencies": {
-        "@rollup/plugin-commonjs": "^18.0.0",
-        "@rollup/plugin-node-resolve": "^11.2.1",
-        "@rollup/plugin-typescript": "^8.2.1",
-        "@types/diff-match-patch": "^1.0.32",
-        "@types/pouchdb": "^6.4.0",
-        "@types/pouchdb-browser": "^6.1.3",
-        "@typescript-eslint/eslint-plugin": "^5.7.0",
-        "@typescript-eslint/parser": "^5.0.0",
-        "builtin-modules": "^3.2.0",
-        "esbuild": "0.13.12",
-        "esbuild-svelte": "^0.7.0",
-        "eslint": "^7.32.0",
-        "eslint-config-airbnb-base": "^14.2.1",
-        "eslint-plugin-import": "^2.25.2",
-        "obsidian": "^0.15.4",
-        "postcss": "^8.4.14",
-        "postcss-load-config": "^3.1.4",
-        "rollup": "^2.32.1",
-        "svelte": "^3.49.0",
-        "svelte-preprocess": "^4.10.7",
-        "tslib": "^2.2.0",
-        "typescript": "^4.2.4"
+        "@chialab/esbuild-plugin-worker": "^0.18.1",
+        "@eslint/compat": "^1.2.7",
+        "@eslint/eslintrc": "^3.3.0",
+        "@eslint/js": "^9.21.0",
+        "@sveltejs/vite-plugin-svelte": "^6.2.1",
+        "@tsconfig/svelte": "^5.0.5",
+        "@types/deno": "^2.3.0",
+        "@types/diff-match-patch": "^1.0.36",
+        "@types/node": "^22.13.8",
+        "@types/pouchdb": "^6.4.2",
+        "@types/pouchdb-adapter-http": "^6.1.6",
+        "@types/pouchdb-adapter-idb": "^6.1.7",
+        "@types/pouchdb-browser": "^6.1.5",
+        "@types/pouchdb-core": "^7.0.15",
+        "@types/pouchdb-mapreduce": "^6.1.10",
+        "@types/pouchdb-replication": "^6.4.7",
+        "@types/transform-pouch": "^1.0.6",
+        "@typescript-eslint/eslint-plugin": "8.46.2",
+        "@typescript-eslint/parser": "8.46.2",
+        "builtin-modules": "5.0.0",
+        "esbuild": "0.25.0",
+        "esbuild-plugin-inline-worker": "^0.1.1",
+        "esbuild-svelte": "^0.9.3",
+        "eslint": "^9.38.0",
+        "eslint-plugin-import": "^2.32.0",
+        "eslint-plugin-svelte": "^3.12.4",
+        "events": "^3.3.0",
+        "glob": "^11.0.3",
+        "obsidian": "^1.8.7",
+        "postcss": "^8.5.3",
+        "postcss-load-config": "^6.0.1",
+        "pouchdb-adapter-http": "^9.0.0",
+        "pouchdb-adapter-idb": "^9.0.0",
+        "pouchdb-adapter-indexeddb": "^9.0.0",
+        "pouchdb-adapter-memory": "^9.0.0",
+        "pouchdb-core": "^9.0.0",
+        "pouchdb-errors": "^9.0.0",
+        "pouchdb-find": "^9.0.0",
+        "pouchdb-mapreduce": "^9.0.0",
+        "pouchdb-merge": "^9.0.0",
+        "pouchdb-replication": "^9.0.0",
+        "pouchdb-utils": "^9.0.0",
+        "prettier": "3.5.2",
+        "svelte": "5.41.1",
+        "svelte-check": "^4.3.3",
+        "svelte-preprocess": "^6.0.3",
+        "terser": "^5.39.0",
+        "transform-pouch": "^2.0.0",
+        "tslib": "^2.8.1",
+        "tsx": "^4.20.6",
+        "typescript": "5.9.3",
+        "yaml": "^2.8.0"
     },
     "dependencies": {
+        "@aws-sdk/client-s3": "^3.808.0",
+        "@smithy/fetch-http-handler": "^5.0.2",
+        "@smithy/md5-js": "^4.0.2",
+        "@smithy/middleware-apply-body-checksum": "^4.1.0",
+        "@smithy/protocol-http": "^5.1.0",
+        "@smithy/querystring-builder": "^4.0.2",
         "diff-match-patch": "^1.0.5",
-        "esbuild": "0.13.12",
-        "esbuild-svelte": "^0.7.0",
-        "idb": "^7.0.2",
-        "xxhash-wasm": "^0.4.2"
+        "fflate": "^0.8.2",
+        "idb": "^8.0.3",
+        "minimatch": "^10.0.2",
+        "octagonal-wheels": "^0.1.44",
+        "qrcode-generator": "^1.4.4",
+        "trystero": "^0.22.0",
+        "xxhash-wasm-102": "npm:xxhash-wasm@^1.0.2"
     }
 }
@@ -1,31 +0,0 @@
import typescript from "@rollup/plugin-typescript";
import { nodeResolve } from "@rollup/plugin-node-resolve";
import commonjs from "@rollup/plugin-commonjs";

const isProd = process.env.BUILD === "production";

const banner = `/*
THIS IS A GENERATED/BUNDLED FILE BY ROLLUP
if you want to view the source visit the plugins github repository
*/
`;

export default {
    input: "./src/main.ts",
    output: {
        dir: ".",
        sourcemap: "inline",
        sourcemapExcludeSources: isProd,
        format: "cjs",
        exports: "default",
        banner,
    },
    external: ["obsidian"],
    plugins: [
        typescript({ exclude: ["pouchdb-browser.js", "pouchdb-browser-webpack"] }),
        nodeResolve({
            browser: true,
        }),
        commonjs(),
    ],
};
151  setup-flyio-on-the-fly-v2.ipynb  Normal file
@@ -0,0 +1,151 @@
{
    "cells": [
        {
            "cell_type": "markdown",
            "metadata": {
                "colab_type": "text",
                "id": "view-in-github"
            },
            "source": [
                "<a href=\"https://colab.research.google.com/gist/vrtmrz/9402b101746e08e969b1a4f5f0deb465/setup-flyio-on-the-fly-v2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
            ]
        },
        {
            "cell_type": "markdown",
            "metadata": {
                "id": "AzLlAcLFRO5A"
            },
            "source": [
                "- Initial version 7th Feb. 2024"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "metadata": {
                "id": "z1x8DQpa9opC"
            },
            "outputs": [],
            "source": [
                "# Install prerequisites\n",
                "!curl -L https://fly.io/install.sh | sh\n",
                "!curl -fsSL https://deno.land/x/install/install.sh | sh\n",
                "!apt update && apt -y install jq\n",
                "import os\n",
                "%env PATH=/root/.fly/bin:/root/.deno/bin/:{os.environ[\"PATH\"]}\n",
                "!git clone --recursive https://github.com/vrtmrz/obsidian-livesync"
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "metadata": {
                "id": "mGN08BaFDviy"
            },
            "outputs": [],
            "source": [
                "# Log in or sign up\n",
                "!flyctl auth signup"
            ]
        },
        {
            "cell_type": "markdown",
            "metadata": {
                "id": "BBFTFOP6vA8m"
            },
            "source": [
                "Select a region and execute the block."
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "metadata": {
                "id": "TNl0A603EF9E"
            },
            "outputs": [],
            "source": [
                "# see https://fly.io/docs/reference/regions/\n",
                "region = \"nrt/Tokyo, Japan\" #@param [\"ams/Amsterdam, Netherlands\",\"arn/Stockholm, Sweden\",\"atl/Atlanta, Georgia (US)\",\"bog/Bogotá, Colombia\",\"bos/Boston, Massachusetts (US)\",\"cdg/Paris, France\",\"den/Denver, Colorado (US)\",\"dfw/Dallas, Texas (US)\",\"ewr/Secaucus, NJ (US)\",\"eze/Ezeiza, Argentina\",\"gdl/Guadalajara, Mexico\",\"gig/Rio de Janeiro, Brazil\",\"gru/Sao Paulo, Brazil\",\"hkg/Hong Kong, Hong Kong\",\"iad/Ashburn, Virginia (US)\",\"jnb/Johannesburg, South Africa\",\"lax/Los Angeles, California (US)\",\"lhr/London, United Kingdom\",\"mad/Madrid, Spain\",\"mia/Miami, Florida (US)\",\"nrt/Tokyo, Japan\",\"ord/Chicago, Illinois (US)\",\"otp/Bucharest, Romania\",\"phx/Phoenix, Arizona (US)\",\"qro/Querétaro, Mexico\",\"scl/Santiago, Chile\",\"sea/Seattle, Washington (US)\",\"sin/Singapore, Singapore\",\"sjc/San Jose, California (US)\",\"syd/Sydney, Australia\",\"waw/Warsaw, Poland\",\"yul/Montreal, Canada\",\"yyz/Toronto, Canada\" ] {allow-input: true}\n",
                "%env region={region.split(\"/\")[0]}\n",
                "#%env appname=\n",
                "#%env username=\n",
                "#%env password=\n",
                "#%env database=\n",
                "#%env passphrase=\n",
                "\n",
                "# Automatic setup, leave it as-is -->\n",
                "%cd obsidian-livesync/utils/flyio\n",
                "!./deploy-server.sh | tee deploy-result.txt\n",
                "\n",
                "## Show result button\n",
                "from IPython.display import HTML\n",
                "last_line=\"\"\n",
                "with open('deploy-result.txt', 'r') as f:\n",
                "    last_line = f.readlines()[-1]\n",
                "    last_line = str.strip(last_line)\n",
                "\n",
                "if last_line.startswith(\"obsidian://\"):\n",
                "    result = HTML(f\"Copy your setup-URI with this button! -> <button onclick=\\\"navigator.clipboard.writeText('{last_line}')\\\">Copy setup uri</button><br>The passphrase for importing is the one displayed. <br>If you want to synchronise in live mode, please apply a preset after ensuring the imported configuration works.\")\n",
                "else:\n",
                "    result = \"Failed to encrypt the setup URI\"\n",
                "result"
            ]
        },
        {
            "cell_type": "markdown",
            "metadata": {
                "id": "oeIzExnEKhFp"
            },
            "source": [
                "If you see the `Copy setup URI` button, congratulations! Your CouchDB is ready to use. Please click the button, then open the copied URI in Obsidian.\n",
                "\n",
                "You should also keep the output as a secret memo.\n",
                "\n"
            ]
        },
        {
            "cell_type": "markdown",
            "metadata": {
                "id": "sdQrqOjERN3K"
            },
            "source": [
                "\n",
                "\n",
                "---\n",
                "\n",
                "\n",
                "If you want to delete this CouchDB instance, you can do so by executing the next cell. \n",
                "If your fly.toml is gone, visit https://fly.io/dashboard and check the existing app."
            ]
        },
        {
            "cell_type": "code",
            "execution_count": null,
            "metadata": {
                "id": "7JMSkNvVIIfg"
            },
            "outputs": [],
            "source": [
                "!./delete-server.sh"
            ]
        }
    ],
    "metadata": {
        "colab": {
            "authorship_tag": "ABX9TyMexQ5pErH5LBG2tENtEVWf",
            "include_colab_link": true,
            "private_outputs": true,
            "provenance": []
        },
        "kernelspec": {
            "display_name": "Python 3",
            "name": "python3"
        },
        "language_info": {
            "name": "python"
        }
    },
    "nbformat": 4,
    "nbformat_minor": 0
}
@@ -1,81 +0,0 @@
import { App, Modal } from "obsidian";
import { DIFF_DELETE, DIFF_EQUAL, DIFF_INSERT } from "diff-match-patch";
import { diff_result } from "./lib/src/types";
import { escapeStringToHTML } from "./lib/src/utils";

export class ConflictResolveModal extends Modal {
    // result: Array<[number, string]>;
    result: diff_result;
    callback: (remove_rev: string) => Promise<void>;

    constructor(app: App, diff: diff_result, callback: (remove_rev: string) => Promise<void>) {
        super(app);
        this.result = diff;
        this.callback = callback;
    }

    onOpen() {
        const { contentEl } = this;

        contentEl.empty();

        contentEl.createEl("h2", { text: "This document has conflicted changes." });
        const div = contentEl.createDiv("");
        div.addClass("op-scrollable");
        let diff = "";
        for (const v of this.result.diff) {
            const x1 = v[0];
            const x2 = v[1];
            if (x1 == DIFF_DELETE) {
                diff += "<span class='deleted'>" + escapeStringToHTML(x2) + "</span>";
            } else if (x1 == DIFF_EQUAL) {
                diff += "<span class='normal'>" + escapeStringToHTML(x2) + "</span>";
            } else if (x1 == DIFF_INSERT) {
                diff += "<span class='added'>" + escapeStringToHTML(x2) + "</span>";
            }
        }

        diff = diff.replace(/\n/g, "<br>");
        div.innerHTML = diff;
        const div2 = contentEl.createDiv("");
        const date1 = new Date(this.result.left.mtime).toLocaleString();
        const date2 = new Date(this.result.right.mtime).toLocaleString();
        div2.innerHTML = `
<span class='deleted'>A:${date1}</span><br /><span class='added'>B:${date2}</span><br>
        `;
        contentEl.createEl("button", { text: "Keep A" }, (e) => {
            e.addEventListener("click", async () => {
                await this.callback(this.result.right.rev);
                this.callback = null;
                this.close();
            });
        });
        contentEl.createEl("button", { text: "Keep B" }, (e) => {
            e.addEventListener("click", async () => {
                await this.callback(this.result.left.rev);
                this.callback = null;
                this.close();
            });
        });
        contentEl.createEl("button", { text: "Concat both" }, (e) => {
            e.addEventListener("click", async () => {
                await this.callback("");
                this.callback = null;
                this.close();
            });
        });
        contentEl.createEl("button", { text: "Not now" }, (e) => {
            e.addEventListener("click", () => {
                this.close();
            });
        });
    }

    onClose() {
        const { contentEl } = this;
        contentEl.empty();
        if (this.callback != null) {
            this.callback(null);
        }
    }
}
@@ -1,197 +0,0 @@
import { TFile, Modal, App } from "obsidian";
import { path2id } from "./utils";
import { base64ToArrayBuffer, base64ToString, escapeStringToHTML, isValidPath } from "./lib/src/utils";
import ObsidianLiveSyncPlugin from "./main";
import { DIFF_DELETE, DIFF_EQUAL, DIFF_INSERT, diff_match_patch } from "diff-match-patch";
import { LoadedEntry, LOG_LEVEL } from "./lib/src/types";
import { Logger } from "./lib/src/logger";

export class DocumentHistoryModal extends Modal {
    plugin: ObsidianLiveSyncPlugin;
    range: HTMLInputElement;
    contentView: HTMLDivElement;
    info: HTMLDivElement;
    fileInfo: HTMLDivElement;
    showDiff = false;

    file: string;

    revs_info: PouchDB.Core.RevisionInfo[] = [];
    currentDoc: LoadedEntry;
    currentText = "";
    currentDeleted = false;

    constructor(app: App, plugin: ObsidianLiveSyncPlugin, file: TFile | string) {
        super(app);
        this.plugin = plugin;
        this.file = (file instanceof TFile) ? file.path : file;
        if (localStorage.getItem("ols-history-highlightdiff") == "1") {
            this.showDiff = true;
        }
    }
    async loadFile() {
        const db = this.plugin.localDatabase;
        try {
            const w = await db.localDatabase.get(path2id(this.file), { revs_info: true });
            this.revs_info = w._revs_info.filter((e) => e.status == "available");
            this.range.max = `${this.revs_info.length - 1}`;
            this.range.value = this.range.max;
            this.fileInfo.setText(`${this.file} / ${this.revs_info.length} revisions`);
            await this.loadRevs();
        } catch (ex) {
            if (ex.status && ex.status == 404) {
                this.range.max = "0";
                this.range.value = "";
                this.range.disabled = true;
                this.showDiff
                this.contentView.setText(`History of this file was not recorded.`);
            }
        }
    }
    async loadRevs() {
        if (this.revs_info.length == 0) return;
        const db = this.plugin.localDatabase;
        const index = this.revs_info.length - 1 - (this.range.value as any) / 1;
        const rev = this.revs_info[index];
        const w = await db.getDBEntry(path2id(this.file), { rev: rev.rev }, false, false, true);
        this.currentText = "";
        this.currentDeleted = false;
        if (w === false) {
            this.currentDeleted = true;
            this.info.innerHTML = "";
            this.contentView.innerHTML = `Could not read this revision<br>(${rev.rev})`;
        } else {
            this.currentDoc = w;
            this.info.innerHTML = `Modified:${new Date(w.mtime).toLocaleString()}`;
            let result = "";
            const w1data = w.datatype == "plain" ? w.data : base64ToString(w.data);
            this.currentDeleted = w.deleted;
            this.currentText = w1data;
            if (this.showDiff) {
                const prevRevIdx = this.revs_info.length - 1 - ((this.range.value as any) / 1 - 1);
                if (prevRevIdx >= 0 && prevRevIdx < this.revs_info.length) {
                    const oldRev = this.revs_info[prevRevIdx].rev;
                    const w2 = await db.getDBEntry(path2id(this.file), { rev: oldRev }, false, false, true);
                    if (w2 != false) {
                        const dmp = new diff_match_patch();
                        const w2data = w2.datatype == "plain" ? w2.data : base64ToString(w2.data);
                        const diff = dmp.diff_main(w2data, w1data);
                        dmp.diff_cleanupSemantic(diff);
                        for (const v of diff) {
                            const x1 = v[0];
                            const x2 = v[1];
                            if (x1 == DIFF_DELETE) {
                                result += "<span class='history-deleted'>" + escapeStringToHTML(x2) + "</span>";
                            } else if (x1 == DIFF_EQUAL) {
                                result += "<span class='history-normal'>" + escapeStringToHTML(x2) + "</span>";
                            } else if (x1 == DIFF_INSERT) {
                                result += "<span class='history-added'>" + escapeStringToHTML(x2) + "</span>";
                            }
                        }

                        result = result.replace(/\n/g, "<br>");
                    } else {
                        result = escapeStringToHTML(w1data);
                    }
                } else {
                    result = escapeStringToHTML(w1data);
                }
            } else {
                result = escapeStringToHTML(w1data);
            }
            this.contentView.innerHTML = (this.currentDeleted ? "(At this revision, the file has been deleted)\n" : "") + result;

        }
    }

    onOpen() {
        const { contentEl } = this;

        contentEl.empty();
        contentEl.createEl("h2", { text: "Document History" });
        this.fileInfo = contentEl.createDiv("");
        this.fileInfo.addClass("op-info");
        const divView = contentEl.createDiv("");
        divView.addClass("op-flex");

        divView.createEl("input", { type: "range" }, (e) => {
            this.range = e;
            e.addEventListener("change", (e) => {
                this.loadRevs();
            });
            e.addEventListener("input", (e) => {
                this.loadRevs();
            });
        });
        contentEl
            .createDiv("", (e) => {
                e.createEl("label", {}, (label) => {
                    label.appendChild(
                        createEl("input", { type: "checkbox" }, (checkbox) => {
                            if (this.showDiff) {
                                checkbox.checked = true;
                            }
                            checkbox.addEventListener("input", (evt: any) => {
                                this.showDiff = checkbox.checked;
                                localStorage.setItem("ols-history-highlightdiff", this.showDiff == true ? "1" : "");
                                this.loadRevs();
                            });
                        })
                    );
                    label.appendText("Highlight diff");
                });
            })
            .addClass("op-info");
        this.info = contentEl.createDiv("");
        this.info.addClass("op-info");
        this.loadFile();
        const div = contentEl.createDiv({ text: "Loading old revisions..." });
        this.contentView = div;
        div.addClass("op-scrollable");
        div.addClass("op-pre");
        const buttons = contentEl.createDiv("");
        buttons.createEl("button", { text: "Copy to clipboard" }, (e) => {
            e.addClass("mod-cta");
            e.addEventListener("click", async () => {
                await navigator.clipboard.writeText(this.currentText);
                Logger(`Old content copied to clipboard`, LOG_LEVEL.NOTICE);
            });
        });
        async function focusFile(path: string) {
            const targetFile = app.vault
                .getFiles()
                .find((f) => f.path === path);
            if (targetFile) {
                const leaf = app.workspace.getLeaf(false);
                await leaf.openFile(targetFile);
            } else {
                Logger("The file could not view on the editor", LOG_LEVEL.NOTICE)
            }
        }
        buttons.createEl("button", { text: "Back to this revision" }, (e) => {
            e.addClass("mod-cta");
            e.addEventListener("click", async () => {
                const pathToWrite = this.file.startsWith("i:") ? this.file.substring("i:".length) : this.file;
                if (!isValidPath(pathToWrite)) {
                    Logger("Path is not valid to write content.", LOG_LEVEL.INFO);
                }
                if (this.currentDoc?.datatype == "plain") {
                    await this.app.vault.adapter.write(pathToWrite, this.currentDoc.data);
                    await focusFile(pathToWrite);
                    this.close();
                } else if (this.currentDoc?.datatype == "newnote") {
                    await this.app.vault.adapter.writeBinary(pathToWrite, base64ToArrayBuffer(this.currentDoc.data));
                    await focusFile(pathToWrite);
                    this.close();
                } else {

                    Logger(`Could not parse entry`, LOG_LEVEL.NOTICE);
                }
            });
        });
    }
    onClose() {
        const { contentEl } = this;
        contentEl.empty();
    }
}
@@ -1,52 +0,0 @@
import { deleteDB, IDBPDatabase, openDB } from "idb";
export interface KeyValueDatabase {
    get<T>(key: string): Promise<T>;
    set<T>(key: string, value: T): Promise<IDBValidKey>;
    del(key: string): Promise<void>;
    clear(): Promise<void>;
    keys(query?: IDBValidKey | IDBKeyRange, count?: number): Promise<IDBValidKey[]>;
    close(): void;
    destroy(): void;
}
const databaseCache: { [key: string]: IDBPDatabase<any> } = {};
export const OpenKeyValueDatabase = async (dbKey: string): Promise<KeyValueDatabase> => {
    if (dbKey in databaseCache) {
        databaseCache[dbKey].close();
        delete databaseCache[dbKey];
    }
    const storeKey = dbKey;
    const dbPromise = openDB(dbKey, 1, {
        upgrade(db) {
            db.createObjectStore(storeKey);
        },
    });
    let db: IDBPDatabase<any> = null;
    db = await dbPromise;
    databaseCache[dbKey] = db;
    return {
        get<T>(key: string): Promise<T> {
            return db.get(storeKey, key);
        },
        set<T>(key: string, value: T) {
            return db.put(storeKey, value, key);
        },
        del(key: string) {
            return db.delete(storeKey, key);
        },
        clear() {
            return db.clear(storeKey);
        },
        keys(query?: IDBValidKey | IDBKeyRange, count?: number) {
            return db.getAllKeys(storeKey, query, count);
        },
        close() {
            delete databaseCache[dbKey];
            return db.close();
        },
        async destroy() {
            delete databaseCache[dbKey];
            db.close();
            await deleteDB(dbKey);
        },
    };
};
@@ -1,173 +0,0 @@
import { requestUrl, RequestUrlParam, RequestUrlResponse } from "obsidian";
import { KeyValueDatabase, OpenKeyValueDatabase } from "./KeyValueDB.js";
import { LocalPouchDBBase } from "./lib/src/LocalPouchDBBase.js";
import { Logger } from "./lib/src/logger.js";
import { PouchDB } from "./lib/src/pouchdb-browser.js";
import { EntryDoc, LOG_LEVEL } from "./lib/src/types.js";
import { enableEncryption } from "./lib/src/utils.js";
import { isValidRemoteCouchDBURI } from "./lib/src/utils_couchdb.js";
import { id2path, path2id } from "./utils.js";

export class LocalPouchDB extends LocalPouchDBBase {

    kvDB: KeyValueDatabase;
    id2path(filename: string): string {
        return id2path(filename);
    }
    path2id(filename: string): string {
        return path2id(filename);
    }
    CreatePouchDBInstance<T>(name?: string, options?: PouchDB.Configuration.DatabaseConfiguration): PouchDB.Database<T> {
        return new PouchDB(name, options);
    }
    beforeOnUnload(): void {
        this.kvDB.close();
    }
    onClose(): void {
        this.kvDB.close();
    }
    async onInitializeDatabase(): Promise<void> {
        this.kvDB = await OpenKeyValueDatabase(this.dbname + "-livesync-kv");
    }
    async onResetDatabase(): Promise<void> {
        await this.kvDB.destroy();
    }

    last_successful_post = false;
    getLastPostFailedBySize() {
        return this.last_successful_post;
    }
    async fetchByAPI(request: RequestUrlParam): Promise<RequestUrlResponse> {
        const ret = await requestUrl(request);
        if (ret.status - (ret.status % 100) !== 200) {
            const er: Error & { status?: number } = new Error(`Request Error:${ret.status}`);
            if (ret.json) {
                er.message = ret.json.reason;
                er.name = `${ret.json.error ?? ""}:${ret.json.message ?? ""}`;
            }
            er.status = ret.status;
            throw er;
        }
        return ret;
    }


    async connectRemoteCouchDB(uri: string, auth: { username: string; password: string }, disableRequestURI: boolean, passphrase: string | boolean): Promise<string | { db: PouchDB.Database<EntryDoc>; info: PouchDB.Core.DatabaseInfo }> {
        if (!isValidRemoteCouchDBURI(uri)) return "Remote URI is not valid";
        if (uri.toLowerCase() != uri) return "Remote URI and database name could not contain capital letters.";
        if (uri.indexOf(" ") !== -1) return "Remote URI and database name could not contain spaces.";
        let authHeader = "";
        if (auth.username && auth.password) {
            const utf8str = String.fromCharCode.apply(null, new TextEncoder().encode(`${auth.username}:${auth.password}`));
            const encoded = window.btoa(utf8str);
            authHeader = "Basic " + encoded;
        } else {
            authHeader = "";
        }
        // const _this = this;

        const conf: PouchDB.HttpAdapter.HttpAdapterConfiguration = {
            adapter: "http",
            auth,
            fetch: async (url: string | Request, opts: RequestInit) => {
                let size = "";
                const localURL = url.toString().substring(uri.length);
                const method = opts.method ?? "GET";
                if (opts.body) {
                    const opts_length = opts.body.toString().length;
                    if (opts_length > 1024 * 1024 * 10) {
                        // over 10MB
                        if (uri.contains(".cloudantnosqldb.")) {
                            this.last_successful_post = false;
                            Logger("This request should fail on IBM Cloudant.", LOG_LEVEL.VERBOSE);
                            throw new Error("This request should fail on IBM Cloudant.");
                        }
                    }
                    size = ` (${opts_length})`;
                }

                if (!disableRequestURI && typeof url == "string" && typeof (opts.body ?? "") == "string") {
                    const body = opts.body as string;

                    const transformedHeaders = { ...(opts.headers as Record<string, string>) };
                    if (authHeader != "") transformedHeaders["authorization"] = authHeader;
                    delete transformedHeaders["host"];
                    delete transformedHeaders["Host"];
                    delete transformedHeaders["content-length"];
                    delete transformedHeaders["Content-Length"];
                    const requestParam: RequestUrlParam = {
                        url: url as string,
                        method: opts.method,
                        body: body,
                        headers: transformedHeaders,
                        contentType: "application/json",
                        // contentType: opts.headers,
                    };

                    try {
                        const r = await this.fetchByAPI(requestParam);
                        if (method == "POST" || method == "PUT") {
                            this.last_successful_post = r.status - (r.status % 100) == 200;
                        } else {
                            this.last_successful_post = true;
                        }
                        Logger(`HTTP:${method}${size} to:${localURL} -> ${r.status}`, LOG_LEVEL.DEBUG);

                        return new Response(r.arrayBuffer, {
                            headers: r.headers,
                            status: r.status,
                            statusText: `${r.status}`,
                        });
                    } catch (ex) {
                        Logger(`HTTP:${method}${size} to:${localURL} -> failed`, LOG_LEVEL.VERBOSE);
                        // limit only in bulk_docs.
                        if (url.toString().indexOf("_bulk_docs") !== -1) {
                            this.last_successful_post = false;
                        }
                        Logger(ex);
                        throw ex;
                    }
                }

                // -old implementation

                try {
                    const response: Response = await fetch(url, opts);
                    if (method == "POST" || method == "PUT") {
                        this.last_successful_post = response.ok;
                    } else {
                        this.last_successful_post = true;
                    }
                    Logger(`HTTP:${method}${size} to:${localURL} -> ${response.status}`, LOG_LEVEL.DEBUG);
                    return response;
                } catch (ex) {
                    Logger(`HTTP:${method}${size} to:${localURL} -> failed`, LOG_LEVEL.VERBOSE);
                    // limit only in bulk_docs.
                    if (url.toString().indexOf("_bulk_docs") !== -1) {
                        this.last_successful_post = false;
                    }
                    Logger(ex);
                    throw ex;
                }
                // return await fetch(url, opts);
            },
        };

        const db: PouchDB.Database<EntryDoc> = new PouchDB<EntryDoc>(uri, conf);
        if (passphrase && typeof passphrase === "string") {
            enableEncryption(db, passphrase);
        }
        try {
            const info = await db.info();
            return { db: db, info: info };
        } catch (ex) {
            let msg = `${ex.name}:${ex.message}`;
            if (ex.name == "TypeError" && ex.message == "Failed to fetch") {
                msg += "\n**Note** This error caused by many reasons. The only sure thing is you didn't touch the server.\nTo check details, open inspector.";
            }
            Logger(ex, LOG_LEVEL.VERBOSE);
            return msg;
        }
    }

}
@@ -1,37 +0,0 @@
import { App, Modal } from "obsidian";
import { escapeStringToHTML } from "./lib/src/utils";
import ObsidianLiveSyncPlugin from "./main";

export class LogDisplayModal extends Modal {
    plugin: ObsidianLiveSyncPlugin;
    logEl: HTMLDivElement;
    constructor(app: App, plugin: ObsidianLiveSyncPlugin) {
        super(app);
        this.plugin = plugin;
    }
    updateLog() {
        let msg = "";
        for (const v of this.plugin.logMessage) {
            msg += escapeStringToHTML(v) + "<br>";
        }
        this.logEl.innerHTML = msg;
    }
    onOpen() {
        const { contentEl } = this;

        contentEl.empty();
        contentEl.createEl("h2", { text: "Sync Status" });
        const div = contentEl.createDiv("");
        div.addClass("op-scrollable");
        div.addClass("op-pre");
        this.logEl = div;
        this.updateLog = this.updateLog.bind(this);
        this.plugin.addLogHook = this.updateLog;
        this.updateLog();
    }
    onClose() {
        const { contentEl } = this;
        contentEl.empty();
        this.plugin.addLogHook = null;
    }
}
@@ -1,295 +0,0 @@
<script lang="ts">
    import ObsidianLiveSyncPlugin from "./main";
    import { onMount } from "svelte";
    import { DevicePluginList, PluginDataEntry } from "./types";
    import { versionNumberString2Number } from "./lib/src/utils";

    type JudgeResult = "" | "NEWER" | "EVEN" | "EVEN_BUT_DIFFERENT" | "OLDER" | "REMOTE_ONLY";

    interface PluginDataEntryDisp extends PluginDataEntry {
        versionInfo: string;
        mtimeInfo: string;
        mtimeFlag: JudgeResult;
        versionFlag: JudgeResult;
    }

    export let plugin: ObsidianLiveSyncPlugin;
    let plugins: PluginDataEntry[] = [];
    let deviceAndPlugins: { [key: string]: PluginDataEntryDisp[] } = {};
    let devicePluginList: [string, PluginDataEntryDisp[]][] = null;
    let ownPlugins: DevicePluginList = null;
    let showOwnPlugins = false;
    let targetList: { [key: string]: boolean } = {};

    function saveTargetList() {
        window.localStorage.setItem("ols-plugin-targetlist", JSON.stringify(targetList));
    }

    function loadTargetList() {
        let e = window.localStorage.getItem("ols-plugin-targetlist") || "{}";
        try {
            targetList = JSON.parse(e);
        } catch (_) {
            // NO OP.
        }
    }

    function clearSelection() {
        targetList = {};
    }

    async function updateList() {
        let x = await plugin.getPluginList();
        ownPlugins = x.thisDevicePlugins;
        plugins = Object.values(x.allPlugins);
        let targetListItems = Array.from(new Set(plugins.map((e) => e.deviceVaultName + "---" + e.manifest.id)));
        let newTargetList: { [key: string]: boolean } = {};
        for (const id of targetListItems) {
            for (const tag of ["---plugin", "---setting"]) {
                newTargetList[id + tag] = id + tag in targetList && targetList[id + tag];
            }
        }
        targetList = newTargetList;
        saveTargetList();
    }

    $: {
        deviceAndPlugins = {};
        for (const p of plugins) {
            if (p.deviceVaultName == plugin.deviceAndVaultName && !showOwnPlugins) {
                continue;
            }
            if (!(p.deviceVaultName in deviceAndPlugins)) {
                deviceAndPlugins[p.deviceVaultName] = [];
            }
            let dispInfo: PluginDataEntryDisp = { ...p, versionInfo: "", mtimeInfo: "", versionFlag: "", mtimeFlag: "" };
            dispInfo.versionInfo = p.manifest.version;
            let x = new Date().getTime() / 1000;
            let mtime = p.mtime / 1000;
            let diff = (x - mtime) / 60;
            if (p.mtime == 0) {
                dispInfo.mtimeInfo = `-`;
            } else if (diff < 60) {
                dispInfo.mtimeInfo = `${diff | 0} Mins ago`;
            } else if (diff < 60 * 24) {
                dispInfo.mtimeInfo = `${(diff / 60) | 0} Hours ago`;
            } else if (diff < 60 * 24 * 10) {
                dispInfo.mtimeInfo = `${(diff / (60 * 24)) | 0} Days ago`;
            } else {
                dispInfo.mtimeInfo = new Date(dispInfo.mtime).toLocaleString();
            }
            // compare with own plugin
            let id = p.manifest.id;

            if (id in ownPlugins) {
                // Which we have.
                const ownPlugin = ownPlugins[id];
                let localVer = versionNumberString2Number(ownPlugin.manifest.version);
                let pluginVer = versionNumberString2Number(p.manifest.version);
                if (localVer > pluginVer) {
                    dispInfo.versionFlag = "OLDER";
                } else if (localVer == pluginVer) {
                    if (ownPlugin.manifestJson + (ownPlugin.styleCss ?? "") + ownPlugin.mainJs != p.manifestJson + (p.styleCss ?? "") + p.mainJs) {
                        dispInfo.versionFlag = "EVEN_BUT_DIFFERENT";
                    } else {
                        dispInfo.versionFlag = "EVEN";
                    }
                } else if (localVer < pluginVer) {
                    dispInfo.versionFlag = "NEWER";
                }
                if ((ownPlugin.dataJson ?? "") == (p.dataJson ?? "")) {
                    if (ownPlugin.mtime == 0 && p.mtime == 0) {
                        dispInfo.mtimeFlag = "";
                    } else {
                        dispInfo.mtimeFlag = "EVEN";
                    }
                } else {
                    if (((ownPlugin.mtime / 1000) | 0) > ((p.mtime / 1000) | 0)) {
                        dispInfo.mtimeFlag = "OLDER";
                    } else if (((ownPlugin.mtime / 1000) | 0) == ((p.mtime / 1000) | 0)) {
                        dispInfo.mtimeFlag = "EVEN_BUT_DIFFERENT";
                    } else if (((ownPlugin.mtime / 1000) | 0) < ((p.mtime / 1000) | 0)) {
                        dispInfo.mtimeFlag = "NEWER";
                    }
                }
            } else {
                dispInfo.versionFlag = "REMOTE_ONLY";
                dispInfo.mtimeFlag = "REMOTE_ONLY";
            }

            deviceAndPlugins[p.deviceVaultName].push(dispInfo);
        }
        devicePluginList = Object.entries(deviceAndPlugins);
    }

    function getDispString(stat: JudgeResult): string {
        if (stat == "") return "";
        if (stat == "NEWER") return " (Newer)";
        if (stat == "OLDER") return " (Older)";
        if (stat == "EVEN") return " (Even)";
        if (stat == "EVEN_BUT_DIFFERENT") return " (Even but different)";
        if (stat == "REMOTE_ONLY") return " (Remote Only)";
        return "";
    }

    onMount(async () => {
        loadTargetList();
        await updateList();
    });

    function toggleShowOwnPlugins() {
        showOwnPlugins = !showOwnPlugins;
    }

    function toggleTarget(key: string) {
        targetList[key] = !targetList[key];
        saveTargetList();
    }

    function toggleAll(devicename: string) {
        for (const c in targetList) {
            if (c.startsWith(devicename)) {
                targetList[c] = true;
            }
        }
    }

    async function sweepPlugins() {
        //@ts-ignore
        await plugin.app.plugins.loadManifests();
        await plugin.sweepPlugin(true);
        updateList();
    }

    async function applyPlugins() {
        for (const c in targetList) {
            if (targetList[c] == true) {
                const [deviceAndVault, id, opt] = c.split("---");
                if (deviceAndVault in deviceAndPlugins) {
                    const entry = deviceAndPlugins[deviceAndVault].find((e) => e.manifest.id == id);
                    if (entry) {
                        if (opt == "plugin") {
                            if (entry.versionFlag != "EVEN") await plugin.applyPlugin(entry);
                        } else if (opt == "setting") {
                            if (entry.mtimeFlag != "EVEN") await plugin.applyPluginData(entry);
                        }
                    }
                }
            }
        }
        //@ts-ignore
        await plugin.app.plugins.loadManifests();
        await plugin.sweepPlugin(true);
        updateList();
    }

    async function checkUpdates() {
        await plugin.checkPluginUpdate();
    }
    async function replicateAndRefresh() {
        await plugin.replicate(true);
        updateList();
    }
</script>

<div>
    <h1>Plugins and their settings</h1>
    <div class="ols-plugins-div-buttons">
        Show own items
        <div class="checkbox-container" class:is-enabled={showOwnPlugins} on:click={toggleShowOwnPlugins} />
    </div>
    <div class="sls-plugins-wrap">
        <table class="sls-plugins-tbl">
            <tr style="position:sticky">
                <th class="sls-plugins-tbl-device-head">Name</th>
                <th class="sls-plugins-tbl-device-head">Info</th>
                <th class="sls-plugins-tbl-device-head">Target</th>
            </tr>
            {#if !devicePluginList}
                <tr>
                    <td colspan="3" class="sls-table-tail tcenter"> Retrieving... </td>
                </tr>
            {:else if devicePluginList.length == 0}
                <tr>
                    <td colspan="3" class="sls-table-tail tcenter"> No plugins found. </td>
                </tr>
            {:else}
                {#each devicePluginList as [deviceName, devicePlugins]}
                    <tr>
                        <th colspan="2" class="sls-plugins-tbl-device-head">{deviceName}</th>
                        <th class="sls-plugins-tbl-device-head">
                            <button class="mod-cta" on:click={() => toggleAll(deviceName)}>✔</button>
                        </th>
                    </tr>
                    {#each devicePlugins as plugin}
                        <tr>
                            <td class="sls-table-head">{plugin.manifest.name}</td>
                            <td class="sls-table-tail tcenter">{plugin.versionInfo}{getDispString(plugin.versionFlag)}</td>
                            <td class="sls-table-tail tcenter">
                                {#if plugin.versionFlag === "EVEN" || plugin.versionFlag === ""}
                                    -
                                {:else}
                                    <div class="wrapToggle">
                                        <div
                                            class="checkbox-container"
                                            class:is-enabled={targetList[plugin.deviceVaultName + "---" + plugin.manifest.id + "---plugin"]}
                                            on:click={() => toggleTarget(plugin.deviceVaultName + "---" + plugin.manifest.id + "---plugin")}
                                        />
                                    </div>
                                {/if}
                            </td>
                        </tr>
                        <tr>
                            <td class="sls-table-head">Settings</td>
                            <td class="sls-table-tail tcenter">{plugin.mtimeInfo}{getDispString(plugin.mtimeFlag)}</td>
                            <td class="sls-table-tail tcenter">
                                {#if plugin.mtimeFlag === "EVEN" || plugin.mtimeFlag === ""}
                                    -
                                {:else}
                                    <div class="wrapToggle">
                                        <div
                                            class="checkbox-container"
                                            class:is-enabled={targetList[plugin.deviceVaultName + "---" + plugin.manifest.id + "---setting"]}
                                            on:click={() => toggleTarget(plugin.deviceVaultName + "---" + plugin.manifest.id + "---setting")}
                                        />
                                    </div>
                                {/if}
                            </td>
                        </tr>
                        <tr class="divider">
                            <th colspan="3" />
                        </tr>
                    {/each}
                {/each}
            {/if}
        </table>
    </div>
    <div class="ols-plugins-div-buttons">
        <button class="" on:click={replicateAndRefresh}>Replicate and refresh</button>
        <button class="" on:click={clearSelection}>Clear Selection</button>
    </div>

    <div class="ols-plugins-div-buttons">
        <button class="mod-cta" on:click={checkUpdates}>Check Updates</button>
        <button class="mod-cta" on:click={sweepPlugins}>Scan installed</button>
        <button class="mod-cta" on:click={applyPlugins}>Apply all</button>
    </div>
    <!-- <div class="ols-plugins-div-buttons">-->
    <!-- <button class="mod-warning" on:click={applyPlugins}>Delete all selected</button>-->
    <!-- </div>-->
</div>

<style>
    .ols-plugins-div-buttons {
        display: flex;
        flex-direction: row;
        justify-content: flex-end;
        margin-top: 8px;
    }

    .wrapToggle {
        display: flex;
        justify-content: center;
        align-content: center;
    }
</style>
43  src/common/KeyValueDB.ts  Normal file
@@ -0,0 +1,43 @@
import { deleteDB, type IDBPDatabase, openDB } from "idb";
import type { KeyValueDatabase } from "../lib/src/interfaces/KeyValueDatabase.ts";
const databaseCache: { [key: string]: IDBPDatabase<any> } = {};
export const OpenKeyValueDatabase = async (dbKey: string): Promise<KeyValueDatabase> => {
    if (dbKey in databaseCache) {
        databaseCache[dbKey].close();
        delete databaseCache[dbKey];
    }
    const storeKey = dbKey;
    const dbPromise = openDB(dbKey, 1, {
        upgrade(db, _oldVersion, _newVersion, _transaction, _event) {
            return db.createObjectStore(storeKey);
        },
    });
    const db = await dbPromise;
    databaseCache[dbKey] = db;
    return {
        async get<T>(key: IDBValidKey): Promise<T> {
            return await db.get(storeKey, key);
        },
        async set<T>(key: IDBValidKey, value: T) {
            return await db.put(storeKey, value, key);
        },
        async del(key: IDBValidKey) {
            return await db.delete(storeKey, key);
        },
        async clear() {
            return await db.clear(storeKey);
        },
        async keys(query?: IDBValidKey | IDBKeyRange, count?: number) {
            return await db.getAllKeys(storeKey, query, count);
        },
        close() {
            delete databaseCache[dbKey];
            return db.close();
        },
        async destroy() {
            delete databaseCache[dbKey];
            db.close();
            await deleteDB(dbKey);
        },
    };
};
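A brief usage sketch of this helper; the database name and the stored value below are illustrative only:

    import { OpenKeyValueDatabase } from "./common/KeyValueDB.ts";

    const kv = await OpenKeyValueDatabase("example-livesync-kv"); // hypothetical name
    await kv.set("last-sync", Date.now()); // persist a marker
    const lastSync = await kv.get<number>("last-sync"); // read it back
    console.log(new Date(lastSync).toISOString());
    kv.close(); // release the cached IndexedDB handle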
28  src/common/SvelteItemView.ts  Normal file
@@ -0,0 +1,28 @@
import { ItemView } from "obsidian";
import { type mount, unmount } from "svelte";

export abstract class SvelteItemView extends ItemView {
    abstract instantiateComponent(target: HTMLElement): ReturnType<typeof mount> | Promise<ReturnType<typeof mount>>;
    component?: ReturnType<typeof mount>;
    async onOpen() {
        await super.onOpen();
        this.contentEl.empty();
        await this._dismountComponent();
        this.component = await this.instantiateComponent(this.contentEl);
        return;
    }
    async _dismountComponent() {
        if (this.component) {
            await unmount(this.component);
            this.component = undefined;
        }
    }
    async onClose() {
        await super.onClose();
        if (this.component) {
            await unmount(this.component);
            this.component = undefined;
        }
        return;
    }
}
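A minimal sketch of a concrete view built on this base class; `ExampleComponent` and the view type string are assumptions, not part of this changeset:

    import { mount } from "svelte";
    import { SvelteItemView } from "./common/SvelteItemView.ts";
    import ExampleComponent from "./ExampleComponent.svelte"; // hypothetical component

    export class ExampleView extends SvelteItemView {
        getViewType() {
            return "example-view";
        }
        getDisplayText() {
            return "Example";
        }
        instantiateComponent(target: HTMLElement) {
            // Svelte 5 mount() returns the handle that onClose() later
            // passes to unmount() via _dismountComponent().
            return mount(ExampleComponent, { target });
        }
    }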
49  src/common/events.ts  Normal file
@@ -0,0 +1,49 @@
import { eventHub } from "../lib/src/hub/hub";
import type ObsidianLiveSyncPlugin from "../main";

export const EVENT_PLUGIN_LOADED = "plugin-loaded";
export const EVENT_PLUGIN_UNLOADED = "plugin-unloaded";
export const EVENT_FILE_SAVED = "file-saved";
export const EVENT_LEAF_ACTIVE_CHANGED = "leaf-active-changed";

export const EVENT_REQUEST_OPEN_SETTINGS = "request-open-settings";
export const EVENT_REQUEST_OPEN_SETTING_WIZARD = "request-open-setting-wizard";
export const EVENT_REQUEST_OPEN_SETUP_URI = "request-open-setup-uri";
export const EVENT_REQUEST_COPY_SETUP_URI = "request-copy-setup-uri";
export const EVENT_REQUEST_SHOW_SETUP_QR = "request-show-setup-qr";

export const EVENT_REQUEST_RELOAD_SETTING_TAB = "reload-setting-tab";

export const EVENT_REQUEST_OPEN_PLUGIN_SYNC_DIALOG = "request-open-plugin-sync-dialog";

export const EVENT_REQUEST_OPEN_P2P = "request-open-p2p";
export const EVENT_REQUEST_CLOSE_P2P = "request-close-p2p";

export const EVENT_REQUEST_RUN_DOCTOR = "request-run-doctor";
export const EVENT_REQUEST_RUN_FIX_INCOMPLETE = "request-run-fix-incomplete";
export const EVENT_ON_UNRESOLVED_ERROR = "on-unresolved-error";

// export const EVENT_FILE_CHANGED = "file-changed";

declare global {
    interface LSEvents {
        [EVENT_PLUGIN_LOADED]: ObsidianLiveSyncPlugin;
        [EVENT_PLUGIN_UNLOADED]: undefined;
        [EVENT_REQUEST_OPEN_PLUGIN_SYNC_DIALOG]: undefined;
        [EVENT_REQUEST_OPEN_SETTINGS]: undefined;
        [EVENT_REQUEST_OPEN_SETTING_WIZARD]: undefined;
        [EVENT_REQUEST_RELOAD_SETTING_TAB]: undefined;
        [EVENT_LEAF_ACTIVE_CHANGED]: undefined;
        [EVENT_REQUEST_CLOSE_P2P]: undefined;
        [EVENT_REQUEST_OPEN_P2P]: undefined;
        [EVENT_REQUEST_OPEN_SETUP_URI]: undefined;
        [EVENT_REQUEST_COPY_SETUP_URI]: undefined;
        [EVENT_REQUEST_SHOW_SETUP_QR]: undefined;
        [EVENT_REQUEST_RUN_DOCTOR]: string;
        [EVENT_REQUEST_RUN_FIX_INCOMPLETE]: undefined;
        [EVENT_ON_UNRESOLVED_ERROR]: undefined;
    }
}

export * from "../lib/src/events/coreEvents.ts";
export { eventHub };
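These string constants are tied to payload types through the global LSEvents map, so listeners receive typed payloads. A small usage sketch; only `onceEvent` is confirmed by this changeset (it appears in src/common/utils.ts below), while `emitEvent` is an assumption about the same event-hub API:

    import { eventHub, EVENT_PLUGIN_LOADED, EVENT_REQUEST_RUN_DOCTOR } from "./common/events.ts";

    // `plugin` is typed as ObsidianLiveSyncPlugin via the LSEvents declaration.
    eventHub.onceEvent(EVENT_PLUGIN_LOADED, (plugin) => {
        console.log(`LiveSync loaded: ${plugin.manifest.name}`);
    });

    // EVENT_REQUEST_RUN_DOCTOR carries a string payload per the map above.
    // (emitEvent is assumed, not shown in this diff.)
    eventHub.emitEvent(EVENT_REQUEST_RUN_DOCTOR, "manual check");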
12  src/common/obsidianEvents.ts  Normal file
@@ -0,0 +1,12 @@
import type { TFile } from "../deps";
import type { FilePathWithPrefix, LoadedEntry } from "../lib/src/common/types";

export const EVENT_REQUEST_SHOW_HISTORY = "show-history";

declare global {
    interface LSEvents {
        [EVENT_REQUEST_SHOW_HISTORY]:
            | { file: TFile; fileOnDB: LoadedEntry }
            | { file: FilePathWithPrefix; fileOnDB: LoadedEntry };
    }
}
7  src/common/stores.ts  Normal file
@@ -0,0 +1,7 @@
import { PersistentMap } from "octagonal-wheels/dataobject/PersistentMap";

export let sameChangePairs: PersistentMap<number[]>;

export function initializeStores(vaultName: string) {
    sameChangePairs = new PersistentMap<number[]>(`ls-persist-same-changes-${vaultName}`);
}
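`PersistentMap` comes from octagonal-wheels; a hedged sketch of the intended use, assuming a Map-like `set`/`get` surface (not verified here). The file path and timestamps are made up:

    import { initializeStores, sameChangePairs } from "./common/stores.ts";

    initializeStores("MyVault"); // hypothetical vault name
    // Remember mtime pairs that produced identical content, so the same
    // storage/database timestamp pair can later be treated as already synced.
    sameChangePairs.set("notes/example.md", [1700000000000, 1700000000500]);
    const pair = sameChangePairs.get("notes/example.md");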
69  src/common/types.ts  Normal file
@@ -0,0 +1,69 @@
import { type PluginManifest, TFile } from "../deps.ts";
import { type DatabaseEntry, type EntryBody, type FilePath } from "../lib/src/common/types.ts";
export type { CacheData, FileEventItem } from "../lib/src/common/types.ts";

export interface PluginDataEntry extends DatabaseEntry {
    deviceVaultName: string;
    mtime: number;
    manifest: PluginManifest;
    mainJs: string;
    manifestJson: string;
    styleCss?: string;
    // it must be encrypted.
    dataJson?: string;
    _conflicts?: string[];
    type: "plugin";
}

export interface PluginList {
    [key: string]: PluginDataEntry[];
}

export interface DevicePluginList {
    [key: string]: PluginDataEntry;
}
export const PERIODIC_PLUGIN_SWEEP = 60;

export interface InternalFileInfo {
    path: FilePath;
    mtime: number;
    ctime: number;
    size: number;
    deleted?: boolean;
}

export interface FileInfo {
    path: FilePath;
    mtime: number;
    ctime: number;
    size: number;
    deleted?: boolean;
    file: TFile;
}

export type queueItem = {
    entry: EntryBody;
    missingChildren: string[];
    timeout?: number;
    done?: boolean;
    warned?: boolean;
};

// Hidden items (Now means `chunk`)
export const CHeader = "h:";

// Plug-in Stored Container (Obsolete)
export const PSCHeader = "ps:";
export const PSCHeaderEnd = "ps;";

// Internal data Container
export const ICHeader = "i:";
export const ICHeaderEnd = "i;";
export const ICHeaderLength = ICHeader.length;

// Internal data Container (eXtended)
export const ICXHeader = "ix:";

export const FileWatchEventQueueMax = 10;

export { configURIBase, configURIBaseQR } from "../lib/src/common/types.ts";
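The prefix constants at the end of this file form a small namespace scheme for database IDs; a short illustration (the sample ID is hypothetical):

    import { CHeader, ICHeader, ICXHeader } from "./common/types.ts";

    const id = "i:.obsidian/app.json"; // hypothetical internal-file entry
    id.startsWith(ICHeader);  // true: internal data ("i:")
    id.startsWith(CHeader);   // false: not a chunk ("h:")
    id.startsWith(ICXHeader); // false: not customisation-sync metadata ("ix:")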
568
src/common/utils.ts
Normal file
@@ -0,0 +1,568 @@
|
||||
import { normalizePath, Platform, TAbstractFile, type RequestUrlParam, requestUrl } from "../deps.ts";
|
||||
import {
|
||||
path2id_base,
|
||||
id2path_base,
|
||||
isValidFilenameInLinux,
|
||||
isValidFilenameInDarwin,
|
||||
isValidFilenameInWidows,
|
||||
isValidFilenameInAndroid,
|
||||
stripAllPrefixes,
|
||||
} from "../lib/src/string_and_binary/path.ts";
|
||||
|
||||
import { Logger } from "../lib/src/common/logger.ts";
|
||||
import {
|
||||
LOG_LEVEL_INFO,
|
||||
LOG_LEVEL_NOTICE,
|
||||
LOG_LEVEL_VERBOSE,
|
||||
type AnyEntry,
|
||||
type CouchDBCredentials,
|
||||
type DocumentID,
|
||||
type EntryHasPath,
|
||||
type FilePath,
|
||||
type FilePathWithPrefix,
|
||||
type UXFileInfo,
|
||||
type UXFileInfoStub,
|
||||
} from "../lib/src/common/types.ts";
|
||||
import { CHeader, ICHeader, ICHeaderLength, ICXHeader, PSCHeader } from "./types.ts";
|
||||
import type ObsidianLiveSyncPlugin from "../main.ts";
|
||||
import { writeString } from "../lib/src/string_and_binary/convert.ts";
|
||||
import { fireAndForget } from "../lib/src/common/utils.ts";
|
||||
import { sameChangePairs } from "./stores.ts";
|
||||
|
||||
import { scheduleTask } from "octagonal-wheels/concurrency/task";
|
||||
import { EVENT_PLUGIN_UNLOADED, eventHub } from "./events.ts";
|
||||
import { promiseWithResolver, type PromiseWithResolvers } from "octagonal-wheels/promises";
|
||||
import { AuthorizationHeaderGenerator } from "../lib/src/replication/httplib.ts";
|
||||
import type { KeyValueDatabase } from "../lib/src/interfaces/KeyValueDatabase.ts";
|
||||
|
||||
export { scheduleTask, cancelTask, cancelAllTasks } from "octagonal-wheels/concurrency/task";
|
||||
|
||||
// For backward compatibility, using the path for determining id.
|
||||
// Only CouchDB unacceptable ID (that starts with an underscore) has been prefixed with "/".
|
||||
// The first slash will be deleted when the path is normalized.
|
||||
export async function path2id(
|
||||
filename: FilePathWithPrefix | FilePath,
|
||||
obfuscatePassphrase: string | false,
|
||||
caseInsensitive: boolean
|
||||
): Promise<DocumentID> {
|
||||
const temp = filename.split(":");
|
||||
const path = temp.pop();
|
||||
const normalizedPath = normalizePath(path as FilePath);
|
||||
temp.push(normalizedPath);
|
||||
const fixedPath = temp.join(":") as FilePathWithPrefix;
|
||||
|
||||
const out = await path2id_base(fixedPath, obfuscatePassphrase, caseInsensitive);
|
||||
return out;
|
||||
}
|
||||
export function id2path(id: DocumentID, entry?: EntryHasPath): FilePathWithPrefix {
|
||||
const filename = id2path_base(id, entry);
|
||||
const temp = filename.split(":");
|
||||
const path = temp.pop();
|
||||
const normalizedPath = normalizePath(path as FilePath);
|
||||
temp.push(normalizedPath);
|
||||
const fixedPath = temp.join(":") as FilePathWithPrefix;
|
||||
return fixedPath;
|
||||
}
export function getPath(entry: AnyEntry) {
    return id2path(entry._id, entry);
}
export function getPathWithoutPrefix(entry: AnyEntry) {
    const f = getPath(entry);
    return stripAllPrefixes(f);
}

export function getPathFromTFile(file: TAbstractFile) {
    return file.path as FilePath;
}

export function isInternalFile(file: UXFileInfoStub | string | FilePathWithPrefix) {
    if (typeof file == "string") return file.startsWith(ICHeader);
    if (file.isInternal) return true;
    return false;
}
export function getPathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
    if (typeof file == "string") return file as FilePathWithPrefix;
    return file.path;
}
export function getStoragePathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
    if (typeof file == "string") return stripAllPrefixes(file as FilePathWithPrefix);
    return stripAllPrefixes(file.path);
}
export function getDatabasePathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
    if (typeof file == "string" && file.startsWith(ICXHeader)) return file as FilePathWithPrefix;
    const prefix = isInternalFile(file) ? ICHeader : "";
    if (typeof file == "string") return (prefix + stripAllPrefixes(file as FilePathWithPrefix)) as FilePathWithPrefix;
    return (prefix + stripAllPrefixes(file.path)) as FilePathWithPrefix;
}

const memos: { [key: string]: any } = {};
export function memoObject<T>(key: string, obj: T): T {
    memos[key] = obj;
    return memos[key] as T;
}
export async function memoIfNotExist<T>(key: string, func: () => T | Promise<T>): Promise<T> {
    if (!(key in memos)) {
        const w = func();
        const v = w instanceof Promise ? await w : w;
        memos[key] = v;
    }
    return memos[key] as T;
}
export function retrieveMemoObject<T>(key: string): T | false {
    if (key in memos) {
        return memos[key];
    } else {
        return false;
    }
}
export function disposeMemoObject(key: string) {
    delete memos[key];
}
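
// Editor's sketch (hedged usage example, not from the original source): these helpers
// form a tiny process-wide cache keyed by string. A typical usage might be:
//   const manifest = await memoIfNotExist("manifest", () => fetchManifest()); // computed once
//   retrieveMemoObject<Manifest>("manifest"); // cached value, or false if absent
//   disposeMemoObject("manifest"); // drop the entry so the next call recomputes
// (fetchManifest and Manifest are hypothetical names used only for illustration.)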

export function isValidPath(filename: string) {
    if (Platform.isDesktop) {
        // if(Platform.isMacOS) return isValidFilenameInDarwin(filename);
        if (process.platform == "darwin") return isValidFilenameInDarwin(filename);
        if (process.platform == "linux") return isValidFilenameInLinux(filename);
        return isValidFilenameInWidows(filename);
    }
    if (Platform.isAndroidApp) return isValidFilenameInAndroid(filename);
    if (Platform.isIosApp) return isValidFilenameInDarwin(filename);
    // Fallback
    Logger("Could not determine platform for checking filename", LOG_LEVEL_VERBOSE);
    return isValidFilenameInWidows(filename);
}

export function trimPrefix(target: string, prefix: string) {
    return target.startsWith(prefix) ? target.substring(prefix.length) : target;
}

/**
 * Returns whether the given id points at internal (hidden-file) metadata.
 * @param id ID
 * @returns true when the id starts with the internal-metadata header
 */
export function isInternalMetadata(id: FilePath | FilePathWithPrefix | DocumentID): boolean {
    return id.startsWith(ICHeader);
}
export function stripInternalMetadataPrefix<T extends FilePath | FilePathWithPrefix | DocumentID>(id: T): T {
    return id.substring(ICHeaderLength) as T;
}
export function id2InternalMetadataId(id: DocumentID): DocumentID {
    return (ICHeader + id) as DocumentID;
}

// const CHeaderLength = CHeader.length;
export function isChunk(str: string): boolean {
    return str.startsWith(CHeader);
}

export function isPluginMetadata(str: string): boolean {
    return str.startsWith(PSCHeader);
}
export function isCustomisationSyncMetadata(str: string): boolean {
    return str.startsWith(ICXHeader);
}

export class PeriodicProcessor {
    _process: () => Promise<any>;
    _timer?: number = undefined;
    _plugin: ObsidianLiveSyncPlugin;
    constructor(plugin: ObsidianLiveSyncPlugin, process: () => Promise<any>) {
        this._plugin = plugin;
        this._process = process;
        eventHub.onceEvent(EVENT_PLUGIN_UNLOADED, () => {
            this.disable();
        });
    }
    async process() {
        try {
            await this._process();
        } catch (ex) {
            Logger(ex);
        }
    }
    enable(interval: number) {
        this.disable();
        if (interval == 0) return;
        this._timer = window.setInterval(
            () =>
                fireAndForget(async () => {
                    await this.process();
                    if (this._plugin.services?.appLifecycle?.hasUnloaded()) {
                        this.disable();
                    }
                }),
            interval
        );
        this._plugin.registerInterval(this._timer);
    }
    disable() {
        if (this._timer !== undefined) {
            window.clearInterval(this._timer);
            this._timer = undefined;
        }
    }
}

export const _requestToCouchDBFetch = async (
    baseUri: string,
    username: string,
    password: string,
    path?: string,
    body?: string | any,
    method?: string
) => {
    const utf8str = String.fromCharCode.apply(null, [...writeString(`${username}:${password}`)]);
    const encoded = window.btoa(utf8str);
    const authHeader = "Basic " + encoded;
    const transformedHeaders: Record<string, string> = {
        authorization: authHeader,
        "content-type": "application/json",
    };
    const uri = `${baseUri}/${path}`;
    const requestParam = {
        url: uri,
        method: method || (body ? "PUT" : "GET"),
        headers: new Headers(transformedHeaders),
        contentType: "application/json",
        body: JSON.stringify(body),
    };
    return await fetch(uri, requestParam);
};

export const _requestToCouchDB = async (
    baseUri: string,
    credentials: CouchDBCredentials,
    origin: string,
    path?: string,
    body?: any,
    method?: string,
    customHeaders?: Record<string, string>
) => {
    // Create each time to avoid caching.
    const authHeaderGen = new AuthorizationHeaderGenerator();
    const authHeader = await authHeaderGen.getAuthorizationHeader(credentials);
    const transformedHeaders: Record<string, string> = { authorization: authHeader, origin: origin, ...customHeaders };
    const uri = `${baseUri}/${path}`;
    const requestParam: RequestUrlParam = {
        url: uri,
        method: method || (body ? "PUT" : "GET"),
        headers: transformedHeaders,
        contentType: "application/json",
        body: body ? JSON.stringify(body) : undefined,
    };
    return await requestUrl(requestParam);
};
/**
 * @deprecated Use requestToCouchDBWithCredentials instead.
 */
export const requestToCouchDB = async (
    baseUri: string,
    username: string,
    password: string,
    origin: string = "",
    key?: string,
    body?: string,
    method?: string,
    customHeaders?: Record<string, string>
) => {
    const uri = `_node/_local/_config${key ? "/" + key : ""}`;
    return await _requestToCouchDB(
        baseUri,
        { username, password, type: "basic" },
        origin,
        uri,
        body,
        method,
        customHeaders
    );
};

export function requestToCouchDBWithCredentials(
    baseUri: string,
    credentials: CouchDBCredentials,
    origin: string = "",
    key?: string,
    body?: string,
    method?: string,
    customHeaders?: Record<string, string>
) {
    const uri = `_node/_local/_config${key ? "/" + key : ""}`;
    return _requestToCouchDB(baseUri, credentials, origin, uri, body, method, customHeaders);
}

export const BASE_IS_NEW = Symbol("base");
export const TARGET_IS_NEW = Symbol("target");
export const EVEN = Symbol("even");

// Why 2000? : The ZIP file format does not have enough timestamp resolution (2-second granularity).
const resolution = 2000;
export function compareMTime(
    baseMTime: number,
    targetMTime: number
): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
    const truncatedBaseMTime = ~~(baseMTime / resolution) * resolution;
    const truncatedTargetMTime = ~~(targetMTime / resolution) * resolution;
    // Logger(`Resolution MTime ${truncatedBaseMTime} and ${truncatedTargetMTime} `, LOG_LEVEL_VERBOSE);
    if (truncatedBaseMTime == truncatedTargetMTime) return EVEN;
    if (truncatedBaseMTime > truncatedTargetMTime) return BASE_IS_NEW;
    if (truncatedBaseMTime < truncatedTargetMTime) return TARGET_IS_NEW;
    throw new Error("Unexpected error");
}
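
// Editor's note (illustrative, not part of the original commit): both timestamps are
// truncated to 2000 ms buckets before comparison, so two writes within the same
// 2-second window compare as EVEN. For example:
//   compareMTime(1700000001500, 1700000000100); // EVEN: both truncate to 1700000000000
//   compareMTime(1700000002000, 1700000000100); // BASE_IS_NEW: 1700000002000 > 1700000000000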

function getKey(file: AnyEntry | string | UXFileInfoStub) {
    const key = typeof file == "string" ? file : stripAllPrefixes(file.path);
    return key;
}

export function markChangesAreSame(file: AnyEntry | string | UXFileInfoStub, mtime1: number, mtime2: number) {
    if (mtime1 === mtime2) return true;
    const key = getKey(file);
    const pairs = sameChangePairs.get(key, []) || [];
    if (pairs.some((e) => e == mtime1 || e == mtime2)) {
        sameChangePairs.set(key, [...new Set([...pairs, mtime1, mtime2])]);
    } else {
        sameChangePairs.set(key, [mtime1, mtime2]);
    }
}

export function unmarkChanges(file: AnyEntry | string | UXFileInfoStub) {
    const key = getKey(file);
    sameChangePairs.delete(key);
}
export function isMarkedAsSameChanges(file: UXFileInfoStub | AnyEntry | string, mtimes: number[]) {
    const key = getKey(file);
    const pairs = sameChangePairs.get(key, []) || [];
    if (mtimes.every((e) => pairs.indexOf(e) !== -1)) {
        return EVEN;
    }
}
export function compareFileFreshness(
    baseFile: UXFileInfoStub | AnyEntry | undefined,
    checkTarget: UXFileInfo | AnyEntry | undefined
): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
    if (baseFile === undefined && checkTarget == undefined) return EVEN;
    if (baseFile == undefined) return TARGET_IS_NEW;
    if (checkTarget == undefined) return BASE_IS_NEW;

    const modifiedBase = "stat" in baseFile ? (baseFile?.stat?.mtime ?? 0) : (baseFile?.mtime ?? 0);
    const modifiedTarget = "stat" in checkTarget ? (checkTarget?.stat?.mtime ?? 0) : (checkTarget?.mtime ?? 0);

    if (modifiedBase && modifiedTarget && isMarkedAsSameChanges(baseFile, [modifiedBase, modifiedTarget])) {
        return EVEN;
    }
    return compareMTime(modifiedBase, modifiedTarget);
}

const _cached = new Map<
    string,
    {
        value: any;
        context: Map<string, any>;
    }
>();

export type MemoOption = {
    key: string;
    forceUpdate?: boolean;
    validator?: (context: Map<string, any>) => boolean;
};

export function useMemo<T>(
    { key, forceUpdate, validator }: MemoOption,
    updateFunc: (context: Map<string, any>, prev: T) => T
): T {
    const cached = _cached.get(key);
    const context = cached?.context || new Map<string, any>();
    if (cached && !forceUpdate && (!validator || (validator && !validator(context)))) {
        return cached.value;
    }
    const value = updateFunc(context, cached?.value);
    if (value !== cached?.value) {
        _cached.set(key, { value, context });
    }
    return value;
}
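
// Editor's sketch (hedged usage example, not from the original source): useMemo caches a
// computed value under `key` and recomputes only when `forceUpdate` is set or the
// validator returns true (i.e. reports the cached context as stale). Something like:
//   const digest = useMemo(
//       { key: "digest", validator: (ctx) => ctx.get("rev") != currentRev },
//       (ctx, prev) => { ctx.set("rev", currentRev); return computeDigest(); }
//   );
// (currentRev and computeDigest are hypothetical names used only for illustration.)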

// const _static = new Map<string, any>();
const _staticObj = new Map<
    string,
    {
        value: any;
    }
>();

export function useStatic<T>(key: string): { value: T | undefined };
export function useStatic<T>(key: string, initial: T): { value: T };
export function useStatic<T>(key: string, initial?: T) {
    // if (!_static.has(key) && initial) {
    //     _static.set(key, initial);
    // }
    const obj = _staticObj.get(key);
    if (obj !== undefined) {
        return obj;
    } else {
        // let buf = initial;
        const obj = {
            _buf: initial,
            get value() {
                return this._buf as T;
            },
            set value(value: T) {
                this._buf = value;
            },
        };
        _staticObj.set(key, obj);
        return obj;
    }
}
export function disposeMemo(key: string) {
    _cached.delete(key);
}

export function disposeAllMemo() {
    _cached.clear();
}

export function displayRev(rev: string) {
    const [number, hash] = rev.split("-");
    return `${number}-${hash.substring(0, 6)}`;
}
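
// Illustrative example (editor's note, not part of the original commit): displayRev
// shortens a CouchDB revision for display, e.g.
//   displayRev("12-abcdef0123456789"); // => "12-abcdef"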

type DocumentProps = {
    id: DocumentID;
    rev?: string;
    prefixedPath: FilePathWithPrefix;
    path: FilePath;
    isDeleted: boolean;
    revDisplay: string;
    shortenedId: string;
    shortenedPath: string;
};

export function getDocProps(doc: AnyEntry): DocumentProps {
    const id = doc._id;
    const shortenedId = id.substring(0, 10);
    const prefixedPath = getPath(doc);
    const path = stripAllPrefixes(prefixedPath);
    const rev = doc._rev;
    const revDisplay = rev ? displayRev(rev) : "0-NOREVS";
    // const prefix = prefixedPath.substring(0, prefixedPath.length - path.length);
    const shortenedPath = path.substring(0, 10);
    const isDeleted = doc._deleted || doc.deleted || false;
    return { id, rev, revDisplay, prefixedPath, path, isDeleted, shortenedId, shortenedPath };
}

export function getLogLevel(showNotice: boolean) {
    return showNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
}

export type MapLike<K, V> = {
    set(key: K, value: V): Map<K, V>;
    clear(): void;
    delete(key: K): boolean;
    get(key: K): V | undefined;
    has(key: K): boolean;
    keys: () => IterableIterator<K>;
    get size(): number;
};

export async function autosaveCache<K, V>(db: KeyValueDatabase, mapKey: string): Promise<MapLike<K, V>> {
    const savedData = (await db.get<Map<K, V>>(mapKey)) ?? new Map<K, V>();
    const _commit = () => {
        try {
            scheduleTask("commit-map-save-" + mapKey, 250, async () => {
                await db.set(mapKey, savedData);
            });
        } catch {
            // NO OP.
        }
    };
    return {
        set(key: K, value: V) {
            const modified = savedData.get(key) !== value;
            const result = savedData.set(key, value);
            if (modified) {
                _commit();
            }
            return result;
        },
        clear(): void {
            savedData.clear();
            _commit();
        },
        delete(key: K): boolean {
            const result = savedData.delete(key);
            if (result) {
                _commit();
            }
            return result;
        },
        get(key: K): V | undefined {
            return savedData.get(key);
        },
        has(key) {
            return savedData.has(key);
        },
        keys() {
            return savedData.keys();
        },
        get size() {
            return savedData.size;
        },
    };
}
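
// Editor's sketch (hedged, not part of the original commit): autosaveCache behaves like a
// Map that lazily persists itself. Mutations schedule a debounced (250 ms) write-back of
// the whole Map under `mapKey`, so bursts of set/delete calls coalesce into one save:
//   const cache = await autosaveCache<string, number>(kvDB, "chunk-sizes"); // kvDB: some KeyValueDatabase
//   cache.set("docA", 42); // persisted shortly afterwards via scheduleTask
// (kvDB and the key names are hypothetical, used only for illustration.)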

export function onlyInNTimes(n: number, proc: (progress: number) => any) {
    let counter = 0;
    return function () {
        if (counter++ % n == 0) {
            proc(counter);
        }
    };
}
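
// Illustrative example (editor's note, not part of the original commit): onlyInNTimes
// throttles a callback to every n-th invocation, handy for progress reports in tight loops:
//   const report = onlyInNTimes(100, (progress) => Logger(`Processed ${progress} items`));
//   for (const item of items) { handle(item); report(); } // logs on calls 1, 101, 201, ...
// (items and handle are hypothetical names used only for illustration.)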

const waitingTasks = {} as Record<string, { task?: PromiseWithResolvers<any>; previous: number; leastNext: number }>;

export function rateLimitedSharedExecution<T>(key: string, interval: number, proc: () => Promise<T>): Promise<T> {
    if (!(key in waitingTasks)) {
        waitingTasks[key] = { task: undefined, previous: 0, leastNext: 0 };
    }
    if (waitingTasks[key].task) {
        // Extend the previous execution time.
        waitingTasks[key].leastNext = Date.now() + interval;
        return waitingTasks[key].task.promise;
    }

    const previous = waitingTasks[key].previous;

    const delay = previous == 0 ? 0 : Math.max(interval - (Date.now() - previous), 0);

    const task = promiseWithResolver<T>();
    void task.promise.finally(() => {
        if (waitingTasks[key].task === task) {
            waitingTasks[key].task = undefined;
            waitingTasks[key].previous = Math.max(Date.now(), waitingTasks[key].leastNext);
        }
    });
    waitingTasks[key] = {
        task,
        previous: Date.now(),
        leastNext: Date.now() + interval,
    };
    void scheduleTask("thin-out-" + key, delay, async () => {
        try {
            task.resolve(await proc());
        } catch (ex) {
            task.reject(ex);
        }
    });
    return task.promise;
}
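
// Editor's note (hedged summary, not from the original source): callers arriving while a
// run is in flight share that run's promise; otherwise the next run is delayed so that at
// least `interval` ms separate executions for the same key. A sketch:
//   const status = await rateLimitedSharedExecution("remote-status", 5000, () => fetchStatus());
//   // concurrent callers with the same key receive the same pending promise
// (fetchStatus is a hypothetical function used only for illustration.)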
export function updatePreviousExecutionTime(key: string, timeDelta: number = 0) {
    if (!(key in waitingTasks)) {
        waitingTasks[key] = { task: undefined, previous: 0, leastNext: 0 };
    }
    waitingTasks[key].leastNext = Math.max(Date.now() + timeDelta, waitingTasks[key].leastNext);
}

src/deps.ts (new file, 39 lines)
@@ -0,0 +1,39 @@
import { type FilePath } from "./lib/src/common/types.ts";

export {
    addIcon,
    App,
    debounce,
    Editor,
    FuzzySuggestModal,
    MarkdownRenderer,
    MarkdownView,
    Modal,
    Notice,
    Platform,
    Plugin,
    PluginSettingTab,
    requestUrl,
    sanitizeHTMLToDom,
    Setting,
    stringifyYaml,
    TAbstractFile,
    TextAreaComponent,
    TFile,
    TFolder,
    parseYaml,
    ItemView,
    WorkspaceLeaf,
} from "obsidian";
export type {
    DataWriteOptions,
    PluginManifest,
    RequestUrlParam,
    RequestUrlResponse,
    MarkdownFileInfo,
    ListedFiles,
} from "obsidian";
import { normalizePath as normalizePath_ } from "obsidian";
const normalizePath = normalizePath_ as <T extends string | FilePath>(from: T) => T;
export { normalizePath };
export { type Diff, DIFF_DELETE, DIFF_EQUAL, DIFF_INSERT, diff_match_patch } from "diff-match-patch";

src/dialogs.ts (deleted, 126 lines)
@@ -1,126 +0,0 @@
import { App, FuzzySuggestModal, Modal, Setting } from "obsidian";
import ObsidianLiveSyncPlugin from "./main";

//@ts-ignore
import PluginPane from "./PluginPane.svelte";

export class PluginDialogModal extends Modal {
    plugin: ObsidianLiveSyncPlugin;
    logEl: HTMLDivElement;
    component: PluginPane = null;

    constructor(app: App, plugin: ObsidianLiveSyncPlugin) {
        super(app);
        this.plugin = plugin;
    }

    onOpen() {
        const { contentEl } = this;
        if (this.component == null) {
            this.component = new PluginPane({
                target: contentEl,
                props: { plugin: this.plugin },
            });
        }
    }

    onClose() {
        if (this.component != null) {
            this.component.$destroy();
            this.component = null;
        }
    }
}

export class InputStringDialog extends Modal {
    result: string | false = false;
    onSubmit: (result: string | boolean) => void;
    title: string;
    key: string;
    placeholder: string;
    isManuallyClosed = false;

    constructor(app: App, title: string, key: string, placeholder: string, onSubmit: (result: string | false) => void) {
        super(app);
        this.onSubmit = onSubmit;
        this.title = title;
        this.placeholder = placeholder;
        this.key = key;
    }

    onOpen() {
        const { contentEl } = this;

        contentEl.createEl("h1", { text: this.title });

        new Setting(contentEl).setName(this.key).addText((text) =>
            text.onChange((value) => {
                this.result = value;
            })
        );

        new Setting(contentEl).addButton((btn) =>
            btn
                .setButtonText("Ok")
                .setCta()
                .onClick(() => {
                    this.isManuallyClosed = true;
                    this.close();
                })
        ).addButton((btn) =>
            btn
                .setButtonText("Cancel")
                .setCta()
                .onClick(() => {
                    this.close();
                })
        );
    }

    onClose() {
        const { contentEl } = this;
        contentEl.empty();
        if (this.isManuallyClosed) {
            this.onSubmit(this.result);
        } else {
            this.onSubmit(false);
        }
    }
}
export class PopoverSelectString extends FuzzySuggestModal<string> {
    app: App;
    callback: (e: string) => void = () => {};
    getItemsFun: () => string[] = () => {
        return ["yes", "no"];
    };

    constructor(app: App, note: string, placeholder: string | null, getItemsFun: () => string[], callback: (e: string) => void) {
        super(app);
        this.app = app;
        this.setPlaceholder((placeholder ?? "y/n) ") + note);
        if (getItemsFun) this.getItemsFun = getItemsFun;
        this.callback = callback;
    }

    getItems(): string[] {
        return this.getItemsFun();
    }

    getItemText(item: string): string {
        return item;
    }

    onChooseItem(item: string, evt: MouseEvent | KeyboardEvent): void {
        // debugger;
        this.callback(item);
        this.callback = null;
    }
    onClose(): void {
        setTimeout(() => {
            if (this.callback != null) {
                this.callback("");
            }
        }, 100);
    }
}

src/features/ConfigSync/CmdConfigSync.ts (new file, 1818 lines)

src/features/ConfigSync/PluginCombo.svelte (new file, 470 lines)
@@ -0,0 +1,470 @@
<script lang="ts">
    import {
        ConfigSync,
        PluginDataExDisplayV2,
        type IPluginDataExDisplay,
        type PluginDataExFile,
    } from "./CmdConfigSync.ts";
    import { Logger } from "../../lib/src/common/logger";
    import { type FilePath, LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "../../lib/src/common/types";
    import { getDocData, timeDeltaToHumanReadable, unique } from "../../lib/src/common/utils";
    import type ObsidianLiveSyncPlugin from "../../main";
    // import { askString } from "../../common/utils";
    import { Menu } from "obsidian";

    export let list: IPluginDataExDisplay[] = [];
    export let thisTerm = "";
    export let hideNotApplicable = false;
    export let selectNewest = 0;
    export let selectNewestStyle = 0;
    export let applyAllPluse = 0;

    export let applyData: (data: IPluginDataExDisplay) => Promise<boolean>;
    export let compareData: (
        dataA: IPluginDataExDisplay,
        dataB: IPluginDataExDisplay,
        compareEach?: boolean
    ) => Promise<boolean>;
    export let deleteData: (data: IPluginDataExDisplay) => Promise<boolean>;
    export let hidden: boolean;
    export let plugin: ObsidianLiveSyncPlugin;
    export let isMaintenanceMode: boolean = false;
    export let isFlagged: boolean = false;
    const addOn = plugin.getAddOn<ConfigSync>(ConfigSync.name)!;
    if (!addOn) {
        Logger(`Could not load the add-on ${ConfigSync.name}`, LOG_LEVEL_INFO);
        throw new Error(`Could not load the add-on ${ConfigSync.name}`);
    }

    export let selected = "";
    let freshness = "";
    let equivalency = "";
    let version = "";
    let canApply: boolean = false;
    let canCompare: boolean = false;
    let pickToCompare: boolean = false;
    let currentSelectNewest = 0;
    let currentApplyAll = 0;

    // Selectable terminals
    let terms = [] as string[];

    async function comparePlugin(local: IPluginDataExDisplay | undefined, remote: IPluginDataExDisplay | undefined) {
        let freshness = "";
        let equivalency = "";
        let version = "";
        let contentCheck = false;
        let canApply: boolean = false;
        let canCompare = false;
        if (!local && !remote) {
            // NO OP. what's happened?
            freshness = "";
        } else if (local && !remote) {
            freshness = "Local only";
        } else if (remote && !local) {
            freshness = "Remote only";
            canApply = true;
        } else {
            const dtDiff = (local?.mtime ?? 0) - (remote?.mtime ?? 0);
            const diff = timeDeltaToHumanReadable(Math.abs(dtDiff));
            if (dtDiff / 1000 < -10) {
                // freshness = "✓ Newer";
                freshness = `Newer (${diff})`;
                canApply = true;
                contentCheck = true;
            } else if (dtDiff / 1000 > 10) {
                // freshness = "⚠ Older";
                freshness = `Older (${diff})`;
                canApply = true;
                contentCheck = true;
            } else {
                freshness = "Same";
                canApply = false;
                contentCheck = true;
            }
        }
        const localVersionStr = local?.version || "0.0.0";
        const remoteVersionStr = remote?.version || "0.0.0";
        if (local?.version || remote?.version) {
            const compare = `${localVersionStr}`.localeCompare(remoteVersionStr, undefined, { numeric: true });
            if (compare == 0) {
                version = "Same";
            } else if (compare < 0) {
                version = `Lower (${localVersionStr} < ${remoteVersionStr})`;
            } else if (compare > 0) {
                version = `Higher (${localVersionStr} > ${remoteVersionStr})`;
            }
        }

        if (contentCheck) {
            if (local && remote) {
                const { canApply, equivalency, canCompare } = await checkEquivalency(local, remote);
                return { canApply, freshness, equivalency, version, canCompare };
            }
        }
        return { canApply, freshness, equivalency, version, canCompare };
    }

    async function checkEquivalency(local: IPluginDataExDisplay, remote: IPluginDataExDisplay) {
        let equivalency = "";
        let canApply = false;
        let canCompare = false;
        const filenames = [...new Set([...local.files.map((e) => e.filename), ...remote.files.map((e) => e.filename)])];
        const matchingStatus = filenames
            .map((filename) => {
                const localFile = local.files.find((e) => e.filename == filename);
                const remoteFile = remote.files.find((e) => e.filename == filename);
                if (!localFile && !remoteFile) {
                    return 0b0000000;
                } else if (localFile && !remoteFile) {
                    return 0b0000010; //"LOCAL_ONLY";
                } else if (!localFile && remoteFile) {
                    return 0b0001000; //"REMOTE ONLY"
                } else if (localFile && remoteFile) {
                    const localDoc = getDocData(localFile.data);
                    const remoteDoc = getDocData(remoteFile.data);
                    if (localDoc == remoteDoc) {
                        return 0b0000100; //"EVEN"
                    } else {
                        return 0b0010000; //"DIFFERENT";
                    }
                } else {
                    return 0b0010000; //"DIFFERENT";
                }
            })
            .reduce((p, c) => p | (c as number), 0 as number);
        if (matchingStatus == 0b0000100) {
            equivalency = "Same";
            canApply = false;
        } else if (matchingStatus <= 0b0000100) {
            equivalency = "Same or local only";
            canApply = false;
        } else if (matchingStatus == 0b0010000) {
            canApply = true;
            canCompare = true;
            equivalency = "Different";
        } else {
            canApply = true;
            canCompare = true;
            equivalency = "Mixed";
        }
        return { equivalency, canApply, canCompare };
    }
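
    // Editor's note (illustrative, not part of the original commit): each filename is
    // mapped to a bit flag (0b0000010 local only, 0b0001000 remote only, 0b0000100
    // identical, 0b0010000 different) and the flags are OR-reduced. Hence
    // matchingStatus == 0b0000100 means every file matched exactly, while any set
    // 0b0010000 bit marks at least one differing file ("Different" or "Mixed").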

    async function performCompare(local: IPluginDataExDisplay | undefined, remote: IPluginDataExDisplay | undefined) {
        const result = await comparePlugin(local, remote);
        canApply = result.canApply;
        freshness = result.freshness;
        equivalency = result.equivalency;
        version = result.version;
        canCompare = result.canCompare;
        pickToCompare = false;
        if (canCompare) {
            if (
                local?.files.length == remote?.files.length &&
                local?.files.length == 1 &&
                local?.files[0].filename == remote?.files[0].filename
            ) {
                pickToCompare = false;
            } else {
                pickToCompare = true;
                // pickToCompare = false;
                // canCompare = false;
            }
        }
    }

    async function updateTerms(list: IPluginDataExDisplay[], selectNewest: boolean, isMaintenanceMode: boolean) {
        const local = list.find((e) => e.term == thisTerm);
        // selected = "";
        if (isMaintenanceMode) {
            terms = [...new Set(list.map((e) => e.term))];
        } else if (hideNotApplicable) {
            const termsTmp = [];
            const wk = [...new Set(list.map((e) => e.term))];
            for (const termName of wk) {
                const remote = list.find((e) => e.term == termName);
                if ((await comparePlugin(local, remote)).canApply) {
                    termsTmp.push(termName);
                }
            }
            terms = [...termsTmp];
        } else {
            terms = [...new Set(list.map((e) => e.term))].filter((e) => e != thisTerm);
        }
        let newest: IPluginDataExDisplay | undefined = local;
        if (selectNewest) {
            for (const term of terms) {
                const remote = list.find((e) => e.term == term);
                if (remote && remote.mtime && (newest?.mtime || 0) < remote.mtime) {
                    newest = remote;
                }
            }
            if (newest && newest.term != thisTerm) {
                selected = newest.term;
            }
            // selectNewest = false;
        }
        if (terms.indexOf(selected) < 0) {
            selected = "";
        }
    }
    $: {
        // React pulse and select
        let doSelectNewest = false;
        if (selectNewest != currentSelectNewest) {
            if (selectNewestStyle == 1) {
                doSelectNewest = true;
            } else if (selectNewestStyle == 2) {
                doSelectNewest = isFlagged;
            } else if (selectNewestStyle == 3) {
                selected = "";
            }
            // currentSelectNewest = selectNewest;
        }
        updateTerms(list, doSelectNewest, isMaintenanceMode);
        currentSelectNewest = selectNewest;
    }
    $: {
        // React pulse and apply
        const doApply = applyAllPluse != currentApplyAll;
        currentApplyAll = applyAllPluse;
        if (doApply && selected) {
            if (!hidden) {
                applySelected();
            }
        }
    }
    $: {
        freshness = "";
        equivalency = "";
        version = "";
        canApply = false;
        if (selected == "") {
            // NO OP.
        } else if (selected == thisTerm) {
            freshness = "This device";
            canApply = false;
        } else {
            const local = list.find((e) => e.term == thisTerm);
            const remote = list.find((e) => e.term == selected);
            performCompare(local, remote);
        }
    }
    async function applySelected() {
        const local = list.find((e) => e.term == thisTerm);
        const selectedItem = list.find((e) => e.term == selected);
        if (selectedItem && (await applyData(selectedItem))) {
            addOn.updatePluginList(true, local?.documentPath);
        }
    }
    async function compareSelected() {
        const local = list.find((e) => e.term == thisTerm);
        const selectedItem = list.find((e) => e.term == selected);
        await compareItems(local, selectedItem);
    }
    async function compareItems(
        local: IPluginDataExDisplay | undefined,
        remote: IPluginDataExDisplay | undefined,
        filename?: string
    ) {
        if (local && remote) {
            if (!filename) {
                if (await compareData(local, remote)) {
                    addOn.updatePluginList(true, local.documentPath);
                }
                return;
            } else {
                const localCopy =
                    local instanceof PluginDataExDisplayV2 ? new PluginDataExDisplayV2(local) : { ...local };
                const remoteCopy =
                    remote instanceof PluginDataExDisplayV2 ? new PluginDataExDisplayV2(remote) : { ...remote };
                localCopy.files = localCopy.files.filter((e) => e.filename == filename);
                remoteCopy.files = remoteCopy.files.filter((e) => e.filename == filename);
                if (await compareData(localCopy, remoteCopy, true)) {
                    addOn.updatePluginList(true, local.documentPath);
                }
            }
            return;
        } else {
            if (!remote && !local) {
                Logger(`Could not find both remote and local item`, LOG_LEVEL_INFO);
            } else if (!remote) {
                Logger(`Could not find remote item`, LOG_LEVEL_INFO);
            } else if (!local) {
                Logger(`Could not find local item`, LOG_LEVEL_INFO);
            }
        }
    }

    async function pickCompareItem(evt: MouseEvent) {
        const local = list.find((e) => e.term == thisTerm);
        const selectedItem = list.find((e) => e.term == selected);
        if (!local) return;
        if (!selectedItem) return;
        const menu = new Menu();
        menu.addItem((item) => item.setTitle("Compare file").setIsLabel(true));
        menu.addSeparator();
        const files = unique(local.files.map((e) => e.filename).concat(selectedItem.files.map((e) => e.filename)));
        const convDate = (dt: PluginDataExFile | undefined) => {
            if (!dt) return "(Missing)";
            const d = new Date(dt.mtime);
            return d.toLocaleString();
        };
        for (const filename of files) {
            menu.addItem((item) => {
                const localFile = local.files.find((e) => e.filename == filename);
                const remoteFile = selectedItem.files.find((e) => e.filename == filename);
                const title = `${filename} (${convDate(localFile)} <--> ${convDate(remoteFile)})`;
                item.setTitle(title).onClick((e) => compareItems(local, selectedItem, filename));
            });
        }
        menu.showAtMouseEvent(evt);
    }
    async function deleteSelected() {
        const selectedItem = list.find((e) => e.term == selected);
        // const deletedPath = selectedItem.documentPath;
        if (selectedItem && (await deleteData(selectedItem))) {
            addOn.reloadPluginList(true);
        }
    }
    async function duplicateItem() {
        const local = list.find((e) => e.term == thisTerm);
        if (!local) {
            Logger(`Could not find local item`, LOG_LEVEL_VERBOSE);
            return;
        }
        const duplicateTermName = await plugin.confirm.askString("Duplicate", "device name", "");
        if (duplicateTermName) {
            if (duplicateTermName.contains("/")) {
                Logger(`We cannot use "/" in the device name`, LOG_LEVEL_NOTICE);
                return;
            }
            const key = `${plugin.app.vault.configDir}/${local.files[0].filename}`;
            await addOn.storeCustomizationFiles(key as FilePath, duplicateTermName);
            await addOn.updatePluginList(false, addOn.filenameToUnifiedKey(key, duplicateTermName));
        }
    }
</script>

{#if terms.length > 0}
    <span class="spacer"></span>
    {#if !hidden}
        <span class="chip-wrap">
            <span class="chip modified">{freshness}</span>
            <span class="chip content">{equivalency}</span>
            <span class="chip version">{version}</span>
        </span>
        <select bind:value={selected}>
            <option value={""}>-</option>
            {#each terms as term}
                <option value={term}>{term}</option>
            {/each}
        </select>
        {#if canApply || (isMaintenanceMode && selected != "")}
            {#if canCompare}
                {#if pickToCompare}
                    <button on:click={pickCompareItem}>🗃️</button>
                {:else}
                    <!--🔍 -->
                    <button on:click={compareSelected}>⮂</button>
                {/if}
            {:else}
                <!-- svelte-ignore a11y_consider_explicit_label -->
                <button disabled></button>
            {/if}
            <button on:click={applySelected}>✓</button>
        {:else}
            <!-- svelte-ignore a11y_consider_explicit_label -->
            <button disabled></button>
            <!-- svelte-ignore a11y_consider_explicit_label -->
            <button disabled></button>
        {/if}
        {#if isMaintenanceMode}
            {#if selected != ""}
                <button on:click={deleteSelected}>🗑️</button>
            {:else}
                <button on:click={duplicateItem}>📑</button>
            {/if}
        {/if}
    {/if}
{:else}
    <span class="spacer"></span>
    <span class="message even">All the same or non-existent</span>
    <!-- svelte-ignore a11y_consider_explicit_label -->
    <button disabled></button>
    <!-- svelte-ignore a11y_consider_explicit_label -->
    <button disabled></button>
{/if}

<style>
    .spacer {
        min-width: 1px;
        flex-grow: 1;
    }
    button {
        margin: 2px 4px;
        min-width: 3em;
        max-width: 4em;
    }
    button:disabled {
        border: none;
        box-shadow: none;
        background-color: transparent;
        visibility: collapse;
    }
    button:disabled:hover {
        border: none;
        box-shadow: none;
        background-color: transparent;
        visibility: collapse;
    }
    span.message {
        color: var(--text-muted);
        font-size: var(--font-ui-smaller);
        padding: 0 1em;
        line-height: var(--line-height-tight);
    }
    /* span.messages {
        display: flex;
        flex-direction: column;
        align-items: center;
    } */
    :global(.is-mobile) .spacer {
        margin-left: auto;
    }

    .chip-wrap {
        display: flex;
        gap: 2px;
        flex-direction: column;
        justify-content: center;
        align-items: flex-start;
    }
    .chip {
        display: inline-block;
        border-radius: 2px;
        font-size: 0.8em;
        padding: 0 4px;
        margin: 0 2px;
        border-color: var(--tag-border-color);
        background-color: var(--tag-background);
        color: var(--tag-color);
    }
    .chip:empty {
        display: none;
    }
    .chip:not(:empty)::before {
        min-width: 1.8em;
        display: inline-block;
    }
    .chip.content:not(:empty)::before {
        content: "📄: ";
    }
    .chip.version:not(:empty)::before {
        content: "🏷️: ";
    }
    .chip.modified:not(:empty)::before {
        content: "📅: ";
    }
</style>

src/features/ConfigSync/PluginDialogModal.ts (new file, 37 lines)
@@ -0,0 +1,37 @@
import { mount, unmount } from "svelte";
import { App, Modal } from "../../deps.ts";
import ObsidianLiveSyncPlugin from "../../main.ts";
import PluginPane from "./PluginPane.svelte";
export class PluginDialogModal extends Modal {
    plugin: ObsidianLiveSyncPlugin;
    component: ReturnType<typeof mount> | undefined;
    isOpened() {
        return this.component != undefined;
    }

    constructor(app: App, plugin: ObsidianLiveSyncPlugin) {
        super(app);
        this.plugin = plugin;
    }

    onOpen() {
        const { contentEl } = this;
        this.contentEl.style.overflow = "auto";
        this.contentEl.style.display = "flex";
        this.contentEl.style.flexDirection = "column";
        this.titleEl.setText("Customization Sync (Beta3)");
        if (!this.component) {
            this.component = mount(PluginPane, {
                target: contentEl,
                props: { plugin: this.plugin },
            });
        }
    }

    onClose() {
        if (this.component) {
            void unmount(this.component);
            this.component = undefined;
        }
    }
}

src/features/ConfigSync/PluginPane.svelte (new file, 652 lines)
@@ -0,0 +1,652 @@
<script lang="ts">
    import { onMount } from "svelte";
    import ObsidianLiveSyncPlugin from "../../main";
    import {
        ConfigSync,
        type IPluginDataExDisplay,
        pluginIsEnumerating,
        pluginList,
        pluginManifestStore,
        pluginV2Progress,
    } from "./CmdConfigSync.ts";
    import PluginCombo from "./PluginCombo.svelte";
    import { Menu, type PluginManifest } from "obsidian";
    import { unique } from "../../lib/src/common/utils";
    import {
        MODE_SELECTIVE,
        MODE_AUTOMATIC,
        MODE_PAUSED,
        type SYNC_MODE,
        MODE_SHINY,
    } from "../../lib/src/common/types";
    import { normalizePath } from "../../deps";
    import { HiddenFileSync } from "../HiddenFileSync/CmdHiddenFileSync.ts";
    import { LOG_LEVEL_NOTICE, Logger } from "octagonal-wheels/common/logger";
    export let plugin: ObsidianLiveSyncPlugin;

    $: hideNotApplicable = false;
    $: thisTerm = plugin.services.setting.getDeviceAndVaultName();

    const addOn = plugin.getAddOn(ConfigSync.name) as ConfigSync;
    if (!addOn) {
        const msg =
            "AddOn Module (ConfigSync) has not been loaded. This is a very unexpected situation. Please report this issue.";
        Logger(msg, LOG_LEVEL_NOTICE);
        throw new Error(msg);
    }
    const addOnHiddenFileSync = plugin.getAddOn(HiddenFileSync.name) as HiddenFileSync;
    if (!addOnHiddenFileSync) {
        const msg =
            "AddOn Module (HiddenFileSync) has not been loaded. This is a very unexpected situation. Please report this issue.";
        Logger(msg, LOG_LEVEL_NOTICE);
        throw new Error(msg);
    }

    let list: IPluginDataExDisplay[] = [];

    let selectNewestPulse = 0;
    let selectNewestStyle = 0;
    let hideEven = false;
    let loading = false;
    let applyAllPluse = 0;
    let isMaintenanceMode = false;
    async function requestUpdate() {
        await addOn.updatePluginList(true);
    }
    async function requestReload() {
        await addOn.reloadPluginList(true);
    }
    let allTerms = [] as string[];
    pluginList.subscribe((e) => {
        list = e;
        allTerms = unique(list.map((e) => e.term));
    });
    pluginIsEnumerating.subscribe((e) => {
        loading = e;
    });
    onMount(async () => {
        requestUpdate();
    });

    function filterList(list: IPluginDataExDisplay[], categories: string[]) {
        const w = list.filter((e) => categories.indexOf(e.category) !== -1);
        return w.sort((a, b) => `${a.category}-${a.name}`.localeCompare(`${b.category}-${b.name}`));
    }

    function groupBy(items: IPluginDataExDisplay[], key: string) {
        let ret = {} as Record<string, IPluginDataExDisplay[]>;
        for (const v of items) {
            //@ts-ignore
            const k = (key in v ? v[key] : "") as string;
            ret[k] = ret[k] || [];
            ret[k].push(v);
        }
        for (const k in ret) {
            ret[k] = ret[k].sort((a, b) => `${a.category}-${a.name}`.localeCompare(`${b.category}-${b.name}`));
        }
        const w = Object.entries(ret);
        return w.sort(([a], [b]) => `${a}`.localeCompare(`${b}`));
    }
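
    // Editor's sketch (hedged example, not from the original source): groupBy buckets the
    // display entries by a property name and returns sorted [key, items[]] pairs, e.g.:
    //   groupBy(list, "name"); // => [["plugin-a", [...]], ["plugin-b", [...]], ...]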

    const displays = {
        CONFIG: "Configuration",
        THEME: "Themes",
        SNIPPET: "Snippets",
    };
    async function scanAgain() {
        await addOn.scanAllConfigFiles(true);
        await requestUpdate();
    }
    async function replicate() {
        await plugin.services.replication.replicate(true);
    }
    function selectAllNewest(selectMode: boolean) {
        selectNewestPulse++;
        selectNewestStyle = selectMode ? 1 : 2;
    }
    function resetSelectNewest() {
        selectNewestPulse++;
        selectNewestStyle = 3;
    }
    function applyAll() {
        applyAllPluse++;
    }
    async function applyData(data: IPluginDataExDisplay): Promise<boolean> {
        return await addOn.applyData(data);
    }
    async function compareData(
        docA: IPluginDataExDisplay,
        docB: IPluginDataExDisplay,
        compareEach = false
    ): Promise<boolean> {
        return await addOn.compareUsingDisplayData(docA, docB, compareEach);
    }
    async function deleteData(data: IPluginDataExDisplay): Promise<boolean> {
        return await addOn.deleteData(data);
    }
    function askMode(evt: MouseEvent, title: string, key: string) {
        const menu = new Menu();
        menu.addItem((item) => item.setTitle(title).setIsLabel(true));
        menu.addSeparator();
        const prevMode = automaticList.get(key) ?? MODE_SELECTIVE;
        for (const mode of [MODE_SELECTIVE, MODE_AUTOMATIC, MODE_PAUSED, MODE_SHINY]) {
            menu.addItem((item) => {
                item.setTitle(`${getIcon(mode as SYNC_MODE)}:${TITLES[mode]}`)
                    .onClick((e) => {
                        if (mode === MODE_AUTOMATIC) {
                            askOverwriteModeForAutomatic(evt, key);
                        } else {
                            setMode(key, mode as SYNC_MODE);
                        }
                    })
                    .setChecked(prevMode == mode)
                    .setDisabled(prevMode == mode);
            });
        }
        menu.showAtMouseEvent(evt);
    }
    function applyAutomaticSync(key: string, direction: "pushForce" | "pullForce" | "safe") {
        setMode(key, MODE_AUTOMATIC);
        const configDir = normalizePath(plugin.app.vault.configDir);
        const files = (plugin.settings.pluginSyncExtendedSetting[key]?.files ?? []).map((e) => `${configDir}/${e}`);
        addOnHiddenFileSync.initialiseInternalFileSync(direction, true, files);
    }
    function askOverwriteModeForAutomatic(evt: MouseEvent, key: string) {
        const menu = new Menu();
        menu.addItem((item) => item.setTitle("Initial Action").setIsLabel(true));
        menu.addSeparator();
        menu.addItem((item) => {
            item.setTitle(`↑: Overwrite Remote`).onClick((e) => {
                applyAutomaticSync(key, "pushForce");
            });
        })
            .addItem((item) => {
                item.setTitle(`↓: Overwrite Local`).onClick((e) => {
                    applyAutomaticSync(key, "pullForce");
                });
            })
            .addItem((item) => {
                item.setTitle(`⇅: Use newer`).onClick((e) => {
                    applyAutomaticSync(key, "safe");
                });
            });
        menu.showAtMouseEvent(evt);
    }

    $: options = {
        thisTerm,
        hideNotApplicable,
        selectNewest: selectNewestPulse,
        selectNewestStyle,
        applyAllPluse,
        applyData,
        compareData,
        deleteData,
        plugin,
        isMaintenanceMode,
    };

    const ICON_EMOJI_PAUSED = `⛔`;
    const ICON_EMOJI_AUTOMATIC = `✨`;
    const ICON_EMOJI_SELECTIVE = `🔀`;
    const ICON_EMOJI_FLAGGED = `🚩`;

    const ICONS: { [key: number]: string } = {
        [MODE_SELECTIVE]: ICON_EMOJI_SELECTIVE,
        [MODE_PAUSED]: ICON_EMOJI_PAUSED,
        [MODE_AUTOMATIC]: ICON_EMOJI_AUTOMATIC,
        [MODE_SHINY]: ICON_EMOJI_FLAGGED,
    };
    const TITLES: { [key: number]: string } = {
        [MODE_SELECTIVE]: "Selective",
        [MODE_PAUSED]: "Ignore",
        [MODE_AUTOMATIC]: "Automatic",
        [MODE_SHINY]: "Flagged Selective",
    };
    const PREFIX_PLUGIN_ALL = "PLUGIN_ALL";
    const PREFIX_PLUGIN_DATA = "PLUGIN_DATA";
    const PREFIX_PLUGIN_MAIN = "PLUGIN_MAIN";
    const PREFIX_PLUGIN_ETC = "PLUGIN_ETC";
    function setMode(key: string, mode: SYNC_MODE) {
        if (key.startsWith(PREFIX_PLUGIN_ALL + "/")) {
            setMode(PREFIX_PLUGIN_DATA + key.substring(PREFIX_PLUGIN_ALL.length), mode);
            setMode(PREFIX_PLUGIN_MAIN + key.substring(PREFIX_PLUGIN_ALL.length), mode);
            return;
        }
        const files = unique(
            list
                .filter((e) => `${e.category}/${e.name}` == key)
                .map((e) => e.files)
                .flat()
                .map((e) => e.filename)
        );
        if (mode == MODE_SELECTIVE) {
            automaticList.delete(key);
            delete plugin.settings.pluginSyncExtendedSetting[key];
            automaticListDisp = automaticList;
        } else {
            automaticList.set(key, mode);
            automaticListDisp = automaticList;
            if (!(key in plugin.settings.pluginSyncExtendedSetting)) {
                plugin.settings.pluginSyncExtendedSetting[key] = {
                    key,
                    mode,
                    files: [],
                };
            }
            plugin.settings.pluginSyncExtendedSetting[key].files = files;
            plugin.settings.pluginSyncExtendedSetting[key].mode = mode;
        }
        plugin.services.setting.saveSettingData();
    }
    function getIcon(mode: SYNC_MODE) {
        if (mode in ICONS) {
            return ICONS[mode];
        } else {
            return "";
        }
    }
    let automaticList = new Map<string, SYNC_MODE>();
    let automaticListDisp = new Map<string, SYNC_MODE>();

    // apply current configuration to the dialogue
    for (const { key, mode } of Object.values(plugin.settings.pluginSyncExtendedSetting)) {
        automaticList.set(key, mode);
    }

    automaticListDisp = automaticList;

    let displayKeys: Record<string, string[]> = {};

    function computeDisplayKeys(list: IPluginDataExDisplay[]) {
        const extraKeys = Object.keys(plugin.settings.pluginSyncExtendedSetting);
        return [
            ...list,
            ...extraKeys
                .map((e) => `${e}///`.split("/"))
                .filter((e) => e[0] && e[1])
                .map((e) => ({ category: e[0], name: e[1], displayName: e[1] })),
        ]
            .sort((a, b) => (a.displayName ?? a.name).localeCompare(b.displayName ?? b.name))
            .reduce(
                (p, c) => ({
                    ...p,
                    [c.category]: unique(
                        c.category in p ? [...p[c.category], c.displayName ?? c.name] : [c.displayName ?? c.name]
                    ),
                }),
                {} as Record<string, string[]>
            );
    }
    $: {
        displayKeys = computeDisplayKeys(list);
    }

    let deleteTerm = "";

    async function deleteAllItems(term: string) {
        const deleteItems = list.filter((e) => e.term == term);
        for (const item of deleteItems) {
            await deleteData(item);
        }
        addOn.reloadPluginList(true);
    }

    let nameMap = new Map<string, string>();
    function updateNameMap(e: Map<string, PluginManifest>) {
        const items = [...e.entries()].map(([k, v]) => [k.split("/").slice(-2).join("/"), v.name] as [string, string]);
        const newMap = new Map(items);
        if (newMap.size == nameMap.size) {
            let diff = false;
            for (const [k, v] of newMap) {
                if (nameMap.get(k) != v) {
                    diff = true;
                    break;
                }
            }
            if (!diff) {
                return;
            }
        }
        nameMap = newMap;
    }
    $: updateNameMap($pluginManifestStore);

    let displayEntries = [] as [string, string][];
    $: {
        displayEntries = Object.entries(displays).filter(([key, _]) => key in displayKeys);
    }

    let pluginEntries = [] as [string, IPluginDataExDisplay[]][];
    $: {
        pluginEntries = groupBy(filterList(list, ["PLUGIN_MAIN", "PLUGIN_DATA", "PLUGIN_ETC"]), "name");
    }
    let useSyncPluginEtc = plugin.settings.usePluginEtc;
</script>

<div class="buttonsWrap">
    <div class="buttons">
        <button on:click={() => scanAgain()}>Scan changes</button>
        <button on:click={() => replicate()}>Sync once</button>
        <button on:click={() => requestUpdate()}>Refresh</button>
        {#if isMaintenanceMode}
            <button on:click={() => requestReload()}>Reload</button>
        {/if}
    </div>
    <div class="buttons">
        <button on:click={() => selectAllNewest(true)}>Select All Shiny</button>
        <button on:click={() => selectAllNewest(false)}>{ICON_EMOJI_FLAGGED} Select Flagged Shiny</button>
        <button on:click={() => resetSelectNewest()}>Deselect all</button>
        <button on:click={() => applyAll()} class="mod-cta">Apply All Selected</button>
    </div>
</div>
<div class="loading">
    {#if loading || $pluginV2Progress !== 0}
        <span>Updating list...{$pluginV2Progress == 0 ? "" : ` (${$pluginV2Progress})`}</span>
    {/if}
</div>
<div class="list">
    {#if list.length == 0}
        <div class="center">No Items.</div>
    {:else}
        {#each displayEntries as [key, label]}
            <div>
                <h3>{label}</h3>
                {#each displayKeys[key] as name}
                    {@const bindKey = `${key}/${name}`}
                    {@const mode = automaticListDisp.get(bindKey) ?? MODE_SELECTIVE}
                    <div class="labelrow {hideEven ? 'hideeven' : ''}">
                        <div class="title">
                            <button class="status" on:click={(evt) => askMode(evt, `${key}/${name}`, bindKey)}>
                                {getIcon(mode)}
                            </button>
                            <span class="name">{(key == "THEME" && nameMap.get(`themes/${name}`)) || name}</span>
                        </div>
                        <div class="body">
                            {#if mode == MODE_SELECTIVE || mode == MODE_SHINY}
                                <PluginCombo
                                    {...options}
                                    isFlagged={mode == MODE_SHINY}
                                    list={list.filter((e) => e.category == key && e.name == name)}
                                    hidden={false}
                                />
                            {:else}
                                <div class="statusnote">{TITLES[mode]}</div>
                            {/if}
                        </div>
                    </div>
                {/each}
            </div>
        {/each}
        <div>
            <h3>Plugins</h3>
            {#each pluginEntries as [name, listX]}
                {@const bindKeyAll = `${PREFIX_PLUGIN_ALL}/${name}`}
                {@const modeAll = automaticListDisp.get(bindKeyAll) ?? MODE_SELECTIVE}
                {@const bindKeyMain = `${PREFIX_PLUGIN_MAIN}/${name}`}
                {@const modeMain = automaticListDisp.get(bindKeyMain) ?? MODE_SELECTIVE}
                {@const bindKeyData = `${PREFIX_PLUGIN_DATA}/${name}`}
                {@const modeData = automaticListDisp.get(bindKeyData) ?? MODE_SELECTIVE}
                {@const bindKeyETC = `${PREFIX_PLUGIN_ETC}/${name}`}
                {@const modeEtc = automaticListDisp.get(bindKeyETC) ?? MODE_SELECTIVE}
                <div class="labelrow {hideEven ? 'hideeven' : ''}">
                    <div class="title">
                        <button
                            class="status"
                            on:click={(evt) => askMode(evt, `${PREFIX_PLUGIN_ALL}/${name}`, bindKeyAll)}
                        >
                            {getIcon(modeAll)}
                        </button>
                        <span class="name">{nameMap.get(`plugins/${name}`) || name}</span>
                    </div>
                    <div class="body">
                        {#if modeAll == MODE_SELECTIVE || modeAll == MODE_SHINY}
                            <PluginCombo {...options} isFlagged={modeAll == MODE_SHINY} list={listX} hidden={true} />
                        {/if}
                    </div>
                </div>
                {#if modeAll == MODE_SELECTIVE || modeAll == MODE_SHINY}
                    <div class="filerow {hideEven ? 'hideeven' : ''}">
                        <div class="filetitle">
                            <button
                                class="status"
                                on:click={(evt) => askMode(evt, `${PREFIX_PLUGIN_MAIN}/${name}/MAIN`, bindKeyMain)}
                            >
                                {getIcon(modeMain)}
                            </button>
                            <span class="name">MAIN</span>
                        </div>
                        <div class="body">
                            {#if modeMain == MODE_SELECTIVE || modeMain == MODE_SHINY}
                                <PluginCombo
                                    {...options}
                                    isFlagged={modeMain == MODE_SHINY}
                                    list={filterList(listX, ["PLUGIN_MAIN"])}
                                    hidden={false}
                                />
                            {:else}
                                <div class="statusnote">{TITLES[modeMain]}</div>
                            {/if}
                        </div>
                    </div>
                    <div class="filerow {hideEven ? 'hideeven' : ''}">
                        <div class="filetitle">
                            <button
                                class="status"
                                on:click={(evt) => askMode(evt, `${PREFIX_PLUGIN_DATA}/${name}`, bindKeyData)}
                            >
                                {getIcon(modeData)}
                            </button>
                            <span class="name">DATA</span>
                        </div>
                        <div class="body">
                            {#if modeData == MODE_SELECTIVE || modeData == MODE_SHINY}
                                <PluginCombo
                                    {...options}
                                    isFlagged={modeData == MODE_SHINY}
                                    list={filterList(listX, ["PLUGIN_DATA"])}
                                    hidden={false}
                                />
                            {:else}
                                <div class="statusnote">{TITLES[modeData]}</div>
                            {/if}
                        </div>
                    </div>
                    {#if useSyncPluginEtc}
                        <div class="filerow {hideEven ? 'hideeven' : ''}">
                            <div class="filetitle">
                                <button
                                    class="status"
                                    on:click={(evt) => askMode(evt, `${PREFIX_PLUGIN_ETC}/${name}`, bindKeyETC)}
                                >
                                    {getIcon(modeEtc)}
                                </button>
                                <span class="name">Other files</span>
                            </div>
                            <div class="body">
                                {#if modeEtc == MODE_SELECTIVE || modeEtc == MODE_SHINY}
                                    <PluginCombo
                                        {...options}
                                        isFlagged={modeEtc == MODE_SHINY}
                                        list={filterList(listX, ["PLUGIN_ETC"])}
                                        hidden={false}
                                    />
                                {:else}
                                    <div class="statusnote">{TITLES[modeEtc]}</div>
                                {/if}
                            </div>
                        </div>
                    {/if}
                {:else}
                    <div class="noterow">
                        <div class="statusnote">{TITLES[modeAll]}</div>
                    </div>
                {/if}
            {/each}
        </div>
    {/if}
</div>
{#if isMaintenanceMode}
    <div class="buttons">
        <div>
            <h3>Maintenance Commands</h3>
            <div class="maintenancerow">
                <label for="">Delete All of </label>
                <select bind:value={deleteTerm}>
                    {#each allTerms as term}
                        <option value={term}>{term}</option>
                    {/each}
                </select>
                <button
                    class="status"
                    on:click={(evt) => {
                        deleteAllItems(deleteTerm);
                    }}
                >
                    🗑️
                </button>
            </div>
        </div>
    </div>
{/if}
<div class="buttons">
    <label><span>Hide not applicable items</span><input type="checkbox" bind:checked={hideEven} /></label>
</div>
<div class="buttons">
    <label><span>Maintenance mode</span><input type="checkbox" bind:checked={isMaintenanceMode} /></label>
</div>

<style>
    .buttonsWrap {
        padding-bottom: 4px;
    }
    h3 {
        position: sticky;
        top: 0;
        background-color: var(--modal-background);
    }
    .labelrow {
        margin-left: 0.4em;
        display: flex;
        justify-content: flex-start;
        align-items: center;
        border-top: 1px solid var(--background-modifier-border);
        padding: 4px;
        flex-wrap: wrap;
    }
    .filerow {
        margin-left: 1.25em;
        display: flex;
        justify-content: flex-start;
        align-items: center;
        padding-right: 4px;
        flex-wrap: wrap;
    }

    .filerow.hideeven:has(:global(.even)),
    .labelrow.hideeven:has(:global(.even)) {
        display: none;
    }

    .noterow {
        min-height: 2em;
        display: flex;
    }
    button.status {
        flex-grow: 0;
        margin: 2px 4px;
        min-width: 3em;
        max-width: 4em;
    }
    .statusnote {
        display: flex;
        justify-content: flex-end;
        padding-right: var(--size-4-12);
        align-items: center;
        min-width: 10em;
        flex-grow: 1;
    }
    .list {
        overflow-y: auto;
    }
    .title {
        color: var(--text-normal);
        font-size: var(--font-ui-medium);
        line-height: var(--line-height-tight);
        margin-right: auto;
    }
    .body {
        /* margin-left: 0.4em; */
        margin-left: auto;
        display: flex;
        justify-content: flex-start;
        align-items: center;
        /* flex-wrap: wrap; */
    }
    .filetitle {
        color: var(--text-normal);
        font-size: var(--font-ui-medium);
        line-height: var(--line-height-tight);
        margin-right: auto;
    }
    .buttons {
        display: flex;
        flex-direction: row;
        justify-content: flex-end;
        margin-top: 8px;
        flex-wrap: wrap;
    }
    .buttons > button {
        margin-left: 4px;
        width: auto;
    }

    label {
        display: flex;
        justify-content: center;
        align-items: center;
    }
    label > span {
        margin-right: 0.25em;
    }
    :global(.is-mobile) .title,
    :global(.is-mobile) .filetitle {
        width: 100%;
    }

    .center {
        display: flex;
        justify-content: center;
        align-items: center;
        min-height: 3em;
    }
    .maintenancerow {
        display: flex;
        justify-content: flex-end;
        align-items: center;
    }
    .maintenancerow label {
        margin-right: 0.5em;
        margin-left: 0.5em;
    }

    .loading {
        transition: height 0.25s ease-in-out;
        transition-delay: 4ms;
        overflow-y: hidden;
        flex-shrink: 0;
        display: flex;
        justify-content: flex-start;
        align-items: center;
    }
    .loading:empty {
        height: 0px;
        transition: height 0.25s ease-in-out;
        transition-delay: 1s;
    }
    .loading:not(:empty) {
        height: 2em;
        transition: height 0.25s ease-in-out;
        transition-delay: 0;
    }
</style>
89
src/features/HiddenFileCommon/JsonResolveModal.ts
Normal file
@@ -0,0 +1,89 @@
import { App, Modal } from "../../deps.ts";
import { type FilePath, type LoadedEntry } from "../../lib/src/common/types.ts";
import JsonResolvePane from "./JsonResolvePane.svelte";
import { waitForSignal } from "../../lib/src/common/utils.ts";
import { mount, unmount } from "svelte";

export class JsonResolveModal extends Modal {
    // result: Array<[number, string]>;
    filename: FilePath;
    callback?: (keepRev?: string, mergedStr?: string) => Promise<void>;
    docs: LoadedEntry[];
    component?: ReturnType<typeof mount>;
    nameA: string;
    nameB: string;
    defaultSelect: string;
    keepOrder: boolean;
    hideLocal: boolean;
    title: string = "Conflicted Setting";

    constructor(
        app: App,
        filename: FilePath,
        docs: LoadedEntry[],
        callback: (keepRev?: string, mergedStr?: string) => Promise<void>,
        nameA?: string,
        nameB?: string,
        defaultSelect?: string,
        keepOrder?: boolean,
        hideLocal?: boolean,
        title: string = "Conflicted Setting"
    ) {
        super(app);
        this.callback = callback;
        this.filename = filename;
        this.docs = docs;
        this.nameA = nameA || "";
        this.nameB = nameB || "";
        this.keepOrder = keepOrder || false;
        this.defaultSelect = defaultSelect || "";
        this.title = title;
        this.hideLocal = hideLocal ?? false;
        void waitForSignal(`cancel-internal-conflict:${filename}`).then(() => this.close());
    }

    async UICallback(keepRev?: string, mergedStr?: string) {
        if (this.callback) {
            await this.callback(keepRev, mergedStr);
        }
        this.close();
        this.callback = undefined;
    }

    onOpen() {
        const { contentEl } = this;
        this.titleEl.setText(this.title);
        contentEl.empty();

        if (this.component == undefined) {
            this.component = mount(JsonResolvePane, {
                target: contentEl,
                props: {
                    docs: this.docs,
                    filename: this.filename,
                    nameA: this.nameA,
                    nameB: this.nameB,
                    defaultSelect: this.defaultSelect,
                    keepOrder: this.keepOrder,
                    hideLocal: this.hideLocal,
                    callback: (keepRev: string | undefined, mergedStr: string | undefined) =>
                        this.UICallback(keepRev, mergedStr),
                },
            });
        }
        return;
    }

    onClose() {
        const { contentEl } = this;
        contentEl.empty();
        if (this.callback != undefined) {
            void this.callback(undefined);
        }
        if (this.component != undefined) {
            void unmount(this.component);
            this.component = undefined;
        }
    }
}
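
// Usage sketch (hypothetical caller, for illustration only — everything except
// JsonResolveModal itself is a placeholder): resolve a conflicted JSON file and
// receive the decision through `callback`.
//
//   new JsonResolveModal(app, filename, [localDoc, remoteDoc], async (keepRev, mergedStr) => {
//       if (keepRev) { /* keep revision `keepRev` as-is */ }
//       else if (mergedStr) { /* write the merged JSON back to the file */ }
//       else { /* "Not now" — leave the conflict untouched */ }
//   }, "Local", "Remote").open();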
228
src/features/HiddenFileCommon/JsonResolvePane.svelte
Normal file
@@ -0,0 +1,228 @@
<script lang="ts">
    import { type Diff, DIFF_DELETE, DIFF_INSERT, diff_match_patch } from "../../deps.ts";
    import type { FilePath, LoadedEntry } from "../../lib/src/common/types.ts";
    import { decodeBinary, readString } from "../../lib/src/string_and_binary/convert.ts";
    import { getDocData, isObjectDifferent, mergeObject } from "../../lib/src/common/utils.ts";

    interface Props {
        docs?: LoadedEntry[];
        callback?: (keepRev?: string, mergedStr?: string) => Promise<void>;
        filename?: FilePath;
        nameA?: string;
        nameB?: string;
        defaultSelect?: string;
        keepOrder?: boolean;
        hideLocal?: boolean;
    }

    let {
        docs = $bindable([]),
        callback = $bindable((async (_, __) => {
            Promise.resolve();
        }) as (keepRev?: string, mergedStr?: string) => Promise<void>),
        filename = $bindable("" as FilePath),
        nameA = $bindable("A"),
        nameB = $bindable("B"),
        defaultSelect = $bindable("" as string),
        keepOrder = $bindable(false),
        hideLocal = $bindable(false),
    }: Props = $props();
    type JSONData = Record<string | number | symbol, any> | [any];

    const docsArray = $derived.by(() => {
        if (docs && docs.length >= 2) {
            if (keepOrder || docs[0].mtime < docs[1].mtime) {
                return { a: docs[0], b: docs[1] } as const;
            } else {
                return { a: docs[1], b: docs[0] } as const;
            }
        }
        return { a: false, b: false } as const;
    });
    const docA = $derived(docsArray.a);
    const docB = $derived(docsArray.b);
    const docAContent = $derived(docA && docToString(docA));
    const docBContent = $derived(docB && docToString(docB));

    function parseJson(json: string | false) {
        if (json === false) return false;
        try {
            return JSON.parse(json) as JSONData;
        } catch (ex) {
            return false;
        }
    }
    const objA = $derived(parseJson(docAContent) || {});
    const objB = $derived(parseJson(docBContent) || {});
    const objAB = $derived(mergeObject(objA, objB));
    const objBAw = $derived(mergeObject(objB, objA));
    const objBA = $derived(isObjectDifferent(objBAw, objAB) ? objBAw : false);
    let diffs: Diff[] = $derived.by(() => (objA && selectedObj ? getJsonDiff(objA, selectedObj) : []));
    type SelectModes = "" | "A" | "B" | "AB" | "BA";
    let mode: SelectModes = $state(defaultSelect as SelectModes);

    function docToString(doc: LoadedEntry) {
        return doc.datatype == "plain" ? getDocData(doc.data) : readString(new Uint8Array(decodeBinary(doc.data)));
    }
    function revStringToRevNumber(rev?: string) {
        if (!rev) return "";
        return rev.split("-")[0];
    }

    function getDiff(left: string, right: string) {
        const dmp = new diff_match_patch();
        const mapLeft = dmp.diff_linesToChars_(left, right);
        const diffLeftSrc = dmp.diff_main(mapLeft.chars1, mapLeft.chars2, false);
        dmp.diff_charsToLines_(diffLeftSrc, mapLeft.lineArray);
        return diffLeftSrc;
    }
    function getJsonDiff(a: object, b: object) {
        return getDiff(JSON.stringify(a, null, 2), JSON.stringify(b, null, 2));
    }
    function apply() {
        if (!docA || !docB) return;
        if (docA._id == docB._id) {
            if (mode == "A") return callback(docA._rev!, undefined);
            if (mode == "B") return callback(docB._rev!, undefined);
        } else {
            if (mode == "A") return callback(undefined, docToString(docA));
            if (mode == "B") return callback(undefined, docToString(docB));
        }
        if (mode == "BA") return callback(undefined, JSON.stringify(objBA, null, 2));
        if (mode == "AB") return callback(undefined, JSON.stringify(objAB, null, 2));
        callback(undefined, undefined);
    }
    function cancel() {
        callback(undefined, undefined);
    }
    const mergedObjs = $derived.by(
        () =>
            ({
                "": false,
                A: objA,
                B: objB,
                AB: objAB,
                BA: objBA,
            }) as Record<SelectModes, JSONData | false>
    );

    let selectedObj = $derived(mode in mergedObjs ? mergedObjs[mode] : {});

    let modesSrc = $state([] as ["" | "A" | "B" | "AB" | "BA", string][]);

    const modes = $derived.by(() => {
        let newModes = [] as typeof modesSrc;

        if (!hideLocal) {
            newModes.push(["", "Not now"]);
            newModes.push(["A", nameA || "A"]);
        }
        newModes.push(["B", nameB || "B"]);
        newModes.push(["AB", `${nameA || "A"} + ${nameB || "B"}`]);
        newModes.push(["BA", `${nameB || "B"} + ${nameA || "A"}`]);
        return newModes;
    });
</script>

<h2>{filename}</h2>
{#if !docA || !docB}
    <div class="message">Just for a minute, please!</div>
    <div class="buttons">
        <button onclick={apply}>Dismiss</button>
    </div>
{:else}
    <div class="options">
        {#each modes as m}
            {#if m[0] == "" || mergedObjs[m[0]] != false}
                <label class={`sls-setting-label ${m[0] == mode ? "selected" : ""}`}
                    ><input type="radio" name="disp" bind:group={mode} value={m[0]} class="sls-setting-tab" />
                    <div class="sls-setting-menu-btn">{m[1]}</div></label
                >
            {/if}
        {/each}
    </div>

    {#if selectedObj != false}
        <div class="op-scrollable json-source">
            {#each diffs as diff}
                <span class={diff[0] == DIFF_DELETE ? "deleted" : diff[0] == DIFF_INSERT ? "added" : "normal"}
                    >{diff[1]}</span
                >
            {/each}
        </div>
    {:else}
        NO PREVIEW
    {/if}

    <div class="infos">
        <table>
            <tbody>
                <tr>
                    <th>{nameA}</th>
                    <td
                        >{#if docA._id == docB._id}
                            Rev:{revStringToRevNumber(docA._rev)}
                        {/if}
                        {new Date(docA.mtime).toLocaleString()}</td
                    >
                    <td>
                        {docAContent && docAContent.length} letters
                    </td>
                </tr>
                <tr>
                    <th>{nameB}</th>
                    <td
                        >{#if docA._id == docB._id}
                            Rev:{revStringToRevNumber(docB._rev)}
                        {/if}
                        {new Date(docB.mtime).toLocaleString()}</td
                    >
                    <td>
                        {docBContent && docBContent.length} letters
                    </td>
                </tr>
            </tbody>
        </table>
    </div>

    <div class="buttons">
        {#if hideLocal}
            <button onclick={cancel}>Cancel</button>
        {/if}
        <button onclick={apply}>Apply</button>
    </div>
{/if}

<style>
    .spacer {
        flex-grow: 1;
    }
    .infos {
        display: flex;
        justify-content: space-between;
        margin: 4px 0.5em;
    }

    .deleted {
        text-decoration: line-through;
    }
    * {
        box-sizing: border-box;
    }

    .scroller {
        display: flex;
        flex-direction: column;
        overflow-y: scroll;
        max-height: 60vh;
        user-select: text;
        -webkit-user-select: text;
    }
    .json-source {
        white-space: pre;
        height: auto;
        overflow: auto;
        min-height: var(--font-ui-medium);
        flex-grow: 1;
    }
</style>
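<!--
    Note: `getDiff` above follows the documented diff-match-patch "line mode" recipe
    (diff_linesToChars_ / diff_main / diff_charsToLines_): texts are mapped line-to-char,
    diffed cheaply, then mapped back. Each Diff tuple in `diffs` therefore covers whole
    lines, which is why the preview can mark spans as added/deleted without sub-line noise.
-->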
1929
src/features/HiddenFileSync/CmdHiddenFileSync.ts
Normal file
102
src/features/LiveSyncCommands.ts
Normal file
@@ -0,0 +1,102 @@
import { LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import { getPath } from "../common/utils.ts";
import {
    LOG_LEVEL_INFO,
    LOG_LEVEL_NOTICE,
    type AnyEntry,
    type DocumentID,
    type FilePath,
    type FilePathWithPrefix,
    type LOG_LEVEL,
} from "../lib/src/common/types.ts";
import type ObsidianLiveSyncPlugin from "../main.ts";
import { MARK_DONE } from "../modules/features/ModuleLog.ts";
import type { LiveSyncCore } from "../main.ts";
import { __$checkInstanceBinding } from "../lib/src/dev/checks.ts";

let noticeIndex = 0;
export abstract class LiveSyncCommands {
    plugin: ObsidianLiveSyncPlugin;
    get app() {
        return this.plugin.app;
    }
    get settings() {
        return this.plugin.settings;
    }
    get localDatabase() {
        return this.plugin.localDatabase;
    }
    get services() {
        return this.plugin.services;
    }

    // id2path(id: DocumentID, entry?: EntryHasPath, stripPrefix?: boolean): FilePathWithPrefix {
    //     return this.plugin.$$id2path(id, entry, stripPrefix);
    // }
    async path2id(filename: FilePathWithPrefix | FilePath, prefix?: string): Promise<DocumentID> {
        return await this.services.path.path2id(filename, prefix);
    }
    getPath(entry: AnyEntry): FilePathWithPrefix {
        return getPath(entry);
    }

    constructor(plugin: ObsidianLiveSyncPlugin) {
        this.plugin = plugin;
        this.onBindFunction(plugin, plugin.services);
        __$checkInstanceBinding(this);
    }
    abstract onunload(): void;
    abstract onload(): void | Promise<void>;

    _isMainReady() {
        return this.plugin.services.appLifecycle.isReady();
    }
    _isMainSuspended() {
        return this.services.appLifecycle.isSuspended();
    }
    _isDatabaseReady() {
        return this.services.database.isDatabaseReady();
    }

    _log = (msg: any, level: LOG_LEVEL = LOG_LEVEL_INFO, key?: string) => {
        if (typeof msg === "string" && level !== LOG_LEVEL_NOTICE) {
            msg = `[${this.constructor.name}]\u{200A} ${msg}`;
        }
        // console.log(msg);
        Logger(msg, level, key);
    };

    _verbose = (msg: any, key?: string) => {
        this._log(msg, LOG_LEVEL_VERBOSE, key);
    };

    _info = (msg: any, key?: string) => {
        this._log(msg, LOG_LEVEL_INFO, key);
    };

    _notice = (msg: any, key?: string) => {
        this._log(msg, LOG_LEVEL_NOTICE, key);
    };
    _progress = (prefix: string = "", level: LOG_LEVEL = LOG_LEVEL_NOTICE) => {
        const key = `keepalive-progress-${noticeIndex++}`;
        return {
            log: (msg: any) => {
                this._log(prefix + msg, level, key);
            },
            once: (msg: any) => {
                this._log(prefix + msg, level);
            },
            done: (msg: string = "Done") => {
                this._log(prefix + msg + MARK_DONE, level, key);
            },
        };
    };

    _debug = (msg: any, key?: string) => {
        this._log(msg, LOG_LEVEL_VERBOSE, key);
    };

    onBindFunction(core: LiveSyncCore, services: typeof core.services) {
        // Override if needed.
    }
}
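
// Usage sketch for `_progress` (illustrative): all `log` calls share one keyed
// notice, so they overwrite each other instead of stacking.
//
//   const p = this._progress("Maintenance: ");
//   p.log("scanning...");   // updates the keyed notice in place
//   p.once("checkpoint");   // one-shot message, not tied to the key
//   p.done();               // appends MARK_DONE so the notice is closed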
488
src/features/LocalDatabaseMainte/CmdLocalDatabaseMainte.ts
Normal file
@@ -0,0 +1,488 @@
import { sizeToHumanReadable } from "octagonal-wheels/number";
import {
    EntryTypes,
    LOG_LEVEL_INFO,
    LOG_LEVEL_NOTICE,
    LOG_LEVEL_VERBOSE,
    type DocumentID,
    type EntryDoc,
    type EntryLeaf,
    type MetaEntry,
} from "../../lib/src/common/types";
import { getNoFromRev } from "../../lib/src/pouchdb/LiveSyncLocalDB";
import { LiveSyncCommands } from "../LiveSyncCommands";
import { serialized } from "octagonal-wheels/concurrency/lock_v2";
import { arrayToChunkedArray } from "octagonal-wheels/collection";
const DB_KEY_SEQ = "gc-seq";
const DB_KEY_CHUNK_SET = "chunk-set";
const DB_KEY_DOC_USAGE_MAP = "doc-usage-map";
type ChunkID = DocumentID;
type NoteDocumentID = DocumentID;
type Rev = string;

type ChunkUsageMap = Map<NoteDocumentID, Map<Rev, Set<ChunkID>>>;
export class LocalDatabaseMaintenance extends LiveSyncCommands {
    onunload(): void {
        // NO OP.
    }
    onload(): void | Promise<void> {
        // NO OP.
    }
    async allChunks(includeDeleted: boolean = false) {
        const p = this._progress("", LOG_LEVEL_NOTICE);
        p.log("Retrieving chunk information..");
        try {
            const ret = await this.localDatabase.allChunks(includeDeleted);
            return ret;
        } finally {
            p.done();
        }
    }
    get database() {
        return this.localDatabase.localDatabase;
    }
    clearHash() {
        this.localDatabase.clearCaches();
    }

    async confirm(title: string, message: string, affirmative = "Yes", negative = "No") {
        return (
            (await this.plugin.confirm.askSelectStringDialogue(message, [affirmative, negative], {
                title,
                defaultAction: affirmative,
            })) === affirmative
        );
    }
    isAvailable() {
        if (!this.settings.doNotUseFixedRevisionForChunks) {
            this._notice("Please enable 'Compute revisions for chunks' in settings to use Garbage Collection.");
            return false;
        }
        if (this.settings.readChunksOnline) {
            this._notice("Please disable 'Read chunks online' in settings to use Garbage Collection.");
            return false;
        }
        return true;
    }
    /**
     * Resurrect deleted chunks that are still used in the database.
     */
    async resurrectChunks() {
        if (!this.isAvailable()) return;
        const { used, existing } = await this.allChunks(true);
        const excessiveDeletions = [...existing]
            .filter(([key, e]) => e._deleted)
            .filter(([key, e]) => used.has(e._id))
            .map(([key, e]) => e);
        const completelyLostChunks = [] as string[];
        // Data-lost chunks: chunks that are deleted and whose data has been purged.
        const dataLostChunks = [...existing]
            .filter(([key, e]) => e._deleted && e.data === "")
            .map(([key, e]) => e)
            .filter((e) => used.has(e._id));
        for (const e of dataLostChunks) {
            // Retrieve the data from a previous revision.
            const doc = await this.database.get(e._id, { rev: e._rev, revs: true, revs_info: true, conflicts: true });
            const history = doc._revs_info || [];
            // Chunks are immutable. So, we can resurrect the chunk by copying the data from any of the previous revisions.
            let resurrected = null as null | string;
            const availableRevs = history
                .filter((e) => e.status == "available")
                .map((e) => e.rev)
                .sort((a, b) => getNoFromRev(a) - getNoFromRev(b));
            for (const rev of availableRevs) {
                const revDoc = await this.database.get(e._id, { rev: rev });
                if (revDoc.type == "leaf" && revDoc.data !== "") {
                    // Found the data.
                    resurrected = revDoc.data;
                    break;
                }
            }
            // If the data is not found, we cannot resurrect the chunk; record it as completely lost.
            if (resurrected !== null) {
                excessiveDeletions.push({ ...e, data: resurrected, _deleted: false });
            } else {
                completelyLostChunks.push(e._id);
            }
        }
        // Chunks to be resurrected.
        const resurrectChunks = excessiveDeletions.filter((e) => e.data !== "").map((e) => ({ ...e, _deleted: false }));

        if (resurrectChunks.length == 0) {
            this._notice("No chunks were found to be resurrected.");
            return;
        }
        const message = `We have the following chunks that are deleted but still used in the database.

- Completely lost chunks: ${completelyLostChunks.length}
- Resurrectable chunks: ${resurrectChunks.length}

Do you want to resurrect these chunks?`;
        if (await this.confirm("Resurrect Chunks", message, "Resurrect", "Cancel")) {
            const result = await this.database.bulkDocs(resurrectChunks);
            this.clearHash();
            const resurrectedChunks = result.filter((e) => "ok" in e).map((e) => e.id);
            this._notice(`Resurrected chunks: ${resurrectedChunks.length} / ${resurrectChunks.length}`);
        } else {
            this._notice("The resurrect operation has been cancelled.");
        }
    }
    /**
     * Commit deletion of files that are marked as deleted.
     * This method makes the deletion permanent, and the files will not be recovered.
     * After this, chunks that are used in the deleted files become ready for compaction.
     */
    async commitFileDeletion() {
        if (!this.isAvailable()) return;
        const p = this._progress("", LOG_LEVEL_NOTICE);
        p.log("Searching for deleted files..");
        const docs = await this.database.allDocs<MetaEntry>({ include_docs: true });
        const deletedDocs = docs.rows.filter(
            (e) => (e.doc?.type == "newnote" || e.doc?.type == "plain") && e.doc?.deleted
        );
        if (deletedDocs.length == 0) {
            p.done("No deleted files found.");
            return;
        }
        p.log(`Found ${deletedDocs.length} deleted files.`);

        const message = `We have the following files that are marked as deleted.

- Deleted files: ${deletedDocs.length}

Are you sure you want to delete these files permanently?

Note: **Make sure to synchronise all devices before deletion.**

> [!Note]
> This operation affects the database permanently. Deleted files will not be recovered after this operation.
> Also, the chunks that are used in the deleted files will become ready for compaction.`;

        const deletingDocs = deletedDocs.map((e) => ({ ...e.doc, _deleted: true }) as MetaEntry);

        if (await this.confirm("Delete Files", message, "Delete", "Cancel")) {
            const result = await this.database.bulkDocs(deletingDocs);
            this.clearHash();
            p.done(`Deleted ${result.filter((e) => "ok" in e).length} / ${deletedDocs.length} files.`);
        } else {
            p.done("The deletion operation has been cancelled.");
        }
    }
    /**
     * Commit deletion of chunks that are not used in the database.
     * This method makes the deletion permanent; the chunks cannot be recovered once the database runs compaction.
     * After this, compaction can shrink the database size.
     * It is recommended to compact the database after this operation (history should be kept once before compaction).
     */
    async commitChunkDeletion() {
        if (!this.isAvailable()) return;
        const { existing } = await this.allChunks(true);
        const deletedChunks = [...existing].filter(([key, e]) => e._deleted && e.data !== "").map(([key, e]) => e);
        const deletedNotVacantChunks = deletedChunks.map((e) => ({ ...e, data: "", _deleted: true }));
        const size = deletedChunks.reduce((acc, e) => acc + e.data.length, 0);
        const humanSize = sizeToHumanReadable(size);
        const message = `We have the following chunks that are marked as deleted.

- Deleted chunks: ${deletedNotVacantChunks.length} (${humanSize})

Are you sure you want to delete these chunks permanently?

Note: **Make sure to synchronise all devices before deletion.**

> [!Note]
> This operation actually reduces the capacity used on the remote.`;

        if (deletedNotVacantChunks.length == 0) {
            this._notice("No deleted chunks found.");
            return;
        }
        if (await this.confirm("Delete Chunks", message, "Delete", "Cancel")) {
            const result = await this.database.bulkDocs(deletedNotVacantChunks);
            this.clearHash();
            this._notice(
                `Deleted chunks: ${result.filter((e) => "ok" in e).length} / ${deletedNotVacantChunks.length}`
            );
        } else {
            this._notice("The deletion operation has been cancelled.");
        }
    }
    /**
     * Mark chunks that are not used by any file as deleted.
     * Make sure all devices are synchronised before running this method.
     */
    async markUnusedChunks() {
        if (!this.isAvailable()) return;
        const { used, existing } = await this.allChunks();
        const existChunks = [...existing];
        const unusedChunks = existChunks.filter(([key, e]) => !used.has(e._id)).map(([key, e]) => e);
        const deleteChunks = unusedChunks.map((e) => ({
            ...e,
            _deleted: true,
        }));
        const size = deleteChunks.reduce((acc, e) => acc + e.data.length, 0);
        const humanSize = sizeToHumanReadable(size);
        if (deleteChunks.length == 0) {
            this._notice("No unused chunks found.");
            return;
        }
        const message = `We have the following chunks that are not used by any file.

- Chunks: ${deleteChunks.length} (${humanSize})

Are you sure you want to mark these chunks as deleted?

Note: **Make sure to synchronise all devices before deletion.**

> [!Note]
> This operation will not reduce the capacity used on the remote until permanent deletion.`;

        if (await this.confirm("Mark unused chunks", message, "Mark", "Cancel")) {
            const result = await this.database.bulkDocs(deleteChunks);
            this.clearHash();
            this._notice(`Marked chunks: ${result.filter((e) => "ok" in e).length} / ${deleteChunks.length}`);
        }
    }

    async removeUnusedChunks() {
        const { used, existing } = await this.allChunks();
        const existChunks = [...existing];
        const unusedChunks = existChunks.filter(([key, e]) => !used.has(e._id)).map(([key, e]) => e);
        const deleteChunks = unusedChunks.map((e) => ({
            ...e,
            data: "",
            _deleted: true,
        }));
        const size = unusedChunks.reduce((acc, e) => acc + e.data.length, 0);
        const humanSize = sizeToHumanReadable(size);
        if (deleteChunks.length == 0) {
            this._notice("No unused chunks found.");
            return;
        }
        const message = `We have the following chunks that are not used by any file.

- Chunks: ${deleteChunks.length} (${humanSize})

Are you sure you want to delete these chunks?

Note: **Make sure to synchronise all devices before deletion.**

> [!Note]
> Chunks referenced from deleted files are not deleted. Please run "Commit File Deletion" before this operation.`;

        if (await this.confirm("Delete unused chunks", message, "Delete", "Cancel")) {
            const result = await this.database.bulkDocs(deleteChunks);
            this._notice(`Deleted chunks: ${result.filter((e) => "ok" in e).length} / ${deleteChunks.length}`);
            this.clearHash();
        }
    }

    async scanUnusedChunks() {
        const kvDB = this.plugin.kvDB;
        const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();
        const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();
        const KEEP_MAX_REVS = 10;
        const unusedSet = new Set<DocumentID>([...chunkSet]);
        for (const [, revIdMap] of chunkUsageMap) {
            const sortedRevId = [...revIdMap.entries()].sort((a, b) => getNoFromRev(b[0]) - getNoFromRev(a[0]));
            if (sortedRevId.length > KEEP_MAX_REVS) {
                // If we have more revisions than we want to keep, we need to delete the extras
            }
            const keepRevID = sortedRevId.slice(0, KEEP_MAX_REVS);
            keepRevID.forEach((e) => e[1].forEach((ee) => unusedSet.delete(ee)));
        }
        return {
            chunkSet,
            chunkUsageMap,
            unusedSet,
        };
    }
    /**
     * Track changes in the database and update the chunk usage map for garbage collection.
     * Note that this can only be performed while 'Fetch chunks on demand' is disabled.
     */
    async trackChanges(fromStart: boolean = false, showNotice: boolean = false) {
        if (!this.isAvailable()) return;
        const logLevel = showNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
        const kvDB = this.plugin.kvDB;

        const previousSeq = fromStart ? "" : await kvDB.get<string>(DB_KEY_SEQ);
        const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();

        const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();

        const db = this.localDatabase.localDatabase;
        const verbose = (msg: string) => this._verbose(msg);

        const processDoc = async (doc: EntryDoc, isDeleted: boolean) => {
            if (!("children" in doc)) {
                return;
            }
            const id = doc._id;
            const rev = doc._rev!;
            const deleted = doc._deleted || isDeleted;
            const softDeleted = doc.deleted;
            const children = (doc.children || []) as DocumentID[];
            if (!chunkUsageMap.has(id)) {
                chunkUsageMap.set(id, new Map<Rev, Set<ChunkID>>());
            }
            for (const chunkId of children) {
                if (deleted) {
                    chunkUsageMap.get(id)!.delete(rev);
                    // chunkSet.add(chunkId as DocumentID);
                } else {
                    if (softDeleted) {
                        //TODO: Soft delete
                        chunkUsageMap.get(id)!.set(rev, (chunkUsageMap.get(id)!.get(rev) || new Set()).add(chunkId));
                    } else {
                        chunkUsageMap.get(id)!.set(rev, (chunkUsageMap.get(id)!.get(rev) || new Set()).add(chunkId));
                    }
                }
            }
            verbose(
                `Tracking chunk: ${id}/${rev} (${doc?.path}), deleted: ${deleted ? "yes" : "no"} Soft-Deleted:${softDeleted ? "yes" : "no"}`
            );
            return await Promise.resolve();
        };
        // let saveQueue = 0;
        const saveState = async (seq: string | number) => {
            await kvDB.set(DB_KEY_SEQ, seq);
            await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
            await kvDB.set(DB_KEY_DOC_USAGE_MAP, chunkUsageMap);
        };

        const processDocRevisions = async (doc: EntryDoc) => {
            try {
                const oldRevisions = await db.get(doc._id, { revs: true, revs_info: true, conflicts: true });
                const allRevs = oldRevisions._revs_info?.length || 0;
                const info = (oldRevisions._revs_info || [])
                    .filter((e) => e.status == "available" && e.rev != doc._rev)
                    .filter((info) => !chunkUsageMap.get(doc._id)?.has(info.rev));
                const infoLength = info.length;
                this._log(`Found ${allRevs} old revisions for ${doc._id}. ${infoLength} items to check.`);
                if (info.length > 0) {
                    const oldDocs = await Promise.all(
                        info
                            .filter((revInfo) => revInfo.status == "available")
                            .map((revInfo) => db.get(doc._id, { rev: revInfo.rev }))
                    ).then((docs) => docs.filter((doc) => doc));
                    for (const oldDoc of oldDocs) {
                        await processDoc(oldDoc as EntryDoc, false);
                    }
                }
            } catch (ex) {
                if ((ex as any)?.status == 404) {
                    this._log(`No revisions found for ${doc._id}`, LOG_LEVEL_VERBOSE);
                } else {
                    this._log(`Error finding revisions for ${doc._id}`);
                    this._verbose(ex);
                }
            }
        };
        const processChange = async (doc: EntryDoc, isDeleted: boolean, seq: string | number) => {
            if (doc.type === EntryTypes.CHUNK) {
                if (isDeleted) return;
                chunkSet.add(doc._id);
            } else if ("children" in doc) {
                await processDoc(doc, isDeleted);
                await serialized("x-process-doc", async () => await processDocRevisions(doc));
            }
        };
        // Track changes
        let i = 0;
        await db
            .changes({
                since: previousSeq || "",
                live: false,
                conflicts: true,
                include_docs: true,
                style: "all_docs",
                return_docs: false,
            })
            .on("change", async (change) => {
                // handle change
                await processChange(change.doc!, change.deleted ?? false, change.seq);
                if (i++ % 100 == 0) {
                    await saveState(change.seq);
                }
            })
            .on("complete", async (info) => {
                await saveState(info.last_seq);
            });

        // Track all changed docs and new leafs;

        const result = await this.scanUnusedChunks();

        const message = `Total chunks: ${result.chunkSet.size}\nUnused chunks: ${result.unusedSet.size}`;
        this._log(message, logLevel);
    }
    async performGC(showingNotice = false) {
        if (!this.isAvailable()) return;
        await this.trackChanges(false, showingNotice);
        const title = "Are all devices synchronised?";
        const confirmMessage = `This function deletes unused chunks from the device. If there are differences between devices, some chunks may be missing when resolving conflicts.
Be sure to synchronise before executing.

However, if you have deleted them, you may be able to recover them by performing Hatch -> Recreate missing chunks for all files.

Are you ready to delete unused chunks?`;

        const logLevel = showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;

        const BUTTON_OK = `Yes, delete chunks`;
        const BUTTON_CANCEL = "Cancel";

        const result = await this.plugin.confirm.askSelectStringDialogue(
            confirmMessage,
            [BUTTON_OK, BUTTON_CANCEL] as const,
            {
                title,
                defaultAction: BUTTON_CANCEL,
            }
        );
        if (result !== BUTTON_OK) {
            this._log("User cancelled chunk deletion", logLevel);
            return;
        }
        const { unusedSet, chunkSet } = await this.scanUnusedChunks();
        const deleteChunks = await this.database.allDocs({
            keys: [...unusedSet],
            include_docs: true,
        });
        for (const chunk of deleteChunks.rows) {
            if ((chunk as any)?.value?.deleted) {
                chunkSet.delete(chunk.key as DocumentID);
            }
        }
        const deleteDocs = deleteChunks.rows
            .filter((e) => "doc" in e)
            .map((e) => ({
                ...(e as any).doc!,
                _deleted: true,
            }));

        this._log(`Deleting chunks: ${deleteDocs.length}`, logLevel);
        const deleteChunkBatch = arrayToChunkedArray(deleteDocs, 100);
        let successCount = 0;
        let errored = 0;
        for (const batch of deleteChunkBatch) {
            const results = await this.database.bulkDocs(batch as EntryLeaf[]);
            for (const result of results) {
                if ("ok" in result) {
                    chunkSet.delete(result.id as DocumentID);
                    successCount++;
                } else {
                    this._log(`Failed to delete doc: ${result.id}`, LOG_LEVEL_VERBOSE);
                    errored++;
                }
            }
            this._log(`Deleting chunks: ${successCount}`, logLevel, "gc-performing");
        }
        const message = `Garbage Collection completed.
Success: ${successCount}, Errored: ${errored}`;
        this._log(message, logLevel);
        const kvDB = this.plugin.kvDB;
        await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
    }
}
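
// Shape sketch of ChunkUsageMap (illustrative values only): per note, per revision,
// the set of chunk IDs that the revision references. `scanUnusedChunks` keeps the
// newest KEEP_MAX_REVS revisions per note and treats every chunk referenced only by
// older revisions as unused.
//
//   Map { "noteId" => Map { "12-ab34cd" => Set { "h:chunk1", "h:chunk2" } } }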
281
src/features/P2PSync/CmdP2PReplicator.ts
Normal file
@@ -0,0 +1,281 @@
import { P2PReplicatorPaneView, VIEW_TYPE_P2P } from "./P2PReplicator/P2PReplicatorPaneView.ts";
import {
    AutoAccepting,
    LOG_LEVEL_NOTICE,
    P2P_DEFAULT_SETTINGS,
    REMOTE_P2P,
    type EntryDoc,
    type P2PSyncSetting,
    type RemoteDBSettings,
} from "../../lib/src/common/types.ts";
import { LiveSyncCommands } from "../LiveSyncCommands.ts";
import {
    LiveSyncTrysteroReplicator,
    setReplicatorFunc,
} from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator.ts";
import { EVENT_REQUEST_OPEN_P2P, eventHub } from "../../common/events.ts";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator.ts";
import { LOG_LEVEL_INFO, LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import type { CommandShim } from "../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
import {
    addP2PEventHandlers,
    closeP2PReplicator,
    openP2PReplicator,
    P2PLogCollector,
    removeP2PReplicatorInstance,
    type P2PReplicatorBase,
} from "../../lib/src/replication/trystero/P2PReplicatorCore.ts";
import { reactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
import type { Confirm } from "../../lib/src/interfaces/Confirm.ts";
import type ObsidianLiveSyncPlugin from "../../main.ts";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
import { getPlatformName } from "../../lib/src/PlatformAPIs/obsidian/Environment.ts";
import type { LiveSyncCore } from "../../main.ts";
import { TrysteroReplicator } from "../../lib/src/replication/trystero/TrysteroReplicator.ts";
import { SETTING_KEY_P2P_DEVICE_NAME } from "../../lib/src/common/types.ts";

export class P2PReplicator extends LiveSyncCommands implements P2PReplicatorBase, CommandShim {
    storeP2PStatusLine = reactiveSource("");

    getSettings(): P2PSyncSetting {
        return this.plugin.settings;
    }
    get settings() {
        return this.plugin.settings;
    }
    getDB() {
        return this.plugin.localDatabase.localDatabase;
    }

    get confirm(): Confirm {
        return this.plugin.confirm;
    }
    _simpleStore!: SimpleStore<any>;

    simpleStore(): SimpleStore<any> {
        return this._simpleStore;
    }

    constructor(plugin: ObsidianLiveSyncPlugin) {
        super(plugin);
        setReplicatorFunc(() => this._replicatorInstance);
        addP2PEventHandlers(this);
        this.afterConstructor();
        // onBindFunction is called in the super class
        // this.onBindFunction(plugin, plugin.services);
    }

    async handleReplicatedDocuments(docs: EntryDoc[]): Promise<void> {
        // console.log("Processing Replicated Docs", docs);
        return await this.services.replication.parseSynchroniseResult(
            docs as PouchDB.Core.ExistingDocument<EntryDoc>[]
        );
    }

    _anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator> {
        const settings = { ...this.settings, ...settingOverride };
        if (settings.remoteType == REMOTE_P2P) {
            return Promise.resolve(new LiveSyncTrysteroReplicator(this.plugin));
        }
        return undefined!;
    }
    _replicatorInstance?: TrysteroReplicator;
    p2pLogCollector = new P2PLogCollector();

    afterConstructor() {
        return;
    }

    async open() {
        await openP2PReplicator(this);
    }
    async close() {
        await closeP2PReplicator(this);
    }

    getConfig(key: string) {
        return this.services.config.getSmallConfig(key);
    }
    setConfig(key: string, value: string) {
        return this.services.config.setSmallConfig(key, value);
    }
    enableBroadcastCastings() {
        return this?._replicatorInstance?.enableBroadcastChanges();
    }
    disableBroadcastCastings() {
        return this?._replicatorInstance?.disableBroadcastChanges();
    }

    init() {
        this._simpleStore = this.services.database.openSimpleStore("p2p-sync");
        return Promise.resolve(this);
    }

    async initialiseP2PReplicator(): Promise<TrysteroReplicator> {
        await this.init();
        try {
            if (this._replicatorInstance) {
                await this._replicatorInstance.close();
                this._replicatorInstance = undefined;
            }

            if (!this.settings.P2P_AppID) {
                this.settings.P2P_AppID = P2P_DEFAULT_SETTINGS.P2P_AppID;
            }
            const getInitialDeviceName = () =>
                this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) || this.services.vault.getVaultName();

            const getSettings = () => this.settings;
            const store = () => this.simpleStore();
            const getDB = () => this.getDB();

            const getConfirm = () => this.confirm;
            const getPlatform = () => this.getPlatform();
            const env = {
                get db() {
                    return getDB();
                },
                get confirm() {
                    return getConfirm();
                },
                get deviceName() {
                    return getInitialDeviceName();
                },
                get platform() {
                    return getPlatform();
                },
                get settings() {
                    return getSettings();
                },
                processReplicatedDocs: async (docs: EntryDoc[]): Promise<void> => {
                    await this.handleReplicatedDocuments(docs);
                },
                get simpleStore() {
                    return store();
                },
            };
            this._replicatorInstance = new TrysteroReplicator(env);
            return this._replicatorInstance;
        } catch (e) {
            this._log(
                e instanceof Error ? e.message : "Something went wrong while initialising the P2P Replicator",
                LOG_LEVEL_INFO
            );
            this._log(e, LOG_LEVEL_VERBOSE);
            throw e;
        }
    }
    getPlatform(): string {
        return getPlatformName();
    }

    onunload(): void {
        removeP2PReplicatorInstance();
        void this.close();
    }

    onload(): void | Promise<void> {
        eventHub.onEvent(EVENT_REQUEST_OPEN_P2P, () => {
            void this.openPane();
        });
        this.p2pLogCollector.p2pReplicationLine.onChanged((line) => {
            this.storeP2PStatusLine.value = line.value;
        });
    }
    async _everyOnInitializeDatabase(): Promise<boolean> {
        await this.initialiseP2PReplicator();
        return Promise.resolve(true);
    }

    private async _allSuspendExtraSync() {
        this.plugin.settings.P2P_Enabled = false;
        this.plugin.settings.P2P_AutoAccepting = AutoAccepting.NONE;
        this.plugin.settings.P2P_AutoBroadcast = false;
        this.plugin.settings.P2P_AutoStart = false;
        this.plugin.settings.P2P_AutoSyncPeers = "";
        this.plugin.settings.P2P_AutoWatchPeers = "";
        return await Promise.resolve(true);
    }

    // async $everyOnLoadStart() {
    //     return await Promise.resolve();
    // }

    async openPane() {
        await this.services.API.showWindow(VIEW_TYPE_P2P);
    }

    async _everyOnloadStart(): Promise<boolean> {
        this.plugin.registerView(VIEW_TYPE_P2P, (leaf) => new P2PReplicatorPaneView(leaf, this.plugin));
        this.plugin.addCommand({
            id: "open-p2p-replicator",
            name: "P2P Sync : Open P2P Replicator",
            callback: async () => {
                await this.openPane();
            },
        });
        this.plugin.addCommand({
            id: "p2p-establish-connection",
            name: "P2P Sync : Connect to the Signalling Server",
            checkCallback: (isChecking) => {
                if (isChecking) {
                    return !(this._replicatorInstance?.server?.isServing ?? false);
                }
                void this.open();
            },
        });
        this.plugin.addCommand({
            id: "p2p-close-connection",
            name: "P2P Sync : Disconnect from the Signalling Server",
            checkCallback: (isChecking) => {
                if (isChecking) {
                    return this._replicatorInstance?.server?.isServing ?? false;
                }
                Logger(`Closing P2P Connection`, LOG_LEVEL_NOTICE);
                void this.close();
            },
        });
        this.plugin.addCommand({
            id: "replicate-now-by-p2p",
            name: "Replicate now by P2P",
            checkCallback: (isChecking) => {
                if (isChecking) {
                    if (this.settings.remoteType == REMOTE_P2P) return false;
                    if (!this._replicatorInstance?.server?.isServing) return false;
                    return true;
                }
                void this._replicatorInstance?.replicateFromCommand(false);
            },
        });
        this.plugin
            .addRibbonIcon("waypoints", "P2P Replicator", async () => {
                await this.openPane();
            })
            .addClass("livesync-ribbon-replicate-p2p");

        return await Promise.resolve(true);
    }
    _everyAfterResumeProcess(): Promise<boolean> {
        if (this.settings.P2P_Enabled && this.settings.P2P_AutoStart) {
            setTimeout(() => void this.open(), 100);
        }
        const rep = this._replicatorInstance;
        rep?.allowReconnection();
        return Promise.resolve(true);
    }
    _everyBeforeSuspendProcess(): Promise<boolean> {
        const rep = this._replicatorInstance;
        rep?.disconnectFromServer();
        return Promise.resolve(true);
    }

    override onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replicator.handleGetNewReplicator(this._anyNewReplicator.bind(this));
        services.databaseEvents.handleOnDatabaseInitialisation(this._everyOnInitializeDatabase.bind(this));
        services.appLifecycle.handleOnInitialise(this._everyOnloadStart.bind(this));
        services.appLifecycle.handleOnSuspending(this._everyBeforeSuspendProcess.bind(this));
        services.appLifecycle.handleOnResumed(this._everyAfterResumeProcess.bind(this));
        services.setting.handleSuspendExtraSync(this._allSuspendExtraSync.bind(this));
    }
}
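
// Usage sketch (illustrative; mirrors the `getAddOn` call commented out in
// P2PReplicatorPane.svelte): other modules reach this feature through the
// CommandShim interface, e.g.
//
//   const p2p = plugin.getAddOn<P2PReplicator>("P2PReplicator");
//   await p2p?.open();               // connect to the signalling server
//   p2p?.enableBroadcastCastings();  // start broadcasting local changes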
496
src/features/P2PSync/P2PReplicator/P2PReplicatorPane.svelte
Normal file
@@ -0,0 +1,496 @@
|
||||
<script lang="ts">
|
||||
import { onMount, setContext } from "svelte";
|
||||
import { AutoAccepting, DEFAULT_SETTINGS, type P2PSyncSetting } from "../../../lib/src/common/types";
|
||||
import {
|
||||
AcceptedStatus,
|
||||
ConnectionStatus,
|
||||
type CommandShim,
|
||||
type PeerStatus,
|
||||
type PluginShim,
|
||||
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
|
||||
import PeerStatusRow from "../P2PReplicator/PeerStatusRow.svelte";
|
||||
import { EVENT_LAYOUT_READY, eventHub } from "../../../common/events";
|
||||
import {
|
||||
type PeerInfo,
|
||||
type P2PServerInfo,
|
||||
EVENT_SERVER_STATUS,
|
||||
EVENT_REQUEST_STATUS,
|
||||
EVENT_P2P_REPLICATOR_STATUS,
|
||||
} from "../../../lib/src/replication/trystero/TrysteroReplicatorP2PServer";
|
||||
import { type P2PReplicatorStatus } from "../../../lib/src/replication/trystero/TrysteroReplicator";
|
||||
import { $msg as _msg } from "../../../lib/src/common/i18n";
|
||||
import { SETTING_KEY_P2P_DEVICE_NAME } from "../../../lib/src/common/types";
|
||||
|
||||
interface Props {
|
||||
plugin: PluginShim;
|
||||
cmdSync: CommandShim;
|
||||
}
|
||||
|
||||
let { plugin, cmdSync }: Props = $props();
|
||||
// const cmdSync = plugin.getAddOn<P2PReplicator>("P2PReplicator")!;
|
||||
setContext("getReplicator", () => cmdSync);
|
||||
|
||||
const initialSettings = { ...plugin.settings };
|
||||
|
||||
let settings = $state<P2PSyncSetting>(initialSettings);
|
||||
// const vaultName = service.vault.getVaultName();
|
||||
// const dbKey = `${vaultName}-p2p-device-name`;
|
||||
|
||||
const initialDeviceName = cmdSync.getConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? plugin.services.vault.getVaultName();
|
||||
let deviceName = $state<string>(initialDeviceName);
|
||||
|
||||
let eP2PEnabled = $state<boolean>(initialSettings.P2P_Enabled);
|
||||
let eRelay = $state<string>(initialSettings.P2P_relays);
|
||||
let eRoomId = $state<string>(initialSettings.P2P_roomID);
|
||||
let ePassword = $state<string>(initialSettings.P2P_passphrase);
|
||||
let eAppId = $state<string>(initialSettings.P2P_AppID);
|
||||
let eDeviceName = $state<string>(initialDeviceName);
|
||||
let eAutoAccept = $state<boolean>(initialSettings.P2P_AutoAccepting == AutoAccepting.ALL);
|
||||
let eAutoStart = $state<boolean>(initialSettings.P2P_AutoStart);
|
||||
let eAutoBroadcast = $state<boolean>(initialSettings.P2P_AutoBroadcast);
|
||||
|
||||
const isP2PEnabledModified = $derived.by(() => eP2PEnabled !== settings.P2P_Enabled);
|
||||
const isRelayModified = $derived.by(() => eRelay !== settings.P2P_relays);
|
||||
const isRoomIdModified = $derived.by(() => eRoomId !== settings.P2P_roomID);
|
||||
const isPasswordModified = $derived.by(() => ePassword !== settings.P2P_passphrase);
|
||||
const isAppIdModified = $derived.by(() => eAppId !== settings.P2P_AppID);
|
||||
const isDeviceNameModified = $derived.by(() => eDeviceName !== deviceName);
|
||||
const isAutoAcceptModified = $derived.by(() => eAutoAccept !== (settings.P2P_AutoAccepting == AutoAccepting.ALL));
|
||||
const isAutoStartModified = $derived.by(() => eAutoStart !== settings.P2P_AutoStart);
|
||||
const isAutoBroadcastModified = $derived.by(() => eAutoBroadcast !== settings.P2P_AutoBroadcast);
|
||||
|
||||
const isAnyModified = $derived.by(
|
||||
() =>
|
||||
isP2PEnabledModified ||
|
||||
isRelayModified ||
|
||||
isRoomIdModified ||
|
||||
isPasswordModified ||
|
||||
isAppIdModified ||
|
||||
isDeviceNameModified ||
|
||||
isAutoAcceptModified ||
|
||||
isAutoStartModified ||
|
||||
isAutoBroadcastModified
|
||||
);
|
||||
|
||||
async function saveAndApply() {
|
||||
const newSettings = {
|
||||
...plugin.settings,
|
||||
P2P_Enabled: eP2PEnabled,
|
||||
P2P_relays: eRelay,
|
||||
P2P_roomID: eRoomId,
|
||||
P2P_passphrase: ePassword,
|
||||
P2P_AppID: eAppId,
|
||||
P2P_AutoAccepting: eAutoAccept ? AutoAccepting.ALL : AutoAccepting.NONE,
|
||||
P2P_AutoStart: eAutoStart,
|
||||
P2P_AutoBroadcast: eAutoBroadcast,
|
||||
};
|
||||
plugin.settings = newSettings;
|
||||
cmdSync.setConfig(SETTING_KEY_P2P_DEVICE_NAME, eDeviceName);
|
||||
deviceName = eDeviceName;
|
||||
await plugin.saveSettings();
|
||||
}
|
||||
async function revert() {
|
||||
eP2PEnabled = settings.P2P_Enabled;
|
||||
eRelay = settings.P2P_relays;
|
||||
eRoomId = settings.P2P_roomID;
|
||||
ePassword = settings.P2P_passphrase;
|
||||
eAppId = settings.P2P_AppID;
|
||||
eAutoAccept = settings.P2P_AutoAccepting == AutoAccepting.ALL;
|
||||
eAutoStart = settings.P2P_AutoStart;
|
||||
eAutoBroadcast = settings.P2P_AutoBroadcast;
|
||||
}
|
||||
|
||||
let serverInfo = $state<P2PServerInfo | undefined>(undefined);
|
||||
let replicatorInfo = $state<P2PReplicatorStatus | undefined>(undefined);
|
||||
const applyLoadSettings = (d: P2PSyncSetting, force: boolean) => {
|
||||
const { P2P_relays, P2P_roomID, P2P_passphrase, P2P_AppID, P2P_AutoAccepting } = d;
|
||||
if (force || !isP2PEnabledModified) eP2PEnabled = d.P2P_Enabled;
|
||||
if (force || !isRelayModified) eRelay = P2P_relays;
|
||||
if (force || !isRoomIdModified) eRoomId = P2P_roomID;
|
||||
if (force || !isPasswordModified) ePassword = P2P_passphrase;
|
||||
if (force || !isAppIdModified) eAppId = P2P_AppID;
|
||||
const newAutoAccept = P2P_AutoAccepting === AutoAccepting.ALL;
|
||||
if (force || !isAutoAcceptModified) eAutoAccept = newAutoAccept;
|
||||
if (force || !isAutoStartModified) eAutoStart = d.P2P_AutoStart;
|
||||
if (force || !isAutoBroadcastModified) eAutoBroadcast = d.P2P_AutoBroadcast;
|
||||
|
||||
settings = d;
|
||||
};
|
||||
onMount(() => {
|
||||
const r = eventHub.onEvent("setting-saved", async (d) => {
|
||||
applyLoadSettings(d, false);
|
||||
closeServer();
|
||||
});
|
||||
const rx = eventHub.onEvent(EVENT_LAYOUT_READY, () => {
|
||||
applyLoadSettings(plugin.settings, true);
|
||||
});
|
||||
const r2 = eventHub.onEvent(EVENT_SERVER_STATUS, (status) => {
|
||||
serverInfo = status;
|
||||
advertisements = status?.knownAdvertisements ?? [];
|
||||
});
|
||||
const r3 = eventHub.onEvent(EVENT_P2P_REPLICATOR_STATUS, (status) => {
|
||||
replicatorInfo = status;
|
||||
});
|
||||
eventHub.emitEvent(EVENT_REQUEST_STATUS);
|
||||
return () => {
|
||||
r();
|
||||
r2();
|
||||
r3();
|
||||
};
|
||||
});
|
||||
let isConnected = $derived.by(() => {
|
||||
return serverInfo?.isConnected ?? false;
|
||||
});
|
||||
let serverPeerId = $derived.by(() => {
|
||||
return serverInfo?.serverPeerId ?? "";
|
||||
});
|
||||
let advertisements = $state<PeerInfo[]>([]);
|
||||
|
||||
let autoSyncPeers = $derived.by(() =>
|
||||
settings.P2P_AutoSyncPeers.split(",")
|
||||
.map((e) => e.trim())
|
||||
.filter((e) => e)
|
||||
);
|
||||
let autoWatchPeers = $derived.by(() =>
|
||||
settings.P2P_AutoWatchPeers.split(",")
|
||||
.map((e) => e.trim())
|
||||
.filter((e) => e)
|
||||
);
|
||||
let syncOnCommand = $derived.by(() =>
|
||||
settings.P2P_SyncOnReplication.split(",")
|
||||
.map((e) => e.trim())
|
||||
.filter((e) => e)
|
||||
);
|
||||
|
||||
const peers = $derived.by(() =>
|
||||
advertisements.map((ad) => {
|
||||
let accepted: AcceptedStatus;
|
||||
const isTemporaryAccepted = ad.isTemporaryAccepted;
|
||||
if (isTemporaryAccepted === undefined) {
|
||||
if (ad.isAccepted === undefined) {
|
||||
accepted = AcceptedStatus.UNKNOWN;
|
||||
} else {
|
||||
accepted = ad.isAccepted ? AcceptedStatus.ACCEPTED : AcceptedStatus.DENIED;
|
||||
}
|
||||
} else if (isTemporaryAccepted === true) {
|
||||
accepted = AcceptedStatus.ACCEPTED_IN_SESSION;
|
||||
} else {
|
||||
accepted = AcceptedStatus.DENIED_IN_SESSION;
|
||||
}
|
||||
const isFetching = replicatorInfo?.replicatingFrom.indexOf(ad.peerId) !== -1;
|
||||
const isSending = replicatorInfo?.replicatingTo.indexOf(ad.peerId) !== -1;
|
||||
const isWatching = replicatorInfo?.watchingPeers.indexOf(ad.peerId) !== -1;
|
||||
const syncOnStart = autoSyncPeers.indexOf(ad.name) !== -1;
|
||||
const watchOnStart = autoWatchPeers.indexOf(ad.name) !== -1;
|
||||
const syncOnReplicationCommand = syncOnCommand.indexOf(ad.name) !== -1;
|
||||
const st: PeerStatus = {
|
||||
name: ad.name,
|
||||
peerId: ad.peerId,
|
||||
accepted: accepted,
|
||||
status: ad.isAccepted ? ConnectionStatus.CONNECTED : ConnectionStatus.DISCONNECTED,
|
||||
isSending: isSending,
|
||||
isFetching: isFetching,
|
||||
isWatching: isWatching,
|
||||
syncOnConnect: syncOnStart,
|
||||
watchOnConnect: watchOnStart,
|
||||
syncOnReplicationCommand: syncOnReplicationCommand,
|
||||
};
|
||||
return st;
|
||||
})
|
||||
);
|
||||
|
||||
function useDefaultRelay() {
|
||||
eRelay = DEFAULT_SETTINGS.P2P_relays;
|
||||
}
|
||||
function _generateRandom() {
|
||||
return (Math.floor(Math.random() * 1000) + 1000).toString().substring(1);
|
||||
}
|
||||
function generateRandom(length: number) {
|
||||
let buf = "";
|
||||
while (buf.length < length) {
|
||||
buf += "-" + _generateRandom();
|
||||
}
|
||||
return buf.substring(1, length);
|
||||
}
|
||||
function chooseRandom() {
|
||||
eRoomId = generateRandom(12) + "-" + Math.random().toString(36).substring(2, 5);
|
||||
}
|
||||
|
||||
async function openServer() {
|
||||
await cmdSync.open();
|
||||
}
|
||||
async function closeServer() {
|
||||
await cmdSync.close();
|
||||
}
|
||||
function startBroadcasting() {
|
||||
void cmdSync.enableBroadcastCastings();
|
||||
}
|
||||
function stopBroadcasting() {
|
||||
void cmdSync.disableBroadcastCastings();
|
||||
}
|
||||
|
||||
    const initialDialogStatusKey = `p2p-dialog-status`;
    const getDialogStatus = () => {
        try {
            const initialDialogStatus = JSON.parse(cmdSync.getConfig(initialDialogStatusKey) ?? "{}") as {
                notice?: boolean;
                setting?: boolean;
            };
            return initialDialogStatus;
        } catch (e) {
            return {};
        }
    };
    const initialDialogStatus = getDialogStatus();
    let isNoticeOpened = $state<boolean>(initialDialogStatus.notice ?? true);
    let isSettingOpened = $state<boolean>(initialDialogStatus.setting ?? true);
    $effect(() => {
        const dialogStatus = {
            notice: isNoticeOpened,
            setting: isSettingOpened,
        };
        cmdSync.setConfig(initialDialogStatusKey, JSON.stringify(dialogStatus));
    });
    let isObsidian = $derived.by(() => {
        return plugin.services.API.getPlatform() === "obsidian";
    });
</script>

<article>
    <h1>Peer-to-Peer Replicator</h1>
    <details bind:open={isNoticeOpened}>
        <summary>{_msg("P2P.Note.Summary")}</summary>
        <p class="important">{_msg("P2P.Note.important_note")}</p>
        <p class="important-sub">
            {_msg("P2P.Note.important_note_sub")}
        </p>
        {#each _msg("P2P.Note.description").split("\n\n") as paragraph}
            <p>{paragraph}</p>
        {/each}
    </details>
    <h2>Connection Settings</h2>
    {#if isObsidian}
        You can configure this in the Obsidian plugin settings.
    {:else}
        <details bind:open={isSettingOpened}>
            <summary>{eRelay}</summary>
            <table class="settings">
                <tbody>
                    <tr>
                        <th> Enable P2P Replicator </th>
                        <td>
                            <label class={{ "is-dirty": isP2PEnabledModified }}>
                                <input type="checkbox" bind:checked={eP2PEnabled} />
                            </label>
                        </td>
                    </tr>
                    <tr>
                        <th> Relay settings </th>
                        <td>
                            <label class={{ "is-dirty": isRelayModified }}>
                                <input
                                    type="text"
                                    placeholder="wss://exp-relay.vrtmrz.net, wss://xxxxx"
                                    bind:value={eRelay}
                                    autocomplete="off"
                                />
                                <button onclick={() => useDefaultRelay()}> Use vrtmrz's relay </button>
                            </label>
                        </td>
                    </tr>
                    <tr>
                        <th> Room ID </th>
                        <td>
                            <label class={{ "is-dirty": isRoomIdModified }}>
                                <input
                                    type="text"
                                    placeholder="anything-you-like"
                                    bind:value={eRoomId}
                                    autocomplete="off"
                                    spellcheck="false"
                                    autocorrect="off"
                                />
                                <button onclick={() => chooseRandom()}> Use Random Number </button>
                            </label>
                            <span>
                                <small>
                                    This isolates your connections from other devices. Use the same Room ID on
                                    the devices that should connect to each other.
                                </small>
                            </span>
                        </td>
                    </tr>
                    <tr>
                        <th> Password </th>
                        <td>
                            <label class={{ "is-dirty": isPasswordModified }}>
                                <input type="password" placeholder="password" bind:value={ePassword} />
                            </label>
                            <span>
                                <small>
                                    This password is used to encrypt the connection. Use something long enough.
                                </small>
                            </span>
                        </td>
                    </tr>
                    <tr>
                        <th> This device name </th>
                        <td>
                            <label class={{ "is-dirty": isDeviceNameModified }}>
                                <input
                                    type="text"
                                    placeholder="iphone-16"
                                    bind:value={eDeviceName}
                                    autocomplete="off"
                                />
                            </label>
                            <span>
                                <small>
                                    A name to identify this device. Please use a short one for stable peer
                                    detection, e.g., "iphone-16" or "macbook-2021".
                                </small>
                            </span>
                        </td>
                    </tr>
                    <tr>
                        <th> Auto Connect </th>
                        <td>
                            <label class={{ "is-dirty": isAutoStartModified }}>
                                <input type="checkbox" bind:checked={eAutoStart} />
                            </label>
                        </td>
                    </tr>
                    <tr>
                        <th> Start change-broadcasting on Connect </th>
                        <td>
                            <label class={{ "is-dirty": isAutoBroadcastModified }}>
                                <input type="checkbox" bind:checked={eAutoBroadcast} />
                            </label>
                        </td>
                    </tr>
                    <!-- <tr>
                        <th> Auto Accepting </th>
                        <td>
                            <label class={{ "is-dirty": isAutoAcceptModified }}>
                                <input type="checkbox" bind:checked={eAutoAccept} />
                            </label>
                        </td>
                    </tr> -->
                </tbody>
            </table>
            <button disabled={!isAnyModified} class="button mod-cta" onclick={saveAndApply}>Save and Apply</button>
            <button disabled={!isAnyModified} class="button" onclick={revert}>Revert changes</button>
        </details>
    {/if}

    <div>
        <h2>Signaling Server Connection</h2>
        <div>
            {#if !isConnected}
                <p>No Connection</p>
            {:else}
                <p>Connected to Signaling Server (as Peer ID: {serverPeerId})</p>
            {/if}
        </div>
        <div>
            {#if !isConnected}
                <button onclick={openServer}>Connect</button>
            {:else}
                <button onclick={closeServer}>Disconnect</button>
                {#if replicatorInfo?.isBroadcasting !== undefined}
                    {#if replicatorInfo?.isBroadcasting}
                        <button onclick={stopBroadcasting}>Stop Broadcasting</button>
                    {:else}
                        <button onclick={startBroadcasting}>Start Broadcasting</button>
                    {/if}
                {/if}
                <details>
                    <summary>Broadcasting?</summary>
                    <p>
                        <small>
                            If you want to use `LiveSync`, you should broadcast changes. All `watching` peers
                            that detect this will start replication to fetch them. <br />
                            However, leave this disabled if you prefer stronger secrecy.
                        </small>
                    </p>
                </details>
            {/if}
        </div>
    </div>

    <div>
        <h2>Peers</h2>
        <table class="peers">
            <thead>
                <tr>
                    <th>Name</th>
                    <th>Action</th>
                    <th>Command</th>
                </tr>
            </thead>
            <tbody>
                {#each peers as peer}
                    <PeerStatusRow peerStatus={peer}></PeerStatusRow>
                {/each}
            </tbody>
        </table>
    </div>
</article>

<style>
    article {
        max-width: 100%;
    }
    article p {
        user-select: text;
        -webkit-user-select: text;
    }
    h2 {
        margin-top: var(--size-4-1);
        margin-bottom: var(--size-4-1);
        padding-bottom: var(--size-4-1);
        border-bottom: 1px solid var(--background-modifier-border);
    }
    label.is-dirty {
        background-color: var(--background-modifier-error);
    }
    input {
        background-color: transparent;
    }
    th {
        /* display: flex;
        justify-content: center;
        align-items: center; */
        min-height: var(--input-height);
    }
    td {
        min-height: var(--input-height);
    }
    td > label {
        display: flex;
        flex-direction: row;
        align-items: center;
        justify-content: flex-start;
        min-height: var(--input-height);
    }
    td > label > * {
        margin: auto var(--size-4-1);
    }
    table.peers {
        width: 100%;
    }
    .important {
        color: var(--text-error);
        font-size: 1.2em;
        font-weight: bold;
    }
    .important-sub {
        color: var(--text-warning);
    }
    .settings label {
        display: flex;
        flex-direction: row;
        align-items: center;
        justify-content: flex-start;
        flex-wrap: wrap;
    }
</style>

198 src/features/P2PSync/P2PReplicator/P2PReplicatorPaneView.ts (new file)
@@ -0,0 +1,198 @@
import { Menu, WorkspaceLeaf } from "obsidian";
import ReplicatorPaneComponent from "./P2PReplicatorPane.svelte";
import type ObsidianLiveSyncPlugin from "../../../main.ts";
import { mount } from "svelte";
import { SvelteItemView } from "../../../common/SvelteItemView.ts";
import { eventHub } from "../../../common/events.ts";

import { unique } from "octagonal-wheels/collection";
import { LOG_LEVEL_NOTICE, REMOTE_P2P } from "../../../lib/src/common/types.ts";
import { Logger } from "../../../lib/src/common/logger.ts";
import { P2PReplicator } from "../CmdP2PReplicator.ts";
import {
    EVENT_P2P_PEER_SHOW_EXTRA_MENU,
    type PeerStatus,
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
export const VIEW_TYPE_P2P = "p2p-replicator";
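
// Helpers for the comma-separated peer lists kept in the settings.
// For illustration: addToList("macbook-2021", "iphone-16, ipad") yields
// "iphone-16,ipad,macbook-2021" (deduplicated via `unique`).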
function addToList(item: string, list: string) {
    return unique(
        list
            .split(",")
            .map((e) => e.trim())
            .concat(item)
            .filter((p) => p)
    ).join(",");
}
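// Conversely, removeFromList("ipad", "iphone-16, ipad") yields "iphone-16".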
function removeFromList(item: string, list: string) {
    return list
        .split(",")
        .map((e) => e.trim())
        .filter((p) => p !== item)
        .filter((p) => p)
        .join(",");
}

export class P2PReplicatorPaneView extends SvelteItemView {
    plugin: ObsidianLiveSyncPlugin;
    icon = "waypoints";
    title: string = "";
    navigation = false;

    getIcon(): string {
        return "waypoints";
    }
    get replicator() {
        const r = this.plugin.getAddOn<P2PReplicator>(P2PReplicator.name);
        if (!r || !r._replicatorInstance) {
            throw new Error("Replicator not found");
        }
        return r._replicatorInstance;
    }
    async replicateFrom(peer: PeerStatus) {
        await this.replicator.replicateFrom(peer.peerId);
    }
    async replicateTo(peer: PeerStatus) {
        await this.replicator.requestSynchroniseToPeer(peer.peerId);
    }
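    // Fetch the remote device's configuration over the P2P channel and, with the
    // user's consent, apply it locally (optionally dropping the local database to
    // rebuild from the remote device).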
    async getRemoteConfig(peer: PeerStatus) {
        Logger(
            `Requesting remote config for ${peer.name}. Please input the passphrase on the remote device`,
            LOG_LEVEL_NOTICE
        );
        const remoteConfig = await this.replicator.getRemoteConfig(peer.peerId);
        if (remoteConfig) {
            Logger(`Remote config for ${peer.name} has been retrieved successfully`);
            const DROP = "Yes, and drop local database";
            const KEEP = "Yes, but keep local database";
            const CANCEL = "No, cancel";
            const yn = await this.plugin.confirm.askSelectStringDialogue(
                `Do you really want to apply the remote config? This will overwrite your current config immediately and restart.
You can also drop the local database to rebuild it from the remote device.`,
                [DROP, KEEP, CANCEL] as const,
                {
                    defaultAction: CANCEL,
                    title: "Apply Remote Config",
                }
            );
            if (yn === DROP || yn === KEEP) {
                if (yn === DROP) {
                    if (remoteConfig.remoteType !== REMOTE_P2P) {
                        const yn2 = await this.plugin.confirm.askYesNoDialog(
                            `Do you want to set the remote type to "P2P Sync" to rebuild by "P2P replication"?`,
                            {
                                title: "Rebuild from remote device",
                            }
                        );
                        if (yn2 === "yes") {
                            remoteConfig.remoteType = REMOTE_P2P;
                            remoteConfig.P2P_RebuildFrom = peer.name;
                        }
                    }
                }
                this.plugin.settings = remoteConfig;
                await this.plugin.saveSettings();
                if (yn === DROP) {
                    await this.plugin.rebuilder.scheduleFetch();
                } else {
                    this.plugin.services.appLifecycle.scheduleRestart();
                }
            } else {
                Logger(`Cancelled\nRemote config for ${peer.name} has not been applied`, LOG_LEVEL_NOTICE);
            }
        } else {
            Logger(`Cannot retrieve remote config for ${peer.peerId}`);
        }
    }
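
    // Each toggleable peer property maps to one comma-separated settings list;
    // toggling adds or removes the peer's name from the corresponding list.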
    async toggleProp(peer: PeerStatus, prop: "syncOnConnect" | "watchOnConnect" | "syncOnReplicationCommand") {
        const settingMap = {
            syncOnConnect: "P2P_AutoSyncPeers",
            watchOnConnect: "P2P_AutoWatchPeers",
            syncOnReplicationCommand: "P2P_SyncOnReplication",
        } as const;

        const targetSetting = settingMap[prop];
        if (peer[prop]) {
            this.plugin.settings[targetSetting] = removeFromList(peer.name, this.plugin.settings[targetSetting]);
        } else {
            this.plugin.settings[targetSetting] = addToList(peer.name, this.plugin.settings[targetSetting]);
        }
        // Persist the updated list once.
        await this.plugin.saveSettings();
    }
    m?: Menu;
    constructor(leaf: WorkspaceLeaf, plugin: ObsidianLiveSyncPlugin) {
        super(leaf);
        this.plugin = plugin;
        eventHub.onEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, ({ peer, event }) => {
            if (this.m) {
                this.m.hide();
            }
            this.m = new Menu()
                .addItem((item) => item.setTitle("📥 Only Fetch").onClick(() => this.replicateFrom(peer)))
                .addItem((item) => item.setTitle("📤 Only Send").onClick(() => this.replicateTo(peer)))
                .addSeparator()
                .addItem((item) => {
                    item.setTitle("🔧 Get Configuration").onClick(async () => {
                        await this.getRemoteConfig(peer);
                    });
                })
                .addSeparator()
                .addItem((item) => {
                    const mark = peer.syncOnConnect ? "checkmark" : null;
                    item.setTitle("Toggle Sync on connect")
                        .onClick(async () => {
                            await this.toggleProp(peer, "syncOnConnect");
                        })
                        .setIcon(mark);
                })
                .addItem((item) => {
                    const mark = peer.watchOnConnect ? "checkmark" : null;
                    item.setTitle("Toggle Watch on connect")
                        .onClick(async () => {
                            await this.toggleProp(peer, "watchOnConnect");
                        })
                        .setIcon(mark);
                })
                .addItem((item) => {
                    const mark = peer.syncOnReplicationCommand ? "checkmark" : null;
                    item.setTitle("Toggle Sync on `Replicate now` command")
                        .onClick(async () => {
                            await this.toggleProp(peer, "syncOnReplicationCommand");
                        })
                        .setIcon(mark);
                });
            this.m.showAtPosition({ x: event.x, y: event.y });
        });
    }

    getViewType() {
        return VIEW_TYPE_P2P;
    }

    getDisplayText() {
        return "Peer-to-Peer Replicator";
    }

    override async onClose(): Promise<void> {
        await super.onClose();
        if (this.m) {
            this.m.hide();
        }
    }
    instantiateComponent(target: HTMLElement) {
        const cmdSync = this.plugin.getAddOn<P2PReplicator>(P2PReplicator.name);
        if (!cmdSync) {
            throw new Error("Replicator not found");
        }
        return mount(ReplicatorPaneComponent, {
            target: target,
            props: {
                plugin: cmdSync.plugin,
                cmdSync: cmdSync,
            },
        });
    }
}

259 src/features/P2PSync/P2PReplicator/PeerStatusRow.svelte (new file)
@@ -0,0 +1,259 @@
<script lang="ts">
    import { getContext } from "svelte";
    import { AcceptedStatus, type PeerStatus } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
    import type { P2PReplicator } from "../CmdP2PReplicator";
    import { eventHub } from "../../../common/events";
    import { EVENT_P2P_PEER_SHOW_EXTRA_MENU } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";

    interface Props {
        peerStatus: PeerStatus;
    }

    let { peerStatus }: Props = $props();
    let peer = $derived(peerStatus);
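
    // A small lookup helper: returns cond[d] when the key exists, otherwise the default.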
    function select<T extends string | number | symbol, U>(d: T, cond: Record<T, U>): U;
    function select<T extends string | number | symbol, U, V>(d: T, cond: Record<T, U>, def: V): U | V;
    function select<T extends string | number | symbol, U>(d: T, cond: Record<T, U>, def?: U): U | undefined {
        return d in cond ? cond[d] : def;
    }

    let statusChips = $derived.by(() =>
        [
            peer.isWatching ? ["WATCHING"] : [],
            peer.isFetching ? ["FETCHING"] : [],
            peer.isSending ? ["SENDING"] : [],
        ].flat()
    );
    let acceptedStatusChip = $derived.by(() =>
        select(
            peer.accepted.toString(),
            {
                [AcceptedStatus.ACCEPTED]: "ACCEPTED",
                [AcceptedStatus.ACCEPTED_IN_SESSION]: "ACCEPTED (in session)",
                [AcceptedStatus.DENIED_IN_SESSION]: "DENIED (in session)",
                [AcceptedStatus.DENIED]: "DENIED",
                [AcceptedStatus.UNKNOWN]: "NEW",
            },
            ""
        )
    );
    const classList = {
        ["SENDING"]: "connected",
        ["FETCHING"]: "connected",
        ["WATCHING"]: "connected-live",
        ["WAITING"]: "waiting",
        ["ACCEPTED"]: "accepted",
        ["DENIED"]: "denied",
        ["NEW"]: "unknown",
    };
    let isAccepted = $derived.by(
        () => peer.accepted === AcceptedStatus.ACCEPTED || peer.accepted === AcceptedStatus.ACCEPTED_IN_SESSION
    );
    let isDenied = $derived.by(
        () => peer.accepted === AcceptedStatus.DENIED || peer.accepted === AcceptedStatus.DENIED_IN_SESSION
    );

    let isNew = $derived.by(() => peer.accepted === AcceptedStatus.UNKNOWN);

    function makeDecision(isAccepted: boolean, isTemporary: boolean) {
        cmdReplicator._replicatorInstance?.server?.makeDecision({
            peerId: peer.peerId,
            name: peer.name,
            decision: isAccepted,
            isTemporary: isTemporary,
        });
    }
    function revokeDecision() {
        cmdReplicator._replicatorInstance?.server?.revokeDecision({
            peerId: peer.peerId,
            name: peer.name,
        });
    }
    const cmdReplicator = getContext<() => P2PReplicator>("getReplicator")();
    const replicator = cmdReplicator._replicatorInstance!;

    const peerAttrLabels = $derived.by(() => {
        const attrs = [];
        if (peer.syncOnConnect) {
            attrs.push("✔ SYNC");
        }
        if (peer.watchOnConnect) {
            attrs.push("✔ WATCH");
        }
        if (peer.syncOnReplicationCommand) {
            attrs.push("✔ SELECT");
        }
        return attrs;
    });
    function startWatching() {
        replicator.watchPeer(peer.peerId);
    }
    function stopWatching() {
        replicator.unwatchPeer(peer.peerId);
    }

    function sync() {
        replicator.sync(peer.peerId, false);
    }

    function moreMenu(evt: MouseEvent) {
        eventHub.emitEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, { peer, event: evt });
    }
</script>

<tr>
    <td>
        <div class="info">
            <div class="row name">
                <span class="peername">{peer.name}</span>
            </div>
            <div class="row peer-id">
                <span class="peerid">({peer.peerId})</span>
            </div>
        </div>
        <div class="status-chips">
            <div class="row">
                <span class="chip {select(acceptedStatusChip, classList)}">{acceptedStatusChip}</span>
            </div>
            {#if isAccepted}
                <div class="row">
                    {#each statusChips as chip}
                        <span class="chip {select(chip, classList)}">{chip}</span>
                    {/each}
                </div>
            {/if}
            <div class="row">
                {#each peerAttrLabels as attr}
                    <span class="chip attr">{attr}</span>
                {/each}
            </div>
        </div>
    </td>
    <td>
        <div class="buttons">
            <div class="row">
                {#if isNew}
                    {#if !isAccepted}
                        <button class="button" onclick={() => makeDecision(true, true)}>Accept in session</button>
                        <button class="button mod-cta" onclick={() => makeDecision(true, false)}>Accept</button>
                    {/if}
                    {#if !isDenied}
                        <button class="button" onclick={() => makeDecision(false, true)}>Deny in session</button>
                        <button class="button mod-warning" onclick={() => makeDecision(false, false)}>Deny</button>
                    {/if}
                {:else}
                    <button class="button mod-warning" onclick={() => revokeDecision()}>Revoke</button>
                {/if}
            </div>
        </div>
    </td>
    <td>
        {#if isAccepted}
            <div class="buttons">
                <div class="row">
                    <button class="button" onclick={sync} disabled={peer.isSending || peer.isFetching}>🔄</button>
                    <!-- <button class="button" onclick={replicateFrom} disabled={peer.isFetching}>📥</button>
                    <button class="button" onclick={replicateTo} disabled={peer.isSending}>📤</button> -->
                    {#if peer.isWatching}
                        <button class="button" onclick={stopWatching}>Stop ⚡</button>
                    {:else}
                        <button class="button" onclick={startWatching} title="live">⚡</button>
                    {/if}
                    <button class="button" onclick={moreMenu}>...</button>
                </div>
            </div>
        {/if}
    </td>
</tr>

<style>
    tr:nth-child(odd) {
        background-color: var(--background-primary-alt);
    }
    .info {
        display: flex;
        flex-direction: column;
        justify-content: center;
        align-items: center;
        padding: var(--size-4-1) var(--size-4-1);
    }

    .peer-id {
        font-size: 0.8em;
    }
    .status-chips {
        display: flex;
        flex-direction: column;
        justify-content: center;
        align-items: center;
        /* min-width: 10em; */
    }
    .buttons {
        display: flex;
        flex-direction: column;
        justify-content: center;
        align-items: center;
    }
    .buttons .row {
        display: flex;
        justify-content: center;
        align-items: center;
        flex-wrap: wrap;
        /* padding: var(--size-4-1) var(--size-4-1); */
    }
    .chip {
        display: inline-block;
        padding: 4px 8px;
        margin: 4px;
        border-radius: 4px;
        font-size: 0.75em;
        font-weight: bold;
        background-color: var(--tag-background);
        border: var(--tag-border-width) solid var(--tag-border-color);
    }
    .chip.connected {
        background-color: var(--background-modifier-success);
        color: var(--text-normal);
    }
    .chip.connected-live {
        background-color: var(--background-modifier-success);
        border-color: var(--background-modifier-success);
        color: var(--text-normal);
    }
    .chip.accepted {
        background-color: var(--background-modifier-success);
        color: var(--text-normal);
    }
    .chip.waiting {
        background-color: var(--background-secondary);
    }
    .chip.unknown {
        background-color: var(--background-primary);
        color: var(--text-warning);
    }
    .chip.denied {
        background-color: var(--background-modifier-error);
        color: var(--text-error);
    }
    .chip.attr {
        background-color: var(--background-secondary);
    }
    .button {
        margin: var(--size-4-1);
    }
    .button.affirmative {
        background-color: var(--interactive-accent);
        color: var(--text-normal);
    }
    .button.affirmative:hover {
        background-color: var(--interactive-accent-hover);
    }
    .button.negative {
        background-color: var(--background-modifier-error);
        color: var(--text-error);
    }
    .button.negative:hover {
        background-color: var(--background-modifier-error-hover);
    }
</style>

2 src/lib
3411 src/main.ts
197 src/modules/AbstractModule.ts (new file)
@@ -0,0 +1,197 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import type { LOG_LEVEL } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";
import { __$checkInstanceBinding } from "../lib/src/dev/checks";
// import { unique } from "octagonal-wheels/collection";
// import type { IObsidianModule } from "./AbstractObsidianModule.ts";
// import type {
//     ICoreModuleBase,
//     AllInjectableProps,
//     AllExecuteProps,
//     EveryExecuteProps,
//     AnyExecuteProps,
//     ICoreModule,
// } from "./ModuleTypes";

// function isOverridableKey(key: string): key is keyof ICoreModuleBase {
//     return key.startsWith("$");
// }

// function isInjectableKey(key: string): key is keyof AllInjectableProps {
//     return key.startsWith("$$");
// }

// function isAllExecuteKey(key: string): key is keyof AllExecuteProps {
//     return key.startsWith("$all");
// }
// function isEveryExecuteKey(key: string): key is keyof EveryExecuteProps {
//     return key.startsWith("$every");
// }
// function isAnyExecuteKey(key: string): key is keyof AnyExecuteProps {
//     return key.startsWith("$any");
// }
/**
 * All $-prefixed functions are hooked by the modules. Be careful when calling them directly.
 * Please refer to the module's source code to understand each function.
 * $$     : Completely overridden functions.
 * $all   : Process all modules and return all results.
 * $every : Process all modules until the first failure.
 * $any   : Process all modules until the first success.
 * $      : Other interception points. You should assign the module manually.
 * All of the above are wired up by the injectModules function.
 */
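// For illustration (hypothetical member names): a `$$openDatabase` member would be
// replaced wholesale by exactly one module, whereas an `$everyOnLoad` member would
// run every module's handler in turn, stopping at the first failure.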
// export function injectModules<T extends ICoreModule>(target: T, modules: ICoreModule[]) {
//     const allKeys = unique([
//         ...Object.keys(Object.getOwnPropertyDescriptors(target)),
//         ...Object.keys(Object.getOwnPropertyDescriptors(Object.getPrototypeOf(target))),
//     ]).filter((e) => e.startsWith("$")) as (keyof ICoreModule)[];
//     const moduleMap = new Map<string, IObsidianModule[]>();
//     for (const module of modules) {
//         for (const key of allKeys) {
//             if (isOverridableKey(key)) {
//                 if (key in module) {
//                     const list = moduleMap.get(key) || [];
//                     if (typeof module[key] === "function") {
//                         module[key] = module[key].bind(module) as any;
//                     }
//                     list.push(module);
//                     moduleMap.set(key, list);
//                 }
//             }
//         }
//     }
//     Logger(`Injecting modules for ${target.constructor.name}`, LOG_LEVEL_VERBOSE);
//     for (const key of allKeys) {
//         const modules = moduleMap.get(key) || [];
//         if (isInjectableKey(key)) {
//             if (modules.length == 0) {
//                 throw new Error(`No module injected for ${key}. This is a fatal error.`);
//             }
//             target[key] = modules[0][key]! as any;
//             Logger(`[${modules[0].constructor.name}]: Injected ${key} `, LOG_LEVEL_VERBOSE);
//         } else if (isAllExecuteKey(key)) {
//             const modules = moduleMap.get(key) || [];
//             target[key] = async (...args: any) => {
//                 for (const module of modules) {
//                     try {
//                         //@ts-ignore
//                         await module[key]!(...args);
//                     } catch (ex) {
//                         Logger(`[${module.constructor.name}]: All handler for ${key} failed`, LOG_LEVEL_VERBOSE);
//                         Logger(ex, LOG_LEVEL_VERBOSE);
//                     }
//                 }
//                 return true;
//             };
//             for (const module of modules) {
//                 Logger(`[${module.constructor.name}]: Injected (All) ${key} `, LOG_LEVEL_VERBOSE);
//             }
//         } else if (isEveryExecuteKey(key)) {
//             target[key] = async (...args: any) => {
//                 for (const module of modules) {
//                     try {
//                         //@ts-ignore:2556
//                         const ret = await module[key]!(...args);
//                         if (ret !== undefined && !ret) {
//                             // Failed; return that falsy value.
//                             return ret;
//                         }
//                     } catch (ex) {
//                         Logger(`[${module.constructor.name}]: Every handler for ${key} failed`);
//                         Logger(ex, LOG_LEVEL_VERBOSE);
//                     }
//                 }
//                 return true;
//             };
//             for (const module of modules) {
//                 Logger(`[${module.constructor.name}]: Injected (Every) ${key} `, LOG_LEVEL_VERBOSE);
//             }
//         } else if (isAnyExecuteKey(key)) {
//             //@ts-ignore
//             target[key] = async (...args: any[]) => {
//                 for (const module of modules) {
//                     try {
//                         //@ts-ignore:2556
//                         const ret = await module[key](...args);
//                         // If a truthy value is returned, return that value.
//                         if (ret) {
//                             return ret;
//                         }
//                     } catch (ex) {
//                         Logger(`[${module.constructor.name}]: Any handler for ${key} failed`);
//                         Logger(ex, LOG_LEVEL_VERBOSE);
//                     }
//                 }
//                 return false;
//             };
//             for (const module of modules) {
//                 Logger(`[${module.constructor.name}]: Injected (Any) ${key} `, LOG_LEVEL_VERBOSE);
//             }
//         } else {
//             Logger(`No injected handler for ${key} `, LOG_LEVEL_VERBOSE);
//         }
//     }
//     Logger(`Injected modules for ${target.constructor.name}`, LOG_LEVEL_VERBOSE);
//     return true;
// }

export abstract class AbstractModule {
    _log = (msg: any, level: LOG_LEVEL = LOG_LEVEL_INFO, key?: string) => {
        if (typeof msg === "string" && level !== LOG_LEVEL_NOTICE) {
            msg = `[${this.constructor.name}]\u{200A} ${msg}`;
        }
        // console.log(msg);
        Logger(msg, level, key);
    };

    get localDatabase() {
        return this.core.localDatabase;
    }
    get settings() {
        return this.core.settings;
    }
    set settings(value) {
        this.core.settings = value;
    }

    onBindFunction(core: LiveSyncCore, services: typeof core.services) {
        // Override if needed.
    }
    constructor(public core: LiveSyncCore) {
        this.onBindFunction(core, core.services);
        Logger(`[${this.constructor.name}] Loaded`, LOG_LEVEL_VERBOSE);
        __$checkInstanceBinding(this);
    }
    saveSettings = this.core.saveSettings.bind(this.core);

    addTestResult(key: string, value: boolean, summary?: string, message?: string) {
        this.services.test.addTestResult(`${this.constructor.name}`, key, value, summary, message);
    }
    testDone(result: boolean = true) {
        return Promise.resolve(result);
    }
    testFail(message: string) {
        this._log(message, LOG_LEVEL_NOTICE);
        return this.testDone(false);
    }

    async _test(key: string, process: () => Promise<any>) {
        this._log(`Testing ${key}`, LOG_LEVEL_VERBOSE);
        try {
            const ret = await process();
            if (ret !== true) {
                this.addTestResult(key, false, ret.toString());
                return this.testFail(`${key} failed: ${ret}`);
            }
            this.addTestResult(key, true, "");
        } catch (ex: any) {
            this.addTestResult(key, false, "Failed by Exception", ex.toString());
            return this.testFail(`${key} failed: ${ex}`);
        }
        return this.testDone();
    }

    get services() {
        return this.core._services;
    }
}

54 src/modules/AbstractObsidianModule.ts (new file)
@@ -0,0 +1,54 @@
import { type Prettify } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";
import type ObsidianLiveSyncPlugin from "../main";
import { AbstractModule } from "./AbstractModule.ts";
import type { ChainableExecuteFunction, OverridableFunctionsKeys } from "./ModuleTypes";

export type IObsidianModuleBase = OverridableFunctionsKeys<ObsidianLiveSyncPlugin>;
export type IObsidianModule = Prettify<Partial<IObsidianModuleBase>>;
export type ModuleKeys = keyof IObsidianModule;
export type ChainableModuleProps = ChainableExecuteFunction<ObsidianLiveSyncPlugin>;

export abstract class AbstractObsidianModule extends AbstractModule {
    addCommand = this.plugin.addCommand.bind(this.plugin);
    registerView = this.plugin.registerView.bind(this.plugin);
    addRibbonIcon = this.plugin.addRibbonIcon.bind(this.plugin);
    registerObsidianProtocolHandler = this.plugin.registerObsidianProtocolHandler.bind(this.plugin);

    get localDatabase() {
        return this.plugin.localDatabase;
    }
    get settings() {
        return this.plugin.settings;
    }
    set settings(value) {
        this.plugin.settings = value;
    }
    get app() {
        return this.plugin.app;
    }

    constructor(
        public plugin: ObsidianLiveSyncPlugin,
        public core: LiveSyncCore
    ) {
        super(core);
    }

    saveSettings = this.plugin.saveSettings.bind(this.plugin);

    isMainReady() {
        return this.services.appLifecycle.isReady();
    }
    isMainSuspended() {
        return this.services.appLifecycle.isSuspended();
    }
    isDatabaseReady() {
        return this.services.database.isDatabaseReady();
    }

    // Should be overridden by modules that can be disabled.
    isThisModuleEnabled() {
        return true;
    }
}

55 src/modules/ModuleTypes.ts (new file)
@@ -0,0 +1,55 @@
import type { Prettify } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";

export type OverridableFunctionsKeys<T> = {
    [K in keyof T as K extends `$${string}` ? K : never]: T[K];
};

export type ChainableExecuteFunction<T> = {
    [K in keyof T as K extends `$${string}`
        ? T[K] extends (...args: any) => ChainableFunctionResult
            ? K
            : never
        : never]: T[K];
};

export type ICoreModuleBase = OverridableFunctionsKeys<LiveSyncCore>;
export type ICoreModule = Prettify<Partial<ICoreModuleBase>>;
export type CoreModuleKeys = keyof ICoreModule;

export type ChainableFunctionResult =
    | Promise<boolean | undefined | string>
    | Promise<boolean | undefined>
    | Promise<boolean>
    | Promise<void>;
export type ChainableFunctionResultOrAll = Promise<boolean | undefined | string | void>;

type AllExecuteFunction<T> = {
    [K in keyof T as K extends `$all${string}`
        ? T[K] extends (...args: any[]) => ChainableFunctionResultOrAll
            ? K
            : never
        : never]: T[K];
};
type EveryExecuteFunction<T> = {
    [K in keyof T as K extends `$every${string}`
        ? T[K] extends (...args: any[]) => ChainableFunctionResult
            ? K
            : never
        : never]: T[K];
};
type AnyExecuteFunction<T> = {
    [K in keyof T as K extends `$any${string}`
        ? T[K] extends (...args: any[]) => ChainableFunctionResult
            ? K
            : never
        : never]: T[K];
};
type InjectableFunction<T> = {
    [K in keyof T as K extends `$$${string}` ? (T[K] extends (...args: any[]) => any ? K : never) : never]: T[K];
};
export type AllExecuteProps = AllExecuteFunction<LiveSyncCore>;
export type EveryExecuteProps = EveryExecuteFunction<LiveSyncCore>;
export type AnyExecuteProps = AnyExecuteFunction<LiveSyncCore>;

export type AllInjectableProps = InjectableFunction<LiveSyncCore>;

352 src/modules/core/ModuleDatabaseFileAccess.ts (new file)
@@ -0,0 +1,352 @@
import { LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { EVENT_FILE_SAVED, eventHub } from "../../common/events";
import {
    getDatabasePathFromUXFileInfo,
    getStoragePathFromUXFileInfo,
    isInternalMetadata,
    markChangesAreSame,
} from "../../common/utils";
import type {
    UXFileInfoStub,
    FilePathWithPrefix,
    UXFileInfo,
    MetaEntry,
    LoadedEntry,
    FilePath,
    SavingEntry,
    DocumentID,
} from "../../lib/src/common/types";
import type { DatabaseFileAccess } from "../interfaces/DatabaseFileAccess";
import { isPlainText, shouldBeIgnored, stripAllPrefixes } from "../../lib/src/string_and_binary/path";
import {
    createBlob,
    createTextBlob,
    delay,
    determineTypeFromBlob,
    isDocContentSame,
    readContent,
} from "../../lib/src/common/utils";
import { serialized } from "octagonal-wheels/concurrency/lock";
import { AbstractModule } from "../AbstractModule.ts";
import { ICHeader } from "../../common/types.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleDatabaseFileAccess extends AbstractModule implements DatabaseFileAccess {
    private _everyOnload(): Promise<boolean> {
        this.core.databaseFileAccess = this;
        return Promise.resolve(true);
    }
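
    // Self-test: round-trips a test note through the database (store, read back,
    // delete, verify deletion). Only runs when the debug tools are enabled.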
    private async _everyModuleTest(): Promise<boolean> {
        if (!this.settings.enableDebugTools) return Promise.resolve(true);
        const testString = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam nec purus nec nunc";
        // Before the test, we need to delete the file completely.
        const conflicts = await this.getConflictedRevs("autoTest.md" as FilePathWithPrefix);
        for (const rev of conflicts) {
            await this.delete("autoTest.md" as FilePathWithPrefix, rev);
        }
        await this.delete("autoTest.md" as FilePathWithPrefix);
        // OK, begin!

        await this._test(
            "storeContent",
            async () => await this.storeContent("autoTest.md" as FilePathWithPrefix, testString)
        );
        // For the test, we need to clear the caches.
        this.localDatabase.clearCaches();
        await this._test("readContent", async () => {
            const content = await this.fetch("autoTest.md" as FilePathWithPrefix);
            if (!content) return "File not found";
            if (content.deleted) return "File is deleted";
            return (await content.body.text()) == testString
                ? true
                : `Content is not the same: ${await content.body.text()}`;
        });
        await this._test("delete", async () => await this.delete("autoTest.md" as FilePathWithPrefix));
        await this._test("read deleted content", async () => {
            const content = await this.fetch("autoTest.md" as FilePathWithPrefix);
            if (!content) return true;
            if (content.deleted) return true;
            return `Still exists!: ${await content.body.text()},${JSON.stringify(content, undefined, 2)}`;
        });
        await delay(100);
        return this.testDone();
    }

    async checkIsTargetFile(file: UXFileInfoStub | FilePathWithPrefix): Promise<boolean> {
        const path = getStoragePathFromUXFileInfo(file);
        if (!(await this.services.vault.isTargetFile(path))) {
            this._log(`File is not a target`, LOG_LEVEL_VERBOSE);
            return false;
        }
        if (shouldBeIgnored(path)) {
            this._log(`File should be ignored`, LOG_LEVEL_VERBOSE);
            return false;
        }
        return true;
    }

    async delete(file: UXFileInfoStub | FilePathWithPrefix, rev?: string): Promise<boolean> {
        if (!(await this.checkIsTargetFile(file))) {
            return true;
        }
        const fullPath = getDatabasePathFromUXFileInfo(file);
        try {
            this._log(`deleteDB by path: ${fullPath}`);
            return await this.deleteFromDBbyPath(fullPath, rev);
        } catch (ex) {
            this._log(`Failed to delete ${fullPath}`);
            this._log(ex, LOG_LEVEL_VERBOSE);
            return false;
        }
    }

    async createChunks(file: UXFileInfo, force: boolean = false, skipCheck?: boolean): Promise<boolean> {
        return await this.__store(file, force, skipCheck, true);
    }

    async store(file: UXFileInfo, force: boolean = false, skipCheck?: boolean): Promise<boolean> {
        return await this.__store(file, force, skipCheck, false);
    }
    async storeContent(path: FilePathWithPrefix, content: string): Promise<boolean> {
        const blob = createTextBlob(content);
        const bytes = (await blob.arrayBuffer()).byteLength;
        const isInternal = path.startsWith(".") ? true : undefined;
        const dummyUXFileInfo: UXFileInfo = {
            name: path.split("/").pop() as string,
            path: path,
            stat: {
                size: bytes,
                ctime: Date.now(),
                mtime: Date.now(),
                type: "file",
            },
            body: blob,
            isInternal,
        };
        return await this.__store(dummyUXFileInfo, true, false, false);
    }

    private async __store(
        file: UXFileInfo,
        force: boolean = false,
        skipCheck?: boolean,
        onlyChunks?: boolean
    ): Promise<boolean> {
        if (!skipCheck) {
            if (!(await this.checkIsTargetFile(file))) {
                return true;
            }
        }
        if (!file) {
            this._log("File seems bad", LOG_LEVEL_VERBOSE);
            return false;
        }
        // const path = getPathFromUXFileInfo(file);
        const isPlain = isPlainText(file.name);
        const possiblyLarge = !isPlain;
        const content = file.body;

        const datatype = determineTypeFromBlob(content);
        const idPrefix = file.isInternal ? ICHeader : "";
        const fullPath = getStoragePathFromUXFileInfo(file);
        const fullPathOnDB = getDatabasePathFromUXFileInfo(file);

        if (possiblyLarge) this._log(`Processing: ${fullPath}`, LOG_LEVEL_VERBOSE);

        // if (isInternalMetadata(fullPath)) {
        //     this._log(`Internal file: ${fullPath}`, LOG_LEVEL_VERBOSE);
        //     return false;
        // }
        if (file.isInternal) {
            if (file.deleted) {
                file.stat = {
                    size: 0,
                    ctime: Date.now(),
                    mtime: Date.now(),
                    type: "file",
                };
            } else if (file.stat == undefined) {
                const stat = await this.core.storageAccess.statHidden(file.path);
                if (!stat) {
                    // By this point we should already know whether the file was actually
                    // deleted, so a missing stat is unexpected; raise an error.
                    this._log(`Internal file not found: ${fullPath}`, LOG_LEVEL_VERBOSE);
                    return false;
                }
                file.stat = stat;
            }
        }

        const idMain = await this.services.path.path2id(fullPath);

        const id = (idPrefix + idMain) as DocumentID;
        const d: SavingEntry = {
            _id: id,
            path: fullPathOnDB,
            data: content,
            ctime: file.stat.ctime,
            mtime: file.stat.mtime,
            size: file.stat.size,
            children: [],
            datatype: datatype,
            type: datatype,
            eden: {},
        };
        // Upserts must be serialised (locked) per file path.
        const msg = `STORAGE -> DB (${datatype}) `;
        const isNotChanged = await serialized("file-" + fullPath, async () => {
            if (force) {
                this._log(msg + "Force writing " + fullPath, LOG_LEVEL_VERBOSE);
                return false;
            }
            // Commented out temporarily: this checks whether we made the file ourselves.
            // if (this.core.storageAccess.recentlyTouched(file)) {
            //     return true;
            // }
            try {
                const old = await this.localDatabase.getDBEntry(d.path, undefined, false, true, false);
                if (old !== false) {
                    const oldData = { data: old.data, deleted: old._deleted || old.deleted };
                    const newData = { data: d.data, deleted: d._deleted || d.deleted };
                    if (oldData.deleted != newData.deleted) return false;
                    if (!(await isDocContentSame(old.data, newData.data))) return false;
                    this._log(
                        msg + "Skipped (not changed) " + fullPath + (d._deleted || d.deleted ? " (deleted)" : ""),
                        LOG_LEVEL_VERBOSE
                    );
                    markChangesAreSame(old, d.mtime, old.mtime);
                    return true;
                    // d._rev = old._rev;
                }
            } catch (ex) {
                this._log(
                    msg +
                        "Error: could not check the diff against the old entry. " +
                        (force ? "Force writing. " : "") +
                        fullPath +
                        (d._deleted || d.deleted ? " (deleted)" : ""),
                    LOG_LEVEL_VERBOSE
                );
                this._log(ex, LOG_LEVEL_VERBOSE);
                return !force;
            }
            return false;
        });
        if (isNotChanged) {
            this._log(msg + " Skip " + fullPath, LOG_LEVEL_VERBOSE);
            return true;
        }
        const ret = await this.localDatabase.putDBEntry(d, onlyChunks);
        if (ret !== false) {
            this._log(msg + fullPath);
            eventHub.emitEvent(EVENT_FILE_SAVED);
        }
        return ret != false;
    }

    async getConflictedRevs(file: UXFileInfoStub | FilePathWithPrefix): Promise<string[]> {
        if (!(await this.checkIsTargetFile(file))) {
            return [];
        }
        const filename = getDatabasePathFromUXFileInfo(file);
        const doc = await this.localDatabase.getDBEntryMeta(filename, { conflicts: true }, true);
        if (doc === false) {
            return [];
        }
        return doc._conflicts || [];
    }
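
    // The fetch* helpers below resolve a file into progressively richer views:
    // fetchEntryMeta (metadata only), fetchEntryFromMeta (content for given
    // metadata), fetchEntry (both), and fetch (a UXFileInfo for UI consumption).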
    async fetch(
        file: UXFileInfoStub | FilePathWithPrefix,
        rev?: string,
        waitForReady?: boolean,
        skipCheck = false
    ): Promise<UXFileInfo | false> {
        // Verify the file is a sync target unless the caller has already checked it.
        if (!skipCheck && !(await this.checkIsTargetFile(file))) {
            return false;
        }

        const entry = await this.fetchEntry(file, rev, waitForReady, true);
        if (entry === false) {
            return false;
        }
        const data = createBlob(readContent(entry));
        const path = stripAllPrefixes(entry.path);
        const fileInfo: UXFileInfo = {
            name: path.split("/").pop() as string,
            path: path,
            stat: {
                size: entry.size,
                ctime: entry.ctime,
                mtime: entry.mtime,
                type: "file",
            },
            body: data,
            deleted: entry.deleted || entry._deleted,
        };
        if (isInternalMetadata(entry.path)) {
            fileInfo.isInternal = true;
        }
        return fileInfo;
    }
    async fetchEntryMeta(
        file: UXFileInfoStub | FilePathWithPrefix,
        rev?: string,
        skipCheck = false
    ): Promise<MetaEntry | false> {
        const dbFileName = getDatabasePathFromUXFileInfo(file);
        if (!skipCheck && !(await this.checkIsTargetFile(file))) {
            return false;
        }

        const doc = await this.localDatabase.getDBEntryMeta(dbFileName, rev ? { rev: rev } : undefined, true);
        if (doc === false) {
            return false;
        }
        return doc as MetaEntry;
    }
    async fetchEntryFromMeta(
        meta: MetaEntry,
        waitForReady: boolean = true,
        skipCheck = false
    ): Promise<LoadedEntry | false> {
        if (!skipCheck && !(await this.checkIsTargetFile(meta.path))) {
            return false;
        }
        const doc = await this.localDatabase.getDBEntryFromMeta(meta as LoadedEntry, false, waitForReady);
        if (doc === false) {
            return false;
        }
        return doc;
    }
    async fetchEntry(
        file: UXFileInfoStub | FilePathWithPrefix,
        rev?: string,
        waitForReady: boolean = true,
        skipCheck = false
    ): Promise<LoadedEntry | false> {
        if (!skipCheck && !(await this.checkIsTargetFile(file))) {
            return false;
        }
        const entry = await this.fetchEntryMeta(file, rev, true);
        if (entry === false) {
            return false;
        }
        const doc = await this.fetchEntryFromMeta(entry, waitForReady, true);
        return doc;
    }
    async deleteFromDBbyPath(fullPath: FilePath | FilePathWithPrefix, rev?: string): Promise<boolean> {
        if (!(await this.checkIsTargetFile(fullPath))) {
            this._log(`deleteFromDBbyPath: File is not a target: ${fullPath}`);
            return true;
        }
        const opt = rev ? { rev: rev } : undefined;
        const ret = await this.localDatabase.deleteDBEntry(fullPath, opt);
        eventHub.emitEvent(EVENT_FILE_SAVED);
        return ret;
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnLoaded(this._everyOnload.bind(this));
        services.test.handleTest(this._everyModuleTest.bind(this));
    }
}

440 src/modules/core/ModuleFileHandler.ts (new file)
@@ -0,0 +1,440 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { serialized } from "octagonal-wheels/concurrency/lock";
import type { FileEventItem } from "../../common/types";
import type {
    FilePath,
    FilePathWithPrefix,
    MetaEntry,
    UXFileInfo,
    UXFileInfoStub,
    UXInternalFileInfoStub,
} from "../../lib/src/common/types";
import { AbstractModule } from "../AbstractModule.ts";
import {
    compareFileFreshness,
    EVEN,
    getPath,
    getPathWithoutPrefix,
    getStoragePathFromUXFileInfo,
    markChangesAreSame,
} from "../../common/utils";
import { getDocDataAsArray, isDocContentSame, readAsBlob, readContent } from "../../lib/src/common/utils";
import { shouldBeIgnored } from "../../lib/src/string_and_binary/path";
import { Semaphore } from "octagonal-wheels/concurrency/semaphore";
import { eventHub } from "../../common/events.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleFileHandler extends AbstractModule {
    get db() {
        return this.core.databaseFileAccess;
    }
    get storage() {
        return this.core.storageAccess;
    }

    _everyOnloadStart(): Promise<boolean> {
        this.core.fileHandler = this;
        return Promise.resolve(true);
    }

    async readFileFromStub(file: UXFileInfoStub | UXFileInfo) {
        if ("body" in file && file.body) {
            return file;
        }
        const readFile = await this.storage.readStubContent(file);
        if (!readFile) {
            throw new Error(`File ${file.path} does not exist on the storage`);
        }
        return readFile;
    }
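
    // Store a storage-side file into the local database, skipping the write when
    // both the timestamp and the content are unchanged.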
    async storeFileToDB(
        info: UXFileInfoStub | UXFileInfo | UXInternalFileInfoStub | FilePathWithPrefix,
        force: boolean = false,
        onlyChunks: boolean = false
    ): Promise<boolean> {
        const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
        if (file == null) {
            this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
            return false;
        }
        // const file = item.args.file;
        if (file.isInternal) {
            this._log(
                `Internal file ${file.path} is not allowed to be processed on processFileEvent`,
                LOG_LEVEL_VERBOSE
            );
            return false;
        }
        // First, check the file on the database.
        const entry = await this.db.fetchEntry(file, undefined, true, true);

        if (!entry || entry.deleted || entry._deleted) {
            // If the file does not exist on the database, it should be created.
            const readFile = await this.readFileFromStub(file);
            if (!onlyChunks) {
                return await this.db.store(readFile);
            } else {
                return await this.db.createChunks(readFile, false, true);
            }
        }

        // The entry exists on the database; check the difference between the file and the entry.

        let shouldApplied = false;
        if (!force && !onlyChunks) {
            // 1. If the timestamps differ significantly, the file should be updated.
            //    Note: This checks only the mtime, with the resolution reduced to 2 seconds.
            //    The 2-second resolution is for ZIP files' mtime; without it, we could not
            //    back up the vault as a ZIP file.
            //    This is hardcoded in `compareMtime` of `src/common/utils.ts`.
            if (compareFileFreshness(file, entry) !== EVEN) {
                shouldApplied = true;
            }
            // 2. If not, the content should be checked.
            let readFile: UXFileInfo | undefined = undefined;
            if (!shouldApplied) {
                readFile = await this.readFileFromStub(file);
                if (!readFile) {
                    this._log(`File ${file.path} does not exist on the storage`, LOG_LEVEL_NOTICE);
                    return false;
                }
                if (await isDocContentSame(getDocDataAsArray(entry.data), readFile.body)) {
                    // The timestamps differ but the content is the same; treat the two
                    // timestamps as equivalent and mark the changes as the same.
                    markChangesAreSame(readFile, readFile.stat.mtime, entry.mtime);
                } else {
                    shouldApplied = true;
                }
            }

            if (!shouldApplied) {
                this._log(`File ${file.path} is not changed`, LOG_LEVEL_VERBOSE);
                return true;
            }
            if (!readFile) readFile = await this.readFileFromStub(file);
            // The file has changed, so it should be stored.
            if (onlyChunks) {
                return await this.db.createChunks(readFile, false, true);
            } else {
                return await this.db.store(readFile, false, true);
            }
        } else {
            // If force is true, the file should be updated unconditionally.
            const readFile = await this.readFileFromStub(file);
            if (onlyChunks) {
                return await this.db.createChunks(readFile, true, true);
            } else {
                return await this.db.store(readFile, true, true);
            }
        }
    }

    async deleteFileFromDB(info: UXFileInfoStub | UXInternalFileInfoStub | FilePath): Promise<boolean> {
        const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
        if (file == null) {
            this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
            return false;
        }
        // const file = item.args.file;
        if (file.isInternal) {
            this._log(
                `Internal file ${file.path} is not allowed to be processed on processFileEvent`,
                LOG_LEVEL_VERBOSE
            );
            return false;
        }
        // First, check the file on the database.
        const entry = await this.db.fetchEntry(file, undefined, true, true);
        if (!entry || entry.deleted || entry._deleted) {
            this._log(`File ${file.path} does not exist, or is already deleted, on the database`, LOG_LEVEL_VERBOSE);
            return false;
        }
        // Check whether the file is already conflicted; if so, only the conflicted revision should be deleted.
        const conflictedRevs = await this.db.getConflictedRevs(file);
        if (conflictedRevs.length > 0) {
            // If conflicted, it should be deleted. entry._rev should be our own file's rev.
            // TODO: I BELIEVED SO. BUT I NOTICED THAT I AM NOT SURE. I SHOULD CHECK THIS.
            // ANYWAY, I SHOULD DELETE THE FILE. ACTUALLY WE SIMPLY DELETED THE FILE UNTIL PREVIOUS VERSIONS.
            return await this.db.delete(file, entry._rev);
        }
        // Otherwise, the file should simply be deleted. This is the previous behaviour.
        return await this.db.delete(file);
    }

    async deleteRevisionFromDB(
        info: UXFileInfoStub | FilePath | FilePathWithPrefix,
        rev: string
    ): Promise<boolean | undefined> {
        // TODO: Possibly check for conflicts.
        return await this.db.delete(info, rev);
    }

    async resolveConflictedByDeletingRevision(
        info: UXFileInfoStub | FilePath,
        rev: string
    ): Promise<boolean | undefined> {
        const path = getStoragePathFromUXFileInfo(info);
        if (!(await this.deleteRevisionFromDB(info, rev))) {
            this._log(`Failed to delete the conflicted revision ${rev} of ${path}`, LOG_LEVEL_VERBOSE);
            return false;
        }
        if (!(await this.dbToStorageWithSpecificRev(info, rev, true))) {
            this._log(`Failed to apply the resolved revision ${rev} of ${path} to the storage`, LOG_LEVEL_VERBOSE);
            return false;
        }
    }

    async dbToStorageWithSpecificRev(
        info: UXFileInfoStub | UXFileInfo | FilePath | null,
        rev: string,
        force?: boolean
    ): Promise<boolean> {
        const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
        if (file == null) {
            this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
            return false;
        }
        const docEntry = await this.db.fetchEntryMeta(file, rev, true);
        if (!docEntry) {
            this._log(`File ${file.path} does not exist on the database`, LOG_LEVEL_VERBOSE);
            return false;
        }
        return await this.dbToStorage(docEntry, file, force);
    }
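
    // Write a database entry out to the storage, deferring when unresolved
    // conflicts exist and comparing timestamps and content first so that
    // unchanged files are not rewritten.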
    async dbToStorage(
        entryInfo: MetaEntry | FilePathWithPrefix,
        info: UXFileInfoStub | UXFileInfo | FilePath | null,
        force?: boolean
    ): Promise<boolean> {
        const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
        const mode = file == null ? "create" : "modify";
        const pathFromEntryInfo = typeof entryInfo === "string" ? entryInfo : getPath(entryInfo);
        const docEntry = await this.db.fetchEntryMeta(pathFromEntryInfo, undefined, true);
        if (!docEntry) {
            this._log(`File ${pathFromEntryInfo} does not exist on the database`, LOG_LEVEL_VERBOSE);
            return false;
        }
        const path = getPath(docEntry);

        // 1. Check whether it is already conflicted.
        const revs = await this.db.getConflictedRevs(path);
        if (revs.length > 0) {
            // Some conflicts exist.
            if (this.settings.writeDocumentsIfConflicted) {
                // If configured to write the document even while conflicted, it should be written.
                // NO OP
            } else {
                // If not, it should be checked, and will be processed later (i.e., after the conflict is resolved).
                await this.services.conflict.queueCheckForIfOpen(path);
                return true;
            }
        }

        // 2. Check whether the file already exists on the storage.
        const existDoc = this.storage.getStub(path);
        if (existDoc && existDoc.isFolder) {
            this._log(`Folder ${path} already exists on the storage as a folder`, LOG_LEVEL_VERBOSE);
            // We can do nothing, and other modules also have nothing to do.
            return true;
        }

        // Check the existence of both the file and the docEntry.
        const existOnDB = !(docEntry._deleted || docEntry.deleted || false);
        const existOnStorage = existDoc != null;
        if (!existOnDB && !existOnStorage) {
            this._log(`File ${path} seems to be deleted, but it is already not on the storage`, LOG_LEVEL_VERBOSE);
            return true;
        }
        if (!existOnDB && existOnStorage) {
            // The deletion has been transferred. The storage file will be deleted.
            // Note: If the folder becomes empty, it will also be deleted unless configured to be kept.
            // This behaviour is implemented in `ModuleFileAccessObsidian`,
            // and it does not check whether the deletion actually happened.
            await this.storage.deleteVaultItem(path);
            return true;
        }
        // Okay, the file exists on the database. Let's check whether it exists on the storage.
        const docRead = await this.db.fetchEntryFromMeta(docEntry);
        if (!docRead) {
            this._log(`File ${path} does not exist on the database`, LOG_LEVEL_VERBOSE);
            return false;
        }

        // If we want to process size-mismatched files -- in case of having files created by some integrations -- enable the toggle.
        if (!this.settings.processSizeMismatchedFiles) {
            // Check that the file is not corrupted
            // (zero is a special case: it may be created by some APIs and might be acceptable).
            if (docRead.size != 0 && docRead.size !== readAsBlob(docRead).size) {
                this._log(`File ${path} seems to be corrupted! Writing prevented.`, LOG_LEVEL_NOTICE);
                return false;
            }
        }

        const docData = readContent(docRead);

        if (existOnStorage && !force) {
            // The file exists on the storage. Let's check the difference between the file and the entry.
            // (If force were true, it would have been updated unconditionally.)
            // OK, we have to compare.
            let shouldApply = false;
            // 1. If the timestamps are far apart, the entry should be applied.
            //    Note: This checks only the mtime, with the resolution reduced to 2 seconds.
            //    The 2 seconds are for ZIP files' mtime; without this, we could not back up the vault as a ZIP file.
            //    This is hardcoded in `compareMtime` of `src/common/utils.ts`.
            if (compareFileFreshness(existDoc, docEntry) !== EVEN) {
                shouldApply = true;
            }
            // 2. If not, the content should be checked.

            if (!shouldApply) {
                const readFile = await this.readFileFromStub(existDoc);
                if (await isDocContentSame(docData, readFile.body)) {
                    // The content is the same, so we do not need to update the file.
                    shouldApply = false;
                    // The timestamps differ but the content is the same; the two timestamps should be treated as equal.
                    // So, mark the changes as the same.
                    markChangesAreSame(docRead, docRead.mtime, existDoc.stat.mtime);
                } else {
                    shouldApply = true;
                }
            }
            if (!shouldApply) {
                this._log(`File ${docRead.path} is not changed`, LOG_LEVEL_VERBOSE);
                return true;
            }
            // Let's apply the changes.
        } else {
            this._log(
                `File ${docRead.path} ${!existOnStorage ? "(new) " : ""} ${force ? " (forced)" : ""}`,
                LOG_LEVEL_VERBOSE
            );
        }
        await this.storage.ensureDir(path);
        const ret = await this.storage.writeFileAuto(path, docData, { ctime: docRead.ctime, mtime: docRead.mtime });
        await this.storage.touched(path);
        this.storage.triggerFileEvent(mode, path);
        return ret;
    }

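    /**
     * Handle a queued file event from the storage side (create / change / delete),
     * serialised per path so that events for the same file never interleave.
     */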
    private async _anyHandlerProcessesFileEvent(item: FileEventItem): Promise<boolean> {
        const eventItem = item.args;
        const type = item.type;
        const path = eventItem.file.path;
        if (!(await this.services.vault.isTargetFile(path))) {
            this._log(`File ${path} is not the target file`, LOG_LEVEL_VERBOSE);
            return false;
        }
        if (shouldBeIgnored(path)) {
            this._log(`File ${path} should be ignored`, LOG_LEVEL_VERBOSE);
            return false;
        }
        const lockKey = `processFileEvent-${path}`;
        return await serialized(lockKey, async () => {
            switch (type) {
                case "CREATE":
                case "CHANGED":
                    return await this.storeFileToDB(item.args.file);
                case "DELETE":
                    return await this.deleteFileFromDB(item.args.file);
                case "INTERNAL":
                    // This should be handled by another module.
                    return false;
                default:
                    this._log(`Unsupported event type: ${type}`, LOG_LEVEL_VERBOSE);
                    return false;
            }
        });
    }

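    /**
     * Process a single replicated document: filter out non-target, oversized, and ignored
     * files, then hand the entry to dbToStorage, serialised per path.
     */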
    async _anyProcessReplicatedDoc(entry: MetaEntry): Promise<boolean> {
        return await serialized(entry.path, async () => {
            if (!(await this.services.vault.isTargetFile(entry.path))) {
                this._log(`File ${entry.path} is not the target file`, LOG_LEVEL_VERBOSE);
                return false;
            }
            if (this.services.vault.isFileSizeTooLarge(entry.size)) {
                this._log(`File ${entry.path} is too large (on database) to be processed`, LOG_LEVEL_VERBOSE);
                return false;
            }
            if (shouldBeIgnored(entry.path)) {
                this._log(`File ${entry.path} should be ignored`, LOG_LEVEL_VERBOSE);
                return false;
            }
            const path = getPath(entry);

            const targetFile = this.storage.getStub(getPathWithoutPrefix(entry));
            if (targetFile && targetFile.isFolder) {
                this._log(`${getPath(entry)} already exists as a folder`);
                // Nothing to do, and other modules also have nothing to do.
                return true;
            } else {
                if (targetFile && this.services.vault.isFileSizeTooLarge(targetFile.stat.size)) {
                    this._log(`File ${targetFile.path} is too large (on storage) to be processed`, LOG_LEVEL_VERBOSE);
                    return false;
                }
                this._log(
                    `Processing ${path} (${entry._id.substring(0, 8)} :${entry._rev?.substring(0, 5)}) : Started...`,
                    LOG_LEVEL_VERBOSE
                );
                // Before writing (or skipping), any open merging dialogue should be cancelled.
                eventHub.emitEvent("conflict-cancelled", path);
                const ret = await this.dbToStorage(entry, targetFile);
                this._log(`Processing ${path} (${entry._id.substring(0, 8)} :${entry._rev?.substring(0, 5)}) : Done`);
                return ret;
            }
        });
    }

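    /**
     * Scan every file on the storage and store it into the local database so that all chunks
     * exist locally, throttled by a semaphore. Useful before a fetch-style rebuild.
     */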
    async createAllChunks(showingNotice?: boolean): Promise<void> {
        this._log("Collecting local files on the storage", LOG_LEVEL_VERBOSE);
        const semaphore = Semaphore(10);

        let processed = 0;
        const filesStorageSrc = this.storage.getFiles();
        const incProcessed = () => {
            processed++;
            if (processed % 25 == 0)
                this._log(
                    `Creating missing chunks: ${processed} of ${total} files`,
                    showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO,
                    "chunkCreation"
                );
        };
        const total = filesStorageSrc.length;
        const procAllChunks = filesStorageSrc.map(async (file) => {
            if (!(await this.services.vault.isTargetFile(file))) {
                incProcessed();
                return true;
            }
            if (this.services.vault.isFileSizeTooLarge(file.stat.size)) {
                incProcessed();
                return true;
            }
            if (shouldBeIgnored(file.path)) {
                incProcessed();
                return true;
            }
            const release = await semaphore.acquire();
            incProcessed();
            try {
                await this.storeFileToDB(file, false, true);
            } catch (ex) {
                this._log(ex, LOG_LEVEL_VERBOSE);
            } finally {
                release();
            }
        });
        await Promise.all(procAllChunks);
        this._log(
            `Creating chunks Done: ${processed} of ${total} files`,
            showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO,
            "chunkCreation"
        );
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnInitialise(this._everyOnloadStart.bind(this));
        services.fileProcessing.handleProcessFileEvent(this._anyHandlerProcessesFileEvent.bind(this));
        services.replication.handleProcessSynchroniseResult(this._anyProcessReplicatedDoc.bind(this));
    }
}

src/modules/core/ModuleLocalDatabaseObsidian.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
import { $msg } from "../../lib/src/common/i18n";
import { LiveSyncLocalDB } from "../../lib/src/pouchdb/LiveSyncLocalDB.ts";
import { initializeStores } from "../../common/stores.ts";
import { AbstractModule } from "../AbstractModule.ts";
import { LiveSyncManagers } from "../../lib/src/managers/LiveSyncManagers.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleLocalDatabaseObsidian extends AbstractModule {
    _everyOnloadStart(): Promise<boolean> {
        return Promise.resolve(true);
    }
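    /**
     * (Re)open the local database: close any previous instance, wire up the manager layer
     * with live accessors for the database and settings, and initialise the stores.
     */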
    private async _openDatabase(): Promise<boolean> {
        if (this.localDatabase != null) {
            await this.localDatabase.close();
        }
        const vaultName = this.services.vault.getVaultName();
        this._log($msg("moduleLocalDatabase.logWaitingForReady"));
        const getDB = () => this.core.localDatabase.localDatabase;
        const getSettings = () => this.core.settings;
        this.core.managers = new LiveSyncManagers({
            get database() {
                return getDB();
            },
            getActiveReplicator: () => this.core.replicator,
            id2path: this.services.path.id2path,
            // path2id: this.core.$$path2id.bind(this.core),
            path2id: this.services.path.path2id,
            get settings() {
                return getSettings();
            },
        });
        this.core.localDatabase = new LiveSyncLocalDB(vaultName, this.core);

        initializeStores(vaultName);
        return await this.localDatabase.initializeDatabase();
    }

    _isDatabaseReady(): boolean {
        return this.localDatabase != null && this.localDatabase.isReady;
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.database.handleIsDatabaseReady(this._isDatabaseReady.bind(this));
        services.appLifecycle.handleOnInitialise(this._everyOnloadStart.bind(this));
        services.database.handleOpenDatabase(this._openDatabase.bind(this));
    }
}

src/modules/core/ModulePeriodicProcess.ts (new file, 41 lines)
@@ -0,0 +1,41 @@
import { PeriodicProcessor } from "../../common/utils";
import type { LiveSyncCore } from "../../main";
import { AbstractModule } from "../AbstractModule";

export class ModulePeriodicProcess extends AbstractModule {
    periodicSyncProcessor = new PeriodicProcessor(this.core, async () => await this.services.replication.replicate());

    disablePeriodic() {
        this.periodicSyncProcessor?.disable();
        return Promise.resolve(true);
    }
    resumePeriodic() {
        this.periodicSyncProcessor.enable(
            this.settings.periodicReplication ? this.settings.periodicReplicationInterval * 1000 : 0
        );
        return Promise.resolve(true);
    }
    private _allOnUnload() {
        return this.disablePeriodic();
    }
    private _everyBeforeRealizeSetting(): Promise<boolean> {
        return this.disablePeriodic();
    }
    private _everyBeforeSuspendProcess(): Promise<boolean> {
        return this.disablePeriodic();
    }
    private _everyAfterResumeProcess(): Promise<boolean> {
        return this.resumePeriodic();
    }
    private _everyAfterRealizeSetting(): Promise<boolean> {
        return this.resumePeriodic();
    }

    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnUnload(this._allOnUnload.bind(this));
        services.setting.handleBeforeRealiseSetting(this._everyBeforeRealizeSetting.bind(this));
        services.setting.handleSettingRealised(this._everyAfterRealizeSetting.bind(this));
        services.appLifecycle.handleOnSuspending(this._everyBeforeSuspendProcess.bind(this));
        services.appLifecycle.handleOnResumed(this._everyAfterResumeProcess.bind(this));
    }
}

src/modules/core/ModulePouchDB.ts (new file, 23 lines)
@@ -0,0 +1,23 @@
import { AbstractModule } from "../AbstractModule";
import { PouchDB } from "../../lib/src/pouchdb/pouchdb-browser";
import type { LiveSyncCore } from "../../main";
import { ExtraSuffixIndexedDB } from "../../lib/src/common/types";

export class ModulePouchDB extends AbstractModule {
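    /**
     * Create the underlying PouchDB instance. When the IndexedDB adapter is enabled,
     * a suffixed database name is used so that the two adapters never share a database.
     * A minimal usage sketch (hypothetical names):
     * // const db = modulePouchDB._createPouchDBInstance<EntryDoc>("vaultname-livesync");
     */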
    _createPouchDBInstance<T extends object>(
        name?: string,
        options?: PouchDB.Configuration.DatabaseConfiguration
    ): PouchDB.Database<T> {
        const optionPass = options ?? {};
        if (this.settings.useIndexedDBAdapter) {
            optionPass.adapter = "indexeddb";
            //@ts-ignore :missing def
            optionPass.purged_infos_limit = 1;
            return new PouchDB(name + ExtraSuffixIndexedDB, optionPass);
        }
        return new PouchDB(name, optionPass);
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.database.handleCreatePouchDBInstance(this._createPouchDBInstance.bind(this));
    }
}

src/modules/core/ModuleRebuilder.ts (new file, 281 lines)
@@ -0,0 +1,281 @@
import { delay } from "octagonal-wheels/promises";
import {
    FLAGMD_REDFLAG2_HR,
    FLAGMD_REDFLAG3_HR,
    LOG_LEVEL_NOTICE,
    LOG_LEVEL_VERBOSE,
    REMOTE_COUCHDB,
    REMOTE_MINIO,
} from "../../lib/src/common/types.ts";
import { AbstractModule } from "../AbstractModule.ts";
import type { Rebuilder } from "../interfaces/DatabaseRebuilder.ts";
import type { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator.ts";
import { fetchAllUsedChunks } from "@/lib/src/pouchdb/chunks.ts";
import { EVENT_DATABASE_REBUILT, eventHub } from "src/common/events.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleRebuilder extends AbstractModule implements Rebuilder {
    private _everyOnload(): Promise<boolean> {
        this.core.rebuilder = this;
        return Promise.resolve(true);
    }
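    /**
     * Entry point for the four rebuild strategies: fetch this device from the remote
     * (optionally creating local chunks first), rebuild the remote from this device,
     * or rebuild both sides from this device.
     */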
    async $performRebuildDB(
        method: "localOnly" | "remoteOnly" | "rebuildBothByThisDevice" | "localOnlyWithChunks"
    ): Promise<void> {
        if (method == "localOnly") {
            await this.$fetchLocal();
        }
        if (method == "localOnlyWithChunks") {
            await this.$fetchLocal(true);
        }
        if (method == "remoteOnly") {
            await this.$rebuildRemote();
        }
        if (method == "rebuildBothByThisDevice") {
            await this.$rebuildEverything();
        }
    }

    async informOptionalFeatures() {
        await this.core.services.UI.showMarkdownDialog(
            "All optional features are disabled",
            `Customisation Sync and Hidden File Sync will all be disabled.
Please enable them from the settings screen after setup is complete.`,
            ["OK"]
        );
    }
    async askUsingOptionalFeature(opt: { enableFetch?: boolean; enableOverwrite?: boolean }) {
        if (
            (await this.core.confirm.askYesNoDialog(
                "Do you want to enable extra features? If you are new to Self-hosted LiveSync, try the core features first!",
                { title: "Enable extra features", defaultOption: "No", timeout: 15 }
            )) == "yes"
        ) {
            await this.services.setting.suggestOptionalFeatures(opt);
        }
    }

    async rebuildRemote() {
        await this.services.setting.suspendExtraSync();
        this.core.settings.isConfigured = true;

        await this.services.setting.realiseSetting();
        await this.services.remote.markLocked();
        await this.services.remote.tryResetDatabase();
        await this.services.remote.markLocked();
        await delay(500);
        // await this.askUsingOptionalFeature({ enableOverwrite: true });
        await delay(1000);
        await this.services.remote.replicateAllToRemote(true);
        await delay(1000);
        await this.services.remote.replicateAllToRemote(true, true);
        await this.informOptionalFeatures();
    }
    $rebuildRemote(): Promise<void> {
        return this.rebuildRemote();
    }

    async rebuildEverything() {
        await this.services.setting.suspendExtraSync();
        // await this.askUseNewAdapter();
        this.core.settings.isConfigured = true;
        await this.services.setting.realiseSetting();
        await this.resetLocalDatabase();
        await delay(1000);
        await this.services.databaseEvents.initialiseDatabase(true, true, true);
        await this.services.remote.markLocked();
        await this.services.remote.tryResetDatabase();
        await this.services.remote.markLocked();
        await delay(500);
        // We do not have any other devices' data, so we do not need to ask about overwriting.
        // await this.askUsingOptionalFeature({ enableOverwrite: false });
        await delay(1000);
        await this.services.remote.replicateAllToRemote(true);
        await delay(1000);
        await this.services.remote.replicateAllToRemote(true, true);
        await this.informOptionalFeatures();
    }

    $rebuildEverything(): Promise<void> {
        return this.rebuildEverything();
    }

    $fetchLocal(makeLocalChunkBeforeSync?: boolean, preventMakeLocalFilesBeforeSync?: boolean): Promise<void> {
        return this.fetchLocal(makeLocalChunkBeforeSync, preventMakeLocalFilesBeforeSync);
    }

    async scheduleRebuild(): Promise<void> {
        try {
            await this.core.storageAccess.writeFileAuto(FLAGMD_REDFLAG2_HR, "");
        } catch (ex) {
            this._log("Could not create red_flag_rebuild.md", LOG_LEVEL_NOTICE);
            this._log(ex, LOG_LEVEL_VERBOSE);
        }
        this.services.appLifecycle.performRestart();
    }
    async scheduleFetch(): Promise<void> {
        try {
            await this.core.storageAccess.writeFileAuto(FLAGMD_REDFLAG3_HR, "");
        } catch (ex) {
            this._log("Could not create red_flag_fetch.md", LOG_LEVEL_NOTICE);
            this._log(ex, LOG_LEVEL_VERBOSE);
        }
        this.services.appLifecycle.performRestart();
    }

    private async _tryResetRemoteDatabase(): Promise<void> {
        await this.core.replicator.tryResetRemoteDatabase(this.settings);
    }

    private async _tryCreateRemoteDatabase(): Promise<void> {
        await this.core.replicator.tryCreateRemoteDatabase(this.settings);
    }

    private async _resetLocalDatabase(): Promise<boolean> {
        this.core.storageAccess.clearTouched();
        return await this.localDatabase.resetDatabase();
    }

    async suspendAllSync() {
        this.core.settings.liveSync = false;
        this.core.settings.periodicReplication = false;
        this.core.settings.syncOnSave = false;
        this.core.settings.syncOnEditorSave = false;
        this.core.settings.syncOnStart = false;
        this.core.settings.syncOnFileOpen = false;
        this.core.settings.syncAfterMerge = false;
        await this.services.setting.suspendExtraSync();
    }
    async suspendReflectingDatabase() {
        if (this.core.settings.doNotSuspendOnFetching) return;
        if (this.core.settings.remoteType == REMOTE_MINIO) return;
        this._log(
            `Suspending reflection: database and storage changes will not be reflected in each other until fetching has completely finished.`,
            LOG_LEVEL_NOTICE
        );
        this.core.settings.suspendParseReplicationResult = true;
        this.core.settings.suspendFileWatching = true;
        await this.core.saveSettings();
    }
    async resumeReflectingDatabase() {
        if (this.core.settings.doNotSuspendOnFetching) return;
        if (this.core.settings.remoteType == REMOTE_MINIO) return;
        this._log(`Database and storage reflection has been resumed!`, LOG_LEVEL_NOTICE);
        this.core.settings.suspendParseReplicationResult = false;
        this.core.settings.suspendFileWatching = false;
        await this.services.vault.scanVault(true);
        await this.services.replication.onBeforeReplicate(false); // TODO: Check whether this is actually needed.
        await this.core.saveSettings();
    }
    // No longer needed; both adapters have their own advantages and disadvantages.
    // async askUseNewAdapter() {
    //     if (!this.core.settings.useIndexedDBAdapter) {
    //         const message = `Now this core has been configured to use the old database adapter to keep compatibility. Do you want to deactivate it?`;
    //         const CHOICE_YES = "Yes, disable and use latest";
    //         const CHOICE_NO = "No, keep compatibility";
    //         const choices = [CHOICE_YES, CHOICE_NO];
    //
    //         const ret = await this.core.confirm.confirmWithMessage(
    //             "Database adapter",
    //             message,
    //             choices,
    //             CHOICE_YES,
    //             10
    //         );
    //         if (ret == CHOICE_YES) {
    //             this.core.settings.useIndexedDBAdapter = true;
    //         }
    //     }
    // }
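    /**
     * Discard the local database and fetch everything from the remote again.
     * While fetching, reflection between the database and the storage is suspended so that
     * half-transferred documents are not written into the vault.
     */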
    async fetchLocal(makeLocalChunkBeforeSync?: boolean, preventMakeLocalFilesBeforeSync?: boolean) {
        await this.services.setting.suspendExtraSync();
        // await this.askUseNewAdapter();
        this.core.settings.isConfigured = true;
        await this.suspendReflectingDatabase();
        await this.services.setting.realiseSetting();
        await this.resetLocalDatabase();
        await delay(1000);
        await this.services.database.openDatabase();
        // this.core.isReady = true;
        this.services.appLifecycle.markIsReady();
        if (makeLocalChunkBeforeSync) {
            await this.core.fileHandler.createAllChunks(true);
        } else if (!preventMakeLocalFilesBeforeSync) {
            await this.services.databaseEvents.initialiseDatabase(true, true, true);
        } else {
            // Do not create local file entries before sync (i.e., use the remote information).
        }
        await this.services.remote.markResolved();
        await delay(500);
        await this.services.remote.replicateAllFromRemote(true);
        await delay(1000);
        await this.services.remote.replicateAllFromRemote(true);
        await this.resumeReflectingDatabase();
        await this.informOptionalFeatures();
        // No longer enabled:
        // await this.askUsingOptionalFeature({ enableFetch: true });
    }
    async fetchLocalWithRebuild() {
        return await this.fetchLocal(true);
    }

    private async _allSuspendAllSync(): Promise<boolean> {
        await this.suspendAllSync();
        return true;
    }

    async resetLocalDatabase() {
        if (this.core.settings.isConfigured && this.core.settings.additionalSuffixOfDatabaseName == "") {
            // Discard the non-suffixed database
            await this.services.database.resetDatabase();
        }
        const suffix = this.services.API.getAppID() || "";
        this.core.settings.additionalSuffixOfDatabaseName = suffix;
        await this.services.database.resetDatabase();
        eventHub.emitEvent(EVENT_DATABASE_REBUILT);
    }
    async fetchRemoteChunks() {
        if (
            !this.core.settings.doNotSuspendOnFetching &&
            this.core.settings.readChunksOnline &&
            this.core.settings.remoteType == REMOTE_COUCHDB
        ) {
            this._log(`Fetching chunks`, LOG_LEVEL_NOTICE);
            const replicator = this.services.replicator.getActiveReplicator() as LiveSyncCouchDBReplicator;
            const remoteDB = await replicator.connectRemoteCouchDBWithSetting(
                this.settings,
                this.services.API.isMobile(),
                true
            );
            if (typeof remoteDB == "string") {
                this._log(remoteDB, LOG_LEVEL_NOTICE);
            } else {
                await fetchAllUsedChunks(this.localDatabase.localDatabase, remoteDB.db);
            }
            this._log(`Fetching chunks done`, LOG_LEVEL_NOTICE);
        }
    }
    async resolveAllConflictedFilesByNewerOnes() {
        this._log(`Resolving conflicts by the newer ones`, LOG_LEVEL_NOTICE);
        const files = this.core.storageAccess.getFileNames();

        let i = 0;
        for (const file of files) {
            if (i++ % 10 == 0)
                this._log(
                    `Checking and processing ${i} / ${files.length}`,
                    LOG_LEVEL_NOTICE,
                    "resolveAllConflictedFilesByNewerOnes"
                );
            await this.services.conflict.resolveByNewest(file);
        }
        this._log(`Done!`, LOG_LEVEL_NOTICE, "resolveAllConflictedFilesByNewerOnes");
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnLoaded(this._everyOnload.bind(this));
        services.database.handleResetDatabase(this._resetLocalDatabase.bind(this));
        services.remote.handleTryResetDatabase(this._tryResetRemoteDatabase.bind(this));
        services.remote.handleTryCreateDatabase(this._tryCreateRemoteDatabase.bind(this));
        services.setting.handleSuspendAllSync(this._allSuspendAllSync.bind(this));
    }
}

src/modules/core/ModuleReplicator.ts (new file, 594 lines)
@@ -0,0 +1,594 @@
import { fireAndForget, yieldMicrotask } from "octagonal-wheels/promises";
import type { LiveSyncLocalDB } from "../../lib/src/pouchdb/LiveSyncLocalDB";
import { AbstractModule } from "../AbstractModule";
import {
    Logger,
    LOG_LEVEL_NOTICE,
    LOG_LEVEL_INFO,
    LOG_LEVEL_VERBOSE,
    LEVEL_NOTICE,
    LEVEL_INFO,
    type LOG_LEVEL,
} from "octagonal-wheels/common/logger";
import { isLockAcquired, shareRunningResult, skipIfDuplicated } from "octagonal-wheels/concurrency/lock";
import { balanceChunkPurgedDBs } from "@/lib/src/pouchdb/chunks";
import { purgeUnreferencedChunks } from "@/lib/src/pouchdb/chunks";
import { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator";
import { throttle } from "octagonal-wheels/function";
import { arrayToChunkedArray } from "octagonal-wheels/collection";
import {
    SYNCINFO_ID,
    VER,
    type EntryBody,
    type EntryDoc,
    type EntryLeaf,
    type LoadedEntry,
    type MetaEntry,
    type RemoteType,
} from "../../lib/src/common/types";
import { QueueProcessor } from "octagonal-wheels/concurrency/processor";
import {
    getPath,
    isChunk,
    isValidPath,
    rateLimitedSharedExecution,
    scheduleTask,
    updatePreviousExecutionTime,
} from "../../common/utils";
import { isAnyNote } from "../../lib/src/common/utils";
import { EVENT_FILE_SAVED, EVENT_ON_UNRESOLVED_ERROR, EVENT_SETTING_SAVED, eventHub } from "../../common/events";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";

import { $msg } from "../../lib/src/common/i18n";
import { clearHandlers } from "../../lib/src/replication/SyncParamsHandler";
import type { LiveSyncCore } from "../../main";

const KEY_REPLICATION_ON_EVENT = "replicationOnEvent";
const REPLICATION_ON_EVENT_FORECASTED_TIME = 5000;

export class ModuleReplicator extends AbstractModule {
    _replicatorType?: RemoteType;
    _previousErrors = new Set<string>();

    showError(msg: string, max_log_level: LOG_LEVEL = LEVEL_NOTICE) {
        const level = this._previousErrors.has(msg) ? LEVEL_INFO : max_log_level;
        this._log(msg, level);
        if (!this._previousErrors.has(msg)) {
            this._previousErrors.add(msg);
            eventHub.emitEvent(EVENT_ON_UNRESOLVED_ERROR);
        }
    }
    clearErrors() {
        this._previousErrors.clear();
        eventHub.emitEvent(EVENT_ON_UNRESOLVED_ERROR);
    }
    private _everyOnloadAfterLoadSettings(): Promise<boolean> {
        eventHub.onEvent(EVENT_FILE_SAVED, () => {
            if (this.settings.syncOnSave && !this.core.services.appLifecycle.isSuspended()) {
                scheduleTask("perform-replicate-after-save", 250, () => this.services.replication.replicateByEvent());
            }
        });
        eventHub.onEvent(EVENT_SETTING_SAVED, (setting) => {
            if (this._replicatorType !== setting.remoteType) {
                void this.setReplicator();
            }
        });

        return Promise.resolve(true);
    }

    async setReplicator() {
        const replicator = await this.services.replicator.getNewReplicator();
        if (!replicator) {
            this.showError($msg("Replicator.Message.InitialiseFatalError"), LOG_LEVEL_NOTICE);
            return false;
        }
        if (this.core.replicator) {
            await this.core.replicator.closeReplication();
            this._log("Replicator closed for changing", LOG_LEVEL_VERBOSE);
        }
        this.core.replicator = replicator;
        this._replicatorType = this.settings.remoteType;
        await yieldMicrotask();
        // Clear any existing sync parameter handlers (which means clearing the key-deriving salt).
        clearHandlers();
        return true;
    }

    _getReplicator(): LiveSyncAbstractReplicator {
        return this.core.replicator;
    }

    _everyOnInitializeDatabase(db: LiveSyncLocalDB): Promise<boolean> {
        return this.setReplicator();
    }

    _everyOnResetDatabase(db: LiveSyncLocalDB): Promise<boolean> {
        return this.setReplicator();
    }
    async ensureReplicatorPBKDF2Salt(showMessage: boolean = false): Promise<boolean> {
        // Checking the salt
        const replicator = this.services.replicator.getActiveReplicator();
        if (!replicator) {
            this.showError($msg("Replicator.Message.InitialiseFatalError"), LOG_LEVEL_NOTICE);
            return false;
        }
        return await replicator.ensurePBKDF2Salt(this.settings, showMessage, true);
    }

    async _everyBeforeReplicate(showMessage: boolean): Promise<boolean> {
        // Checking the salt
        if (!this.core.managers.networkManager.isOnline) {
            this.showError("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
            return false;
        }
        // showMessage is passed as false because the message is shown here (and it is a fatal error; there is no way to hide it).
        if (!(await this.ensureReplicatorPBKDF2Salt(false))) {
            this.showError("Failed to initialise the encryption key, preventing replication.");
            return false;
        }
        await this.loadQueuedFiles();
        this.clearErrors();
        return true;
    }
    private async _replicate(showMessage: boolean = false): Promise<boolean | void> {
        try {
            updatePreviousExecutionTime(KEY_REPLICATION_ON_EVENT, REPLICATION_ON_EVENT_FORECASTED_TIME);
            return await this.$$_replicate(showMessage);
        } finally {
            updatePreviousExecutionTime(KEY_REPLICATION_ON_EVENT);
        }
    }

    /**
     * Obsolete method. No longer maintained and will be removed in the future.
     * @deprecated v0.24.17
     * @param showMessage If true, show the message to the user.
     */
    async cleaned(showMessage: boolean) {
        Logger(`The remote database has been cleaned.`, showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
        await skipIfDuplicated("cleanup", async () => {
            const count = await purgeUnreferencedChunks(this.localDatabase.localDatabase, true);
            const message = `The remote database has been cleaned up.
To synchronise, this device must also be cleaned up. ${count} chunk(s) will be erased from this device.
However, if there are many chunks to be deleted, fetching again may be faster.
We will lose the history of this device if we fetch the remote database again.
Even if you choose to clean up, you will see this option again if you exit Obsidian and then synchronise again.`;
            const CHOICE_FETCH = "Fetch again";
            const CHOICE_CLEAN = "Cleanup";
            const CHOICE_DISMISS = "Dismiss";
            const ret = await this.core.confirm.confirmWithMessage(
                "Cleaned",
                message,
                [CHOICE_FETCH, CHOICE_CLEAN, CHOICE_DISMISS],
                CHOICE_DISMISS,
                30
            );
            if (ret == CHOICE_FETCH) {
                await this.core.rebuilder.$performRebuildDB("localOnly");
            }
            if (ret == CHOICE_CLEAN) {
                const replicator = this.services.replicator.getActiveReplicator();
                if (!(replicator instanceof LiveSyncCouchDBReplicator)) return;
                const remoteDB = await replicator.connectRemoteCouchDBWithSetting(
                    this.settings,
                    this.services.API.isMobile(),
                    true
                );
                if (typeof remoteDB == "string") {
                    Logger(remoteDB, LOG_LEVEL_NOTICE);
                    return false;
                }

                await purgeUnreferencedChunks(this.localDatabase.localDatabase, false);
                this.localDatabase.clearCaches();
                // Perform the synchronisation once.
                if (await this.core.replicator.openReplication(this.settings, false, showMessage, true)) {
                    await balanceChunkPurgedDBs(this.localDatabase.localDatabase, remoteDB.db);
                    await purgeUnreferencedChunks(this.localDatabase.localDatabase, false);
                    this.localDatabase.clearCaches();
                    await this.services.replicator.getActiveReplicator()?.markRemoteResolved(this.settings);
                    Logger("The local database has been cleaned up.", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
                } else {
                    Logger(
                        "Replication has been cancelled. Please try it again.",
                        showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO
                    );
                }
            }
        });
    }
    async _canReplicate(showMessage: boolean = false): Promise<boolean> {
        if (!this.services.appLifecycle.isReady()) {
            Logger(`Not ready`);
            return false;
        }

        if (isLockAcquired("cleanup")) {
            Logger($msg("Replicator.Message.Cleaned"), LOG_LEVEL_NOTICE);
            return false;
        }

        if (this.settings.versionUpFlash != "") {
            Logger($msg("Replicator.Message.VersionUpFlash"), LOG_LEVEL_NOTICE);
            return false;
        }

        if (!(await this.services.fileProcessing.commitPendingFileEvents())) {
            this.showError($msg("Replicator.Message.Pending"), LOG_LEVEL_NOTICE);
            return false;
        }

        if (!this.core.managers.networkManager.isOnline) {
            this.showError("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
            return false;
        }
        if (!(await this.services.replication.onBeforeReplicate(showMessage))) {
            this.showError($msg("Replicator.Message.SomeModuleFailed"), LOG_LEVEL_NOTICE);
            return false;
        }
        this.clearErrors();
        return true;
    }
    async $$_replicate(showMessage: boolean = false): Promise<boolean | void> {
        const checkBeforeReplicate = await this.services.replication.isReplicationReady(showMessage);
        if (!checkBeforeReplicate) return false;

        // <-- This could be a module.
        const ret = await this.core.replicator.openReplication(this.settings, false, showMessage, false);
        if (!ret) {
            if (this.core.replicator.tweakSettingsMismatched && this.core.replicator.preferredTweakValue) {
                await this.services.tweakValue.askResolvingMismatched(this.core.replicator.preferredTweakValue);
            } else {
                if (this.core.replicator?.remoteLockedAndDeviceNotAccepted) {
                    if (this.core.replicator.remoteCleaned && this.settings.useIndexedDBAdapter) {
                        await this.cleaned(showMessage);
                    } else {
                        const message = $msg("Replicator.Dialogue.Locked.Message");
                        const CHOICE_FETCH = $msg("Replicator.Dialogue.Locked.Action.Fetch");
                        const CHOICE_DISMISS = $msg("Replicator.Dialogue.Locked.Action.Dismiss");
                        const CHOICE_UNLOCK = $msg("Replicator.Dialogue.Locked.Action.Unlock");
                        const ret = await this.core.confirm.askSelectStringDialogue(
                            message,
                            [CHOICE_FETCH, CHOICE_UNLOCK, CHOICE_DISMISS],
                            {
                                title: $msg("Replicator.Dialogue.Locked.Title"),
                                defaultAction: CHOICE_DISMISS,
                                timeout: 60,
                            }
                        );
                        if (ret == CHOICE_FETCH) {
                            this._log($msg("Replicator.Dialogue.Locked.Message.Fetch"), LOG_LEVEL_NOTICE);
                            await this.core.rebuilder.scheduleFetch();
                            this.services.appLifecycle.scheduleRestart();
                            return;
                        } else if (ret == CHOICE_UNLOCK) {
                            await this.core.replicator.markRemoteResolved(this.settings);
                            this._log($msg("Replicator.Dialogue.Locked.Message.Unlocked"), LOG_LEVEL_NOTICE);
                            return;
                        }
                    }
                }
            }
        }
        return ret;
    }

    private async _replicateByEvent(): Promise<boolean | void> {
        const least = this.settings.syncMinimumInterval;
        if (least > 0) {
            return rateLimitedSharedExecution(KEY_REPLICATION_ON_EVENT, least, async () => {
                return await this.services.replication.replicate();
            });
        }
        return await shareRunningResult(`replication`, () => this.services.replication.replicate());
    }
    _parseReplicationResult(docs: Array<PouchDB.Core.ExistingDocument<EntryDoc>>): void {
        if (this.settings.suspendParseReplicationResult && !this.replicationResultProcessor.isSuspended) {
            this.replicationResultProcessor.suspend();
        }
        this.replicationResultProcessor.enqueueAll(docs);
        if (!this.settings.suspendParseReplicationResult && this.replicationResultProcessor.isSuspended) {
            this.replicationResultProcessor.resume();
        }
    }
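    // The IDs of queued (not-yet-applied) replicated documents are persisted into the
    // key-value database so that the queue survives a restart and can be resurrected
    // by loadQueuedFiles() below.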
    _saveQueuedFiles = throttle(() => {
        const saveData = this.replicationResultProcessor._queue
            .filter((e) => e !== undefined && e !== null)
            .map((e) => e?._id ?? ("" as string)) as string[];
        const kvDBKey = "queued-files";
        // localStorage.setItem(lsKey, saveData);
        fireAndForget(() => this.core.kvDB.set(kvDBKey, saveData));
    }, 100);
    saveQueuedFiles() {
        this._saveQueuedFiles();
    }
    async loadQueuedFiles() {
        if (this.settings.suspendParseReplicationResult) return;
        if (!this.settings.isConfigured) return;
        try {
            const kvDBKey = "queued-files";
            // const ids = [...new Set(JSON.parse(localStorage.getItem(lsKey) || "[]"))] as string[];
            const ids = [...new Set((await this.core.kvDB.get<string[]>(kvDBKey)) ?? [])];
            const batchSize = 100;
            const chunkedIds = arrayToChunkedArray(ids, batchSize);

            // suspendParseReplicationResult is false here, so the processor should be resumed if it is suspended.
            if (this.replicationResultProcessor.isSuspended) {
                this.replicationResultProcessor.resume();
            }
            for await (const idsBatch of chunkedIds) {
                const ret = await this.localDatabase.allDocsRaw<EntryDoc>({
                    keys: idsBatch,
                    include_docs: true,
                    limit: 100,
                });
                const docs = ret.rows
                    .filter((e) => e.doc)
                    .map((e) => e.doc) as PouchDB.Core.ExistingDocument<EntryDoc>[];
                const errors = ret.rows.filter((e) => !e.doc && !e.value.deleted);
                if (errors.length > 0) {
                    Logger("Some queued processes were not resurrected");
                    Logger(JSON.stringify(errors), LOG_LEVEL_VERBOSE);
                }
                this.replicationResultProcessor.enqueueAll(docs);
            }
        } catch (e) {
            Logger(`Failed to load queued files.`, LOG_LEVEL_NOTICE);
            Logger(e, LOG_LEVEL_VERBOSE);
        } finally {
            // Check again before awaiting.
            if (this.replicationResultProcessor.isSuspended) {
                this.replicationResultProcessor.resume();
            }
        }
        // Wait for all queued files to be processed.
        try {
            await this.replicationResultProcessor.waitForAllProcessed();
        } catch (e) {
            Logger(`Failed to wait for all queued files to be processed.`, LOG_LEVEL_NOTICE);
            Logger(e, LOG_LEVEL_VERBOSE);
        }
    }
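    // Replicated documents flow through three queue processors in order:
    // replicationResultProcessor (triage chunks / internal docs / notes),
    // databaseQueuedProcessor (gather content and skip stale revisions), and
    // storageApplyingProcessor (reflect the entry into the vault).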
    replicationResultProcessor = new QueueProcessor(
        async (docs: PouchDB.Core.ExistingDocument<EntryDoc>[]) => {
            if (this.settings.suspendParseReplicationResult) return;
            const change = docs[0];
            if (!change) return;
            if (isChunk(change._id)) {
                this.localDatabase.onNewLeaf(change as EntryLeaf);
                return;
            }
            if (await this.services.replication.processVirtualDocument(change)) return;
            // Does any add-on need this item?
            // for (const proc of this.core.addOns) {
            //     if (await proc.parseReplicationResultItem(change)) {
            //         return;
            //     }
            // }
            if (change.type == "versioninfo") {
                if (change.version > VER) {
                    this.core.replicator.closeReplication();
                    Logger(
                        `The remote database has been updated to an incompatible version. Please update your Self-hosted LiveSync plugin.`,
                        LOG_LEVEL_NOTICE
                    );
                }
                return;
            }
            if (
                change._id == SYNCINFO_ID || // Synchronisation information data
                change._id.startsWith("_design") // Design document
            ) {
                return;
            }
            if (isAnyNote(change)) {
                const docPath = getPath(change);
                if (!(await this.services.vault.isTargetFile(docPath))) {
                    Logger(`Skipped: ${docPath}`, LOG_LEVEL_VERBOSE);
                    return;
                }
                if (this.databaseQueuedProcessor._isSuspended) {
                    Logger(`Processing scheduled: ${docPath}`, LOG_LEVEL_INFO);
                }
                const size = change.size;
                if (this.services.vault.isFileSizeTooLarge(size)) {
                    Logger(
                        `Processing ${docPath} has been skipped due to the file size exceeding the limit`,
                        LOG_LEVEL_NOTICE
                    );
                    return;
                }
                this.databaseQueuedProcessor.enqueue(change);
            }
            return;
        },
        {
            batchSize: 1,
            suspended: true,
            concurrentLimit: 100,
            delay: 0,
            totalRemainingReactiveSource: this.core.replicationResultCount,
        }
    )
        .replaceEnqueueProcessor((queue, newItem) => {
            const q = queue.filter((e) => e._id != newItem._id);
            return [...q, newItem];
        })
        .startPipeline()
        .onUpdateProgress(() => {
            this.saveQueuedFiles();
        });
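    /**
     * Decide whether a replicated revision still needs to be processed:
     * conflicted or latest revisions do, revisions already present in the history do not.
     * A missing document (404) always needs processing.
     */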
    async checkIsChangeRequiredForDatabaseProcessing(dbDoc: LoadedEntry): Promise<boolean> {
        const path = getPath(dbDoc);
        try {
            const savedDoc = await this.localDatabase.getRaw<LoadedEntry>(dbDoc._id, {
                conflicts: true,
                revs_info: true,
            });
            const newRev = dbDoc._rev ?? "";
            const latestRev = savedDoc._rev ?? "";
            const revisions = savedDoc._revs_info?.map((e) => e.rev) ?? [];
            if (savedDoc._conflicts && savedDoc._conflicts.length > 0) {
                // There are conflicts, so we have to process it.
                return true;
            }
            if (newRev == latestRev) {
                // The latest revision; we need to process it.
                return true;
            }
            const index = revisions.indexOf(newRev);
            if (index >= 0) {
                // The revision has been inserted before.
                return false; // Already processed.
            }
            return true; // This mostly should not happen, but we have to process it just in case.
        } catch (e: any) {
            if ("status" in e && e.status == 404) {
                // Not existing, so we have to process it.
                return true;
            } else {
                Logger(
                    `Failed to get the existing document for ${path} (${dbDoc._id.substring(0, 8)}, ${dbDoc._rev?.substring(0, 10)}) `,
                    LOG_LEVEL_NOTICE
                );
                Logger(e, LOG_LEVEL_VERBOSE);
                return true;
            }
        }
        return true;
    }
    databaseQueuedProcessor = new QueueProcessor(
        async (docs: EntryBody[]) => {
            const dbDoc = docs[0] as LoadedEntry; // It has no `data`
            const path = getPath(dbDoc);
            // If the document already exists with any revision, confirm whether we have to process it.
            const isRequired = await this.checkIsChangeRequiredForDatabaseProcessing(dbDoc);
            if (!isRequired) {
                Logger(`Skipped (Not latest): ${path} (${dbDoc._id.substring(0, 8)})`, LOG_LEVEL_VERBOSE);
                return;
            }
            // If `Read chunks online` is disabled, chunks should have been transferred before we get here.
            // However, in some cases chunks arrive afterwards; so if chunks are missing, we have to wait for them.
            const doc = await this.localDatabase.getDBEntryFromMeta({ ...dbDoc }, false, true);
            if (!doc) {
                Logger(
                    `Something went wrong while gathering the content of ${path} (${dbDoc._id.substring(0, 8)}, ${dbDoc._rev?.substring(0, 10)}) `,
                    LOG_LEVEL_NOTICE
                );
                return;
            }

            if (await this.services.replication.processOptionalSynchroniseResult(dbDoc)) {
                // Already processed
            } else if (isValidPath(getPath(doc))) {
                this.storageApplyingProcessor.enqueue(doc as MetaEntry);
            } else {
                Logger(`Skipped: ${path} (${doc._id.substring(0, 8)})`, LOG_LEVEL_VERBOSE);
            }
            return;
        },
        {
            suspended: true,
            batchSize: 1,
            concurrentLimit: 10,
            yieldThreshold: 1,
            delay: 0,
            totalRemainingReactiveSource: this.core.databaseQueueCount,
        }
    )
        .replaceEnqueueProcessor((queue, newItem) => {
            const q = queue.filter((e) => e._id != newItem._id);
            return [...q, newItem];
        })
        .startPipeline();

    storageApplyingProcessor = new QueueProcessor(
        async (docs: MetaEntry[]) => {
            const entry = docs[0];
            await this.services.replication.processSynchroniseResult(entry);
            return;
        },
        {
            suspended: true,
            batchSize: 1,
            concurrentLimit: 6,
            yieldThreshold: 1,
            delay: 0,
            totalRemainingReactiveSource: this.core.storageApplyingCount,
        }
    )
        .replaceEnqueueProcessor((queue, newItem) => {
            const q = queue.filter((e) => e._id != newItem._id);
            return [...q, newItem];
        })
        .startPipeline();
    _everyBeforeSuspendProcess(): Promise<boolean> {
        this.core.replicator?.closeReplication();
        return Promise.resolve(true);
    }

    private async _replicateAllToServer(
        showingNotice: boolean = false,
        sendChunksInBulkDisabled: boolean = false
    ): Promise<boolean> {
        if (!this.services.appLifecycle.isReady()) return false;
        if (!(await this.services.replication.onBeforeReplicate(showingNotice))) {
            Logger($msg("Replicator.Message.SomeModuleFailed"), LOG_LEVEL_NOTICE);
            return false;
        }
        if (!sendChunksInBulkDisabled) {
            if (this.core.replicator instanceof LiveSyncCouchDBReplicator) {
                if (
                    (await this.core.confirm.askYesNoDialog("Do you want to send all chunks before replication?", {
                        defaultOption: "No",
                        timeout: 20,
                    })) == "yes"
                ) {
                    await this.core.replicator.sendChunks(this.core.settings, undefined, true, 0);
                }
            }
        }
        const ret = await this.core.replicator.replicateAllToServer(this.settings, showingNotice);
        if (ret) return true;
        const checkResult = await this.services.replication.checkConnectionFailure();
        if (checkResult == "CHECKAGAIN") return await this.services.remote.replicateAllToRemote(showingNotice);
        return !checkResult;
    }
    async _replicateAllFromServer(showingNotice: boolean = false): Promise<boolean> {
        if (!this.services.appLifecycle.isReady()) return false;
        const ret = await this.core.replicator.replicateAllFromServer(this.settings, showingNotice);
        if (ret) return true;
        const checkResult = await this.services.replication.checkConnectionFailure();
        if (checkResult == "CHECKAGAIN") return await this.services.remote.replicateAllFromRemote(showingNotice);
        return !checkResult;
    }

    private _reportUnresolvedMessages(): Promise<string[]> {
        return Promise.resolve([...this._previousErrors]);
    }

    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replicator.handleGetActiveReplicator(this._getReplicator.bind(this));
        services.databaseEvents.handleOnDatabaseInitialisation(this._everyOnInitializeDatabase.bind(this));
        services.databaseEvents.handleOnResetDatabase(this._everyOnResetDatabase.bind(this));
        services.appLifecycle.handleOnSettingLoaded(this._everyOnloadAfterLoadSettings.bind(this));
        services.replication.handleParseSynchroniseResult(this._parseReplicationResult.bind(this));
        services.appLifecycle.handleOnSuspending(this._everyBeforeSuspendProcess.bind(this));
        services.replication.handleBeforeReplicate(this._everyBeforeReplicate.bind(this));
        services.replication.handleIsReplicationReady(this._canReplicate.bind(this));
        services.replication.handleReplicate(this._replicate.bind(this));
        services.replication.handleReplicateByEvent(this._replicateByEvent.bind(this));
        services.remote.handleReplicateAllToRemote(this._replicateAllToServer.bind(this));
        services.remote.handleReplicateAllFromRemote(this._replicateAllFromServer.bind(this));
        services.appLifecycle.reportUnresolvedMessages(this._reportUnresolvedMessages.bind(this));
    }
}

src/modules/core/ModuleReplicatorCouchDB.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
import { fireAndForget } from "octagonal-wheels/promises";
import { REMOTE_MINIO, REMOTE_P2P, type RemoteDBSettings } from "../../lib/src/common/types";
import { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import { AbstractModule } from "../AbstractModule";
import type { LiveSyncCore } from "../../main";

export class ModuleReplicatorCouchDB extends AbstractModule {
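    /**
     * Factory handler for the replicator. Each ModuleReplicator* registers one of these;
     * a module resolves to `false` when the configured remote type is not its own,
     * letting another registered factory take over.
     */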
    _anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
        const settings = { ...this.settings, ...settingOverride };
        // If new remote types are added, add them here. Do not use `REMOTE_COUCHDB` directly, as a safety valve.
        if (settings.remoteType == REMOTE_MINIO || settings.remoteType == REMOTE_P2P) {
            return Promise.resolve(false);
        }
        return Promise.resolve(new LiveSyncCouchDBReplicator(this.core));
    }
    _everyAfterResumeProcess(): Promise<boolean> {
        if (this.services.appLifecycle.isSuspended()) return Promise.resolve(true);
        if (!this.services.appLifecycle.isReady()) return Promise.resolve(true);
        if (this.settings.remoteType != REMOTE_MINIO && this.settings.remoteType != REMOTE_P2P) {
            const liveSyncEnabled = this.settings.liveSync;
            const continuous = liveSyncEnabled;
            const eventualOnStart = !liveSyncEnabled && this.settings.syncOnStart;
            // If LiveSync is enabled, or sync-on-start is enabled, open replication.
            if (liveSyncEnabled || eventualOnStart) {
                // Note that we do not open the conflict detection dialogue directly during this process.
                // It should be raised explicitly if needed.
                fireAndForget(async () => {
                    const canReplicate = await this.services.replication.isReplicationReady(false);
                    if (!canReplicate) return;
                    void this.core.replicator.openReplication(this.settings, continuous, false, false);
                });
            }
        }

        return Promise.resolve(true);
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replicator.handleGetNewReplicator(this._anyNewReplicator.bind(this));
        services.appLifecycle.handleOnResumed(this._everyAfterResumeProcess.bind(this));
    }
}

src/modules/core/ModuleReplicatorMinIO.ts (new file, 18 lines)
@@ -0,0 +1,18 @@
import { REMOTE_MINIO, type RemoteDBSettings } from "../../lib/src/common/types";
import { LiveSyncJournalReplicator } from "../../lib/src/replication/journal/LiveSyncJournalReplicator";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import type { LiveSyncCore } from "../../main";
import { AbstractModule } from "../AbstractModule";

export class ModuleReplicatorMinIO extends AbstractModule {
    _anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
        const settings = { ...this.settings, ...settingOverride };
        if (settings.remoteType == REMOTE_MINIO) {
            return Promise.resolve(new LiveSyncJournalReplicator(this.core));
        }
        return Promise.resolve(false);
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replicator.handleGetNewReplicator(this._anyNewReplicator.bind(this));
    }
}

src/modules/core/ModuleReplicatorP2P.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
import { REMOTE_P2P, type RemoteDBSettings } from "../../lib/src/common/types";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import { AbstractModule } from "../AbstractModule";
import { LiveSyncTrysteroReplicator } from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import type { LiveSyncCore } from "../../main";

export class ModuleReplicatorP2P extends AbstractModule {
    _anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
        const settings = { ...this.settings, ...settingOverride };
        if (settings.remoteType == REMOTE_P2P) {
            return Promise.resolve(new LiveSyncTrysteroReplicator(this.core));
        }
        return Promise.resolve(false);
    }
    _everyAfterResumeProcess(): Promise<boolean> {
        if (this.settings.remoteType == REMOTE_P2P) {
            // // If LiveSync is enabled, open replication
            // if (this.settings.liveSync) {
            //     fireAndForget(() => this.core.replicator.openReplication(this.settings, true, false, false));
            // }
            // // If sync-on-start is enabled, open replication
            // if (!this.settings.liveSync && this.settings.syncOnStart) {
            //     // Possibly OK, as it only shares the result
            //     fireAndForget(() => this.core.replicator.openReplication(this.settings, false, false, false));
            // }
        }

        return Promise.resolve(true);
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.replicator.handleGetNewReplicator(this._anyNewReplicator.bind(this));
        services.appLifecycle.handleOnResumed(this._everyAfterResumeProcess.bind(this));
    }
}

src/modules/core/ModuleTargetFilter.ts (new file, 185 lines)
@@ -0,0 +1,185 @@
import { LRUCache } from "octagonal-wheels/memory/LRUCache";
|
||||
import {
|
||||
getStoragePathFromUXFileInfo,
|
||||
id2path,
|
||||
isInternalMetadata,
|
||||
path2id,
|
||||
stripInternalMetadataPrefix,
|
||||
useMemo,
|
||||
} from "../../common/utils";
|
||||
import {
|
||||
LOG_LEVEL_VERBOSE,
|
||||
type DocumentID,
|
||||
type EntryHasPath,
|
||||
type FilePath,
|
||||
type FilePathWithPrefix,
|
||||
type ObsidianLiveSyncSettings,
|
||||
type UXFileInfoStub,
|
||||
} from "../../lib/src/common/types";
|
||||
import { addPrefix, isAcceptedAll } from "../../lib/src/string_and_binary/path";
|
||||
import { AbstractModule } from "../AbstractModule";
|
||||
import { EVENT_REQUEST_RELOAD_SETTING_TAB, EVENT_SETTING_SAVED, eventHub } from "../../common/events";
|
||||
import { isDirty } from "../../lib/src/common/utils";
|
||||
import type { LiveSyncCore } from "../../main";
|
||||
export class ModuleTargetFilter extends AbstractModule {
|
||||
reloadIgnoreFiles() {
|
||||
this.ignoreFiles = this.settings.ignoreFiles.split(",").map((e) => e.trim());
|
||||
}
|
||||
private _everyOnload(): Promise<boolean> {
|
||||
this.reloadIgnoreFiles();
|
||||
eventHub.onEvent(EVENT_SETTING_SAVED, (evt: ObsidianLiveSyncSettings) => {
|
||||
this.reloadIgnoreFiles();
|
||||
});
|
||||
eventHub.onEvent(EVENT_REQUEST_RELOAD_SETTING_TAB, () => {
|
||||
this.reloadIgnoreFiles();
|
||||
});
|
||||
return Promise.resolve(true);
|
||||
}
|
||||
|
||||
_id2path(id: DocumentID, entry?: EntryHasPath, stripPrefix?: boolean): FilePathWithPrefix {
|
||||
const tempId = id2path(id, entry);
|
||||
if (stripPrefix && isInternalMetadata(tempId)) {
|
||||
const out = stripInternalMetadataPrefix(tempId);
|
||||
return out;
|
||||
}
|
||||
return tempId;
|
||||
}
|
||||
async _path2id(filename: FilePathWithPrefix | FilePath, prefix?: string): Promise<DocumentID> {
|
||||
const destPath = addPrefix(filename, prefix ?? "");
|
||||
return await path2id(
|
||||
destPath,
|
||||
this.settings.usePathObfuscation ? this.settings.passphrase : "",
|
||||
!this.settings.handleFilenameCaseSensitive
|
||||
);
|
||||
}
|
||||
|
||||
private _isFileSizeExceeded(size: number) {
|
||||
if (this.settings.syncMaxSizeInMB > 0 && size > 0) {
|
||||
if (this.settings.syncMaxSizeInMB * 1024 * 1024 < size) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
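    // Illustrative example: with syncMaxSizeInMB = 50, the cap is
    // 50 * 1024 * 1024 = 52,428,800 bytes, so a 60 MB file exceeds it and is skipped,
    // while syncMaxSizeInMB = 0 (or a zero-byte file) disables the check.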

    _markFileListPossiblyChanged(): void {
        this.totalFileEventCount++;
    }
    totalFileEventCount = 0;
    get fileListPossiblyChanged() {
        if (isDirty("totalFileEventCount", this.totalFileEventCount)) {
            return true;
        }
        return false;
    }

    private async _isTargetFile(file: string | UXFileInfoStub, keepFileCheckList = false) {
        const fileCount = useMemo<Record<string, number>>(
            {
                key: "fileCount", // forceUpdate: !keepFileCheckList,
            },
            (ctx, prev) => {
                if (keepFileCheckList && prev) return prev;
                if (!keepFileCheckList && prev && !this.fileListPossiblyChanged) {
                    return prev;
                }
                const fileList = (ctx.get("fileList") ?? []) as FilePathWithPrefix[];
                // const fileNameList = (ctx.get("fileNameList") ?? []) as FilePath[];
                // const fileNames =
                const vaultFiles = this.core.storageAccess.getFileNames().sort();
                if (prev && vaultFiles.length == fileList.length) {
                    const fl3 = new Set([...fileList, ...vaultFiles]);
                    if (fileList.length == fl3.size && vaultFiles.length == fl3.size) {
                        return prev;
                    }
                }
                ctx.set("fileList", vaultFiles);

                const fileCount: Record<string, number> = {};
                for (const file of vaultFiles) {
                    const lc = file.toLowerCase();
                    if (!fileCount[lc]) {
                        fileCount[lc] = 1;
                    } else {
                        fileCount[lc]++;
                    }
                }
                return fileCount;
            }
        );

        const filepath = getStoragePathFromUXFileInfo(file);
        const lc = filepath.toLowerCase();
        if (this.services.setting.shouldCheckCaseInsensitively()) {
            if (lc in fileCount && fileCount[lc] > 1) {
                return false;
            }
        }
        const fileNameLC = getStoragePathFromUXFileInfo(file).split("/").pop()?.toLowerCase();
        if (this.settings.useIgnoreFiles) {
            if (this.ignoreFiles.some((e) => e.toLowerCase() == fileNameLC)) {
                // We must reload the ignore file, since it may have changed.
                await this.readIgnoreFile(filepath);
            }
            if (await this.services.vault.isIgnoredByIgnoreFile(file)) {
                return false;
            }
        }
        if (!this.localDatabase?.isTargetFile(filepath)) return false;
        return true;
    }

    ignoreFileCache = new LRUCache<string, string[] | false>(300, 250000, true);
    ignoreFiles = [] as string[];
    async readIgnoreFile(path: string) {
        try {
            // this._log(`[ignore]Reading ignore file: ${path}`, LOG_LEVEL_VERBOSE);
            if (!(await this.core.storageAccess.isExistsIncludeHidden(path))) {
                this.ignoreFileCache.set(path, false);
                // this._log(`[ignore]Ignore file not found: ${path}`, LOG_LEVEL_VERBOSE);
                return false;
            }
            const file = await this.core.storageAccess.readHiddenFileText(path);
            const gitignore = file.split(/\r?\n/g);
            this.ignoreFileCache.set(path, gitignore);
            this._log(`[ignore]Ignore file loaded: ${path}`, LOG_LEVEL_VERBOSE);
            return gitignore;
        } catch (ex) {
            this._log(`[ignore]Failed to read ignore file ${path}`);
            this._log(ex, LOG_LEVEL_VERBOSE);
            this.ignoreFileCache.set(path, false);
            return false;
        }
    }
    async getIgnoreFile(path: string) {
        if (this.ignoreFileCache.has(path)) {
            return this.ignoreFileCache.get(path) ?? false;
        } else {
            return await this.readIgnoreFile(path);
        }
    }
    private async _isIgnoredByIgnoreFiles(file: string | UXFileInfoStub): Promise<boolean> {
        if (!this.settings.useIgnoreFiles) {
            return false;
        }
        const filepath = getStoragePathFromUXFileInfo(file);
        if (this.ignoreFileCache.has(filepath)) {
            // Renew
            await this.readIgnoreFile(filepath);
        }
        if (!(await isAcceptedAll(filepath, this.ignoreFiles, (filename) => this.getIgnoreFile(filename)))) {
            return true;
        }
        return false;
    }

    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.vault.handleMarkFileListPossiblyChanged(this._markFileListPossiblyChanged.bind(this));
        services.path.handleId2Path(this._id2path.bind(this));
        services.path.handlePath2Id(this._path2id.bind(this));
        services.appLifecycle.handleOnLoaded(this._everyOnload.bind(this));
        services.vault.handleIsFileSizeTooLarge(this._isFileSizeExceeded.bind(this));
        services.vault.handleIsIgnoredByIgnoreFile(this._isIgnoredByIgnoreFiles.bind(this));
        services.vault.handleIsTargetFile(this._isTargetFile.bind(this));
    }
}
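The duplicate-name guard in _isTargetFile counts vault paths case-insensitively and refuses to treat a file as a sync target when two paths collide after lowercasing, which matters when syncing between case-sensitive and case-insensitive filesystems. A standalone sketch of the same counting idea, as a hypothetical helper independent of the plugin's types:

// Hypothetical helper mirroring the fileCount logic above.
function findCaseCollisions(paths: string[]): Set<string> {
    const counts = new Map<string, number>();
    for (const p of paths) {
        const lc = p.toLowerCase();
        counts.set(lc, (counts.get(lc) ?? 0) + 1);
    }
    // Keep only the lowercased names that occur more than once.
    return new Set([...counts].filter(([, n]) => n > 1).map(([lc]) => lc));
}

// findCaseCollisions(["Note.md", "note.md", "Other.md"]) -> Set { "note.md" }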
src/modules/coreFeatures/ModuleCheckRemoteSize.ts (Normal file, 115 lines added)
@@ -0,0 +1,115 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { AbstractModule } from "../AbstractModule.ts";
import { sizeToHumanReadable } from "octagonal-wheels/number";
import { $msg } from "src/lib/src/common/i18n.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleCheckRemoteSize extends AbstractModule {
    async _allScanStat(): Promise<boolean> {
        if (this.core.managers.networkManager.isOnline === false) {
            this._log("Network is offline, skipping remote size check.", LOG_LEVEL_INFO);
            return true;
        }
        this._log($msg("moduleCheckRemoteSize.logCheckingStorageSizes"), LOG_LEVEL_VERBOSE);
        if (this.settings.notifyThresholdOfRemoteStorageSize < 0) {
            const message = $msg("moduleCheckRemoteSize.msgSetDBCapacity");
            const ANSWER_0 = $msg("moduleCheckRemoteSize.optionNoWarn");
            const ANSWER_800 = $msg("moduleCheckRemoteSize.option800MB");
            const ANSWER_2000 = $msg("moduleCheckRemoteSize.option2GB");
            const ASK_ME_NEXT_TIME = $msg("moduleCheckRemoteSize.optionAskMeLater");

            const ret = await this.core.confirm.askSelectStringDialogue(
                message,
                [ANSWER_0, ANSWER_800, ANSWER_2000, ASK_ME_NEXT_TIME],
                {
                    defaultAction: ASK_ME_NEXT_TIME,
                    title: $msg("moduleCheckRemoteSize.titleDatabaseSizeNotify"),
                    timeout: 40,
                }
            );
            if (ret == ANSWER_0) {
                this.settings.notifyThresholdOfRemoteStorageSize = 0;
                await this.core.saveSettings();
            } else if (ret == ANSWER_800) {
                this.settings.notifyThresholdOfRemoteStorageSize = 800;
                await this.core.saveSettings();
            } else if (ret == ANSWER_2000) {
                this.settings.notifyThresholdOfRemoteStorageSize = 2000;
                await this.core.saveSettings();
            }
        }
        if (this.settings.notifyThresholdOfRemoteStorageSize > 0) {
            const remoteStat = await this.core.replicator?.getRemoteStatus(this.settings);
            if (remoteStat) {
                const estimatedSize = remoteStat.estimatedSize;
                if (estimatedSize) {
                    const maxSize = this.settings.notifyThresholdOfRemoteStorageSize * 1024 * 1024;
                    if (estimatedSize > maxSize) {
                        const message = $msg("moduleCheckRemoteSize.msgDatabaseGrowing", {
                            estimatedSize: sizeToHumanReadable(estimatedSize),
                            maxSize: sizeToHumanReadable(maxSize),
                        });
                        const newMax = ~~(estimatedSize / 1024 / 1024) + 100;
                        const ANSWER_ENLARGE_LIMIT = $msg("moduleCheckRemoteSize.optionIncreaseLimit", {
                            newMax: newMax.toString(),
                        });
                        const ANSWER_REBUILD = $msg("moduleCheckRemoteSize.optionRebuildAll");
                        const ANSWER_IGNORE = $msg("moduleCheckRemoteSize.optionDismiss");
                        const ret = await this.core.confirm.askSelectStringDialogue(
                            message,
                            [ANSWER_ENLARGE_LIMIT, ANSWER_REBUILD, ANSWER_IGNORE],
                            {
                                defaultAction: ANSWER_IGNORE,
                                title: $msg("moduleCheckRemoteSize.titleDatabaseSizeLimitExceeded"),
                                timeout: 60,
                            }
                        );
                        if (ret == ANSWER_REBUILD) {
                            const ret = await this.core.confirm.askYesNoDialog(
                                $msg("moduleCheckRemoteSize.msgConfirmRebuild"),
                                { defaultOption: "No" }
                            );
                            if (ret == "yes") {
                                this.core.settings.notifyThresholdOfRemoteStorageSize = -1;
                                await this.saveSettings();
                                await this.core.rebuilder.scheduleRebuild();
                            }
                        } else if (ret == ANSWER_ENLARGE_LIMIT) {
                            this.settings.notifyThresholdOfRemoteStorageSize = ~~(estimatedSize / 1024 / 1024) + 100;
                            this._log(
                                $msg("moduleCheckRemoteSize.logThresholdEnlarged", {
                                    size: this.settings.notifyThresholdOfRemoteStorageSize.toString(),
                                }),
                                LOG_LEVEL_NOTICE
                            );
                            await this.core.saveSettings();
                        } else {
                            // Dismiss or Close the dialog
                        }

                        this._log(
                            $msg("moduleCheckRemoteSize.logExceededWarning", {
                                measuredSize: sizeToHumanReadable(estimatedSize),
                                notifySize: sizeToHumanReadable(
                                    this.settings.notifyThresholdOfRemoteStorageSize * 1024 * 1024
                                ),
                            }),
                            LOG_LEVEL_INFO
                        );
                    } else {
                        this._log(
                            $msg("moduleCheckRemoteSize.logCurrentStorageSize", {
                                measuredSize: sizeToHumanReadable(estimatedSize),
                            }),
                            LOG_LEVEL_INFO
                        );
                    }
                }
            }
        }
        return true;
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnScanningStartupIssues(this._allScanStat.bind(this));
    }
}
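When the remote database outgrows the configured threshold, the "increase limit" option bumps the threshold to the current size truncated to whole megabytes plus a 100 MB margin (the double-bitwise-NOT ~~ truncates toward zero). A worked example of that arithmetic, with illustrative values:

// Worked example of the newMax arithmetic above (values are illustrative).
const estimatedSize = 900 * 1024 * 1024 + 12345; // remote reports roughly 900 MB
const newMax = ~~(estimatedSize / 1024 / 1024) + 100; // truncate to 900, add a 100 MB margin
console.log(newMax); // 1000 -> the next warning fires once the remote passes 1000 MB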
src/modules/coreFeatures/ModuleConflictChecker.ts (Normal file, 82 lines added)
@@ -0,0 +1,82 @@
import { AbstractModule } from "../AbstractModule.ts";
import { LOG_LEVEL_NOTICE, type FilePathWithPrefix } from "../../lib/src/common/types";
import { QueueProcessor } from "octagonal-wheels/concurrency/processor";
import { sendValue } from "octagonal-wheels/messagepassing/signal";
import type { InjectableServiceHub } from "../../lib/src/services/InjectableServices.ts";
import type { LiveSyncCore } from "../../main.ts";

export class ModuleConflictChecker extends AbstractModule {
    async _queueConflictCheckIfOpen(file: FilePathWithPrefix): Promise<void> {
        const path = file;
        if (this.settings.checkConflictOnlyOnOpen) {
            const af = this.services.vault.getActiveFilePath();
            if (af && af != path) {
                this._log(`${file} is conflicted; the merging process has been postponed.`, LOG_LEVEL_NOTICE);
                return;
            }
        }
        await this.services.conflict.queueCheckFor(path);
    }

    async _queueConflictCheck(file: FilePathWithPrefix): Promise<void> {
        const optionalConflictResult = await this.services.conflict.getOptionalConflictCheckMethod(file);
        if (optionalConflictResult == true) {
            // The conflict has been resolved by another process.
            return;
        } else if (optionalConflictResult === "newer") {
            // The conflict should be resolved by taking the newer entry.
            await this.services.conflict.resolveByNewest(file);
        } else {
            this.conflictCheckQueue.enqueue(file);
        }
    }

    _waitForAllConflictProcessed(): Promise<boolean> {
        return this.conflictResolveQueue.waitForAllProcessed();
    }

    // TODO: Move to ModuleConflictResolver?
    conflictResolveQueue = new QueueProcessor(
        async (filenames: FilePathWithPrefix[]) => {
            const filename = filenames[0];
            return await this.services.conflict.resolve(filename);
        },
        {
            suspended: false,
            batchSize: 1,
            // No need to limit concurrency to `1` here; the subsequent process handles that.
            // In some cases we do not need to synchronise at all (e.g., when auto-merge is available),
            // so the global concurrency limit is enforced by the resolver with the UI.
            concurrentLimit: 10,
            delay: 0,
            keepResultUntilDownstreamConnected: false,
        }
    ).replaceEnqueueProcessor((queue, newEntity) => {
        const filename = newEntity;
        sendValue("cancel-resolve-conflict:" + filename, true);
        const newQueue = [...queue].filter((e) => e != newEntity);
        return [...newQueue, newEntity];
    });

    conflictCheckQueue = // First stage: check whether the file actually needs to be resolved
        new QueueProcessor(
            (files: FilePathWithPrefix[]) => {
                const filename = files[0];
                return Promise.resolve([filename]);
            },
            {
                suspended: false,
                batchSize: 1,
                concurrentLimit: 10,
                delay: 0,
                keepResultUntilDownstreamConnected: true,
                pipeTo: this.conflictResolveQueue,
                totalRemainingReactiveSource: this.core.conflictProcessQueueCount,
            }
        );
    onBindFunction(core: LiveSyncCore, services: InjectableServiceHub): void {
        services.conflict.handleQueueCheckForIfOpen(this._queueConflictCheckIfOpen.bind(this));
        services.conflict.handleQueueCheckFor(this._queueConflictCheck.bind(this));
        services.conflict.handleEnsureAllProcessed(this._waitForAllConflictProcessed.bind(this));
    }
}
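The custom enqueue processor on conflictResolveQueue implements a last-write-wins deduplication: re-enqueueing a file signals cancellation of any in-flight resolution for it (the cancel-resolve-conflict signal) and moves the file to the tail of the queue, so each path is resolved at most once per burst of changes. A standalone sketch of that requeue policy, as a hypothetical helper independent of QueueProcessor:

// Hypothetical standalone version of the requeue policy above.
function requeueLast<T>(queue: T[], item: T, onDuplicate?: (item: T) => void): T[] {
    if (queue.includes(item)) onDuplicate?.(item); // e.g., signal cancellation of in-flight work
    return [...queue.filter((e) => e !== item), item]; // drop the old position, append at the tail
}

// requeueLast(["a.md", "b.md"], "a.md") -> ["b.md", "a.md"]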