Compare commits


1 Commit

Author SHA1 Message Date
snyk-bot
d0548a280a fix: package.json & package-lock.json to reduce vulnerabilities
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-FASTXMLPARSER-7573289
2024-07-31 01:17:04 +00:00
238 changed files with 14975 additions and 53130 deletions

.eslintignore Normal file

@@ -0,0 +1,5 @@
node_modules
build
.eslintrc.js.bak
src/lib/src/patches/pouchdb-utils
esbuild.config.mjs


@@ -2,9 +2,7 @@
"root": true,
"parser": "@typescript-eslint/parser",
"plugins": [
"@typescript-eslint",
"eslint-plugin-svelte",
"eslint-plugin-import"
"@typescript-eslint"
],
"extends": [
"eslint:recommended",
@@ -17,19 +15,6 @@
"tsconfig.json"
]
},
"ignorePatterns": [
"**/node_modules/*",
"**/jest.config.js",
"src/lib/coverage",
"src/lib/browsertest",
"**/test.ts",
"**/tests.ts",
"**/**test.ts",
"**/**.test.ts",
"src/apps/**",
"esbuild.*.mjs",
"terser.*.mjs"
],
"rules": {
"no-unused-vars": "off",
"@typescript-eslint/no-unused-vars": [
@@ -38,22 +23,12 @@
"args": "none"
}
],
"no-unused-labels": "off",
"@typescript-eslint/ban-ts-comment": "off",
"no-prototype-builtins": "off",
"@typescript-eslint/no-empty-function": "off",
"require-await": "warn",
"@typescript-eslint/require-await": "warn",
"@typescript-eslint/no-misused-promises": "warn",
"@typescript-eslint/no-floating-promises": "warn",
"no-async-promise-executor": "warn",
"no-async-promise-executor": "off",
"@typescript-eslint/no-explicit-any": "off",
"@typescript-eslint/no-unnecessary-type-assertion": "error",
"no-constant-condition": [
"error",
{
"checkLoops": false
}
]
"@typescript-eslint/no-unnecessary-type-assertion": "error"
}
}


@@ -1,56 +0,0 @@
# Run tests by Harnessed CI
name: harness-ci
on:
  workflow_dispatch:
    inputs:
      testsuite:
        description: 'Run specific test suite (leave empty to run all)'
        type: choice
        options:
          - ''
          - 'suite/'
          - 'suitep2p/'
        default: ''
permissions:
  contents: read
jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24.x'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Install test dependencies (Playwright Chromium)
        run: npm run test:install-dependencies
      - name: Start test services (CouchDB + MinIO + Nostr Relay + WebPeer)
        run: npm run test:docker-all:start
      - name: Run tests suite
        if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suite/' }}
        env:
          CI: true
        run: npm run test suite/
      - name: Run P2P tests suite
        if: ${{ inputs.testsuite == '' || inputs.testsuite == 'suitep2p/' }}
        env:
          CI: true
        run: npm run test suitep2p/
      - name: Stop test services
        if: always()
        run: npm run test:docker-all:stop


@@ -10,19 +10,19 @@ jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0 # otherwise, you will fail to push refs to the dest repo
          submodules: recursive
      - name: Use Node.js
        uses: actions/setup-node@v4
        uses: actions/setup-node@v1
        with:
          node-version: '24.x' # You might need to adjust this value to your own version
          node-version: '18.x' # You might need to adjust this value to your own version
      # Get the version number and put it in a variable
      - name: Get Version
        id: version
        run: |
          echo "tag=$(git describe --abbrev=0 --tags)" >> $GITHUB_OUTPUT
          echo "::set-output name=tag::$(git describe --abbrev=0 --tags)"
      # Build the plugin
      - name: Build
        id: build
@@ -36,69 +36,59 @@ jobs:
          cp main.js manifest.json styles.css README.md ${{ github.event.repository.name }}
          zip -r ${{ github.event.repository.name }}.zip ${{ github.event.repository.name }}
      # Create the release on github
      # - name: Create Release
      #   id: create_release
      #   uses: actions/create-release@v1
      #   env:
      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #     VERSION: ${{ steps.version.outputs.tag }}
      #   with:
      #     tag_name: ${{ steps.version.outputs.tag }}
      #     release_name: ${{ steps.version.outputs.tag }}
      #     draft: true
      #     prerelease: false
      # # Upload the packaged release file
      # - name: Upload zip file
      #   id: upload-zip
      #   uses: actions/upload-release-asset@v1
      #   env:
      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #   with:
      #     upload_url: ${{ steps.create_release.outputs.upload_url }}
      #     asset_path: ./${{ github.event.repository.name }}.zip
      #     asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
      #     asset_content_type: application/zip
      # # Upload the main.js
      # - name: Upload main.js
      #   id: upload-main
      #   uses: actions/upload-release-asset@v1
      #   env:
      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #   with:
      #     upload_url: ${{ steps.create_release.outputs.upload_url }}
      #     asset_path: ./main.js
      #     asset_name: main.js
      #     asset_content_type: text/javascript
      # # Upload the manifest.json
      # - name: Upload manifest.json
      #   id: upload-manifest
      #   uses: actions/upload-release-asset@v1
      #   env:
      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #   with:
      #     upload_url: ${{ steps.create_release.outputs.upload_url }}
      #     asset_path: ./manifest.json
      #     asset_name: manifest.json
      #     asset_content_type: application/json
      # # Upload the style.css
      # - name: Upload styles.css
      #   id: upload-css
      #   uses: actions/upload-release-asset@v1
      #   env:
      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      #   with:
      #     upload_url: ${{ steps.create_release.outputs.upload_url }}
      #     asset_path: ./styles.css
      #     asset_name: styles.css
      #     asset_content_type: text/css
      - name: Create Release and Upload Assets
        uses: softprops/action-gh-release@v2
      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          VERSION: ${{ github.ref }}
        with:
          files: |
            ${{ github.event.repository.name }}.zip
            main.js
            manifest.json
            styles.css
          name: ${{ steps.version.outputs.tag }}
          tag_name: ${{ steps.version.outputs.tag }}
          draft: true
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
          draft: true
          prerelease: false
      # Upload the packaged release file
      - name: Upload zip file
        id: upload-zip
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./${{ github.event.repository.name }}.zip
          asset_name: ${{ github.event.repository.name }}-${{ steps.version.outputs.tag }}.zip
          asset_content_type: application/zip
      # Upload the main.js
      - name: Upload main.js
        id: upload-main
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./main.js
          asset_name: main.js
          asset_content_type: text/javascript
      # Upload the manifest.json
      - name: Upload manifest.json
        id: upload-manifest
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./manifest.json
          asset_name: manifest.json
          asset_content_type: application/json
      # Upload the style.css
      - name: Upload styles.css
        id: upload-css
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./styles.css
          asset_name: styles.css
          asset_content_type: text/css
      # TODO: release notes???


@@ -1,33 +0,0 @@
# Run Unit test without Harnesses
name: unit-ci
on:
  workflow_dispatch:
permissions:
  contents: read
jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24.x'
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Install test dependencies (Playwright Chromium)
        run: npm run test:install-dependencies
      - name: Run unit tests suite
        run: npm run test:unit

.gitignore vendored

@@ -9,22 +9,8 @@ package-lock.json
# build
main.js
main_org.js
main_org_*.js
*.js.map
meta.json
meta-*.json
# obsidian
data.json
.vscode
# environment variables
.env
# local config files
*.local
cov_profile/**
coverage


@@ -1,2 +0,0 @@
pouchdb-browser.js
main_org.js


@@ -1,20 +0,0 @@
import { readFileSync } from "fs";
let localPrettierConfig = {};
try {
    const localConfig = readFileSync(".prettierrc.local", "utf-8");
    localPrettierConfig = JSON.parse(localConfig);
    console.log("Using local Prettier config from .prettierrc.local");
} catch (e) {
    // no local config
}
const prettierConfig = {
    trailingComma: "es5",
    tabWidth: 4,
    printWidth: 120,
    semi: true,
    endOfLine: "cr",
    ...localPrettierConfig,
};
export default prettierConfig;


@@ -1,11 +0,0 @@
hostname=http://localhost:5989/
dbname=livesync-test-db2
minioEndpoint=http://127.0.0.1:9000
username=admin
password=testpassword
accessKey=minioadmin
secretKey=minioadmin
bucketName=livesync-test-bucket
# ENABLE_DEBUGGER=true
# PRINT_LIVESYNC_LOGS=true
# ENABLE_UI=true


@@ -1,39 +1,30 @@
<!-- For translation: 20240227r0 -->
# Self-hosted LiveSync
[Japanese docs](./README_ja.md) - [Chinese docs](./README_cn.md).
Self-hosted LiveSync is a community-developed synchronisation plug-in available on all Obsidian-compatible platforms. It leverages robust server solutions such as CouchDB or object storage systems (e.g., MinIO, S3, R2, etc.) to ensure reliable data synchronisation.
Additionally, it supports peer-to-peer synchronisation using WebRTC now (experimental), enabling you to synchronise your notes directly between devices without relying on a server.
Self-hosted LiveSync is a community-implemented synchronization plugin, available on every Obsidian-compatible platform, using CouchDB or Object Storage (e.g., MinIO, S3, R2, etc.) as the server.
![obsidian_live_sync_demo](https://user-images.githubusercontent.com/45774780/137355323-f57a8b09-abf2-4501-836c-8cb7d2ff24a3.gif)
>[!IMPORTANT]
> This plug-in is not compatible with the official "Obsidian Sync" and cannot synchronise with it.
Note: This plugin cannot synchronise with the official "Obsidian Sync".
## Features
- Synchronise vaults efficiently with minimal traffic.
- Handle conflicting modifications effectively.
    - Automatically merge simple conflicts.
- Use open-source solutions for the server.
    - Compatible solutions are supported.
- Support end-to-end encryption.
- Synchronise settings, snippets, themes, and plug-ins via [Customisation Sync (Beta)](docs/settings.md#6-customization-sync-advanced) or [Hidden File Sync](docs/settings.md#7-hidden-files-advanced).
- Enable WebRTC peer-to-peer synchronisation without requiring a `host` (Experimental).
    - This feature is still in the experimental stage. Please exercise caution when using it.
    - WebRTC is a peer-to-peer synchronisation method, so **at least one device must be online to synchronise**.
    - Instead of keeping your device online as a stable peer, you can use two pseudo-peers:
        - [livesync-serverpeer](https://github.com/vrtmrz/livesync-serverpeer): A pseudo-client running on the server for receiving and sending data between devices.
        - [webpeer](https://github.com/vrtmrz/livesync-commonlib/tree/main/apps/webpeer): A pseudo-client for receiving and sending data between devices.
            - A pre-built instance is available at [fancy-syncing.vrtmrz.net/webpeer](https://fancy-syncing.vrtmrz.net/webpeer/) (hosted on the vrtmrz blog site). This is also peer-to-peer. Feel free to use it.
    - For more information, refer to the [English explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync-en.html) or the [Japanese explanatory article](https://fancy-syncing.vrtmrz.net/blog/0034-p2p-sync).
This plug-in may be particularly useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons. It is also suitable for anyone seeking the peace of mind that comes with knowing their notes remain entirely private.
- Synchronize vaults very efficiently with less traffic.
- Good at handling conflicted modifications.
    - Automatic merging for simple conflicts.
- Using an OSS solution for the server.
    - Compatible solutions can be used.
- Supporting end-to-end encryption.
- Synchronisation of settings, snippets, themes, and plug-ins, via [Customization sync (Beta)](#customization-sync) or [Hidden File Sync](#hiddenfilesync)
- WebClip from [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
This plug-in might be useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons, or for anyone who would like the peace of mind of knowing that their notes are fully private.
>[!IMPORTANT]
> - Before installing or upgrading this plug-in, please back up your vault.
> - Do not enable this plug-in alongside another synchronisation solution at the same time (including iCloud and Obsidian Sync).
> - For backups, we also provide a plug-in called [Differential ZIP Backup](https://github.com/vrtmrz/diffzip).
> - Before installing or upgrading this plug-in, please back your vault up.
> - Do not enable this plugin with another synchronization solution at the same time (including iCloud and Obsidian Sync).
> - This is a synchronization plugin, not a backup solution. Do not rely on this for backup.
## How to use
@@ -52,11 +43,9 @@ This plug-in may be particularly useful for researchers, engineers, and develope
    1. [Setup CouchDB on fly.io](docs/setup_flyio.md)
    2. [Setup your CouchDB](docs/setup_own_server.md)
2. Configure plug-in in [Quick Setup](docs/quick_setup.md)
> [!TIP]
> Fly.io is no longer free. Fortunately, despite some issues, we can still use IBM Cloudant. Refer to [Setup IBM Cloudant](docs/setup_cloudant.md).
> We can also use peer-to-peer synchronisation without a server, or very cheap object storage (Cloudflare R2 can be used for free).
> HOWEVER, most importantly, we can use a server that we trust. Therefore, please set up your own server.
> CouchDB can be run on a Raspberry Pi (but please be careful about the security of your server).
> fly.io is no longer free. Fortunately, even though there are some issues, we can still use IBM Cloudant. Here is [Setup IBM Cloudant](docs/setup_cloudant.md). It will be updated soon!
## Information in StatusBar
@@ -86,19 +75,10 @@ Synchronization status is shown in the status bar with the following icons.
To prevent file and database corruption, please wait until all progress indicators have disappeared before stopping Obsidian, as far as possible (the plugin will also try to resume). This is especially important if you have deleted or renamed files.
## Tips and Troubleshooting
If you are having problems getting the plugin working, see: [Tips and Troubleshooting](docs/troubleshooting.md).
## Acknowledgements
The project has been in continual progress and harmony thanks to:
- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors).
- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors).
- JetBrains Community Programs / Support for Open-Source Projects. <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo" height="24">
May those who have contributed be honoured and remembered for their kindness and generosity.
## Development Guide
Please refer to [Development Guide](devs.md) for development setup, testing infrastructure, code conventions, and more.
If you are having problems getting the plugin working, see: [Tips and Troubleshooting](docs/troubleshooting.md)
## License


@@ -1,93 +0,0 @@
<!-- For translation: 20240227r0 -->
# Self-hosted LiveSync
[English docs](./README_ja.md) - [Japanese docs](./README_ja.md) - [Chinese docs](./README_cn.md).
Self-hosted LiveSync is a community-implemented synchronization plugin, available on every Obsidian-compatible platform, using CouchDB or Object Storage (e.g., MinIO, S3, R2, etc.) as the server.
![Obsidian Live Sync demo](https://user-images.githubusercontent.com/45774780/137355323-f57a8b09-abf2-4501-836c-8cb7d2ff24a3.gif)
Note: This plugin cannot synchronize with the official "Obsidian Sync".
## Features
- Synchronize vaults efficiently with less traffic.
- Good at handling conflicting modifications.
    - Automatic merging for simple conflicts.
- Uses open-source solutions for the server.
    - Compatible solutions can be used.
- Supports end-to-end encryption.
- Synchronization of settings, snippets, themes, and plugins via [Customization sync (Beta)](#customization-sync) or [Hidden File Sync](#hiddenfilesync)
- WebClip from [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
This plugin may be useful for researchers, engineers, and developers who need to keep their notes fully self-hosted for security reasons, or for anyone who wants the peace of mind of knowing that their notes are fully private.
>[!IMPORTANT]
> - Before installing or upgrading this plugin, back up your vault.
> - Do not enable this plugin together with another synchronization solution at the same time (including iCloud and Obsidian Sync).
> - This is a synchronization plugin, not a backup solution. Do not rely on it for backups.
## How to use
### 3-minute setup - CouchDB on fly.io
**Recommended for beginners**
[![LiveSync Setup on Fly.io in 2024 using Google Colab](https://img.youtube.com/vi/7sa_I1832Xc/0.jpg)](https://www.youtube.com/watch?v=7sa_I1832Xc)
1. [Set up CouchDB on fly.io](docs/setup_flyio_es.md)
2. Configure the plugin in [Quick Setup](docs/quick_setup_es.md)
### Manual setup
1. Set up the server
    1. [Set up CouchDB on fly.io](docs/setup_flyio_es.md)
    2. [Set up your own CouchDB](docs/setup_own_server_es.md)
2. Configure the plugin in [Quick Setup](docs/quick_setup_es.md)
> [!TIP]
> fly.io is no longer free. Fortunately, even though there are some issues, we can still use IBM Cloudant. Here is how to [set up IBM Cloudant](docs/setup_cloudant.md). It will be updated soon!
## Information in the status bar
The synchronization status is shown in the status bar with the following icons.
- Activity indicator
    - 📲 Network request
- Status
    - ⏹️ Stopped
    - 💤 LiveSync enabled. Waiting for changes
    - ⚡️ Synchronization in progress
    - ⚠ An error occurred
- Statistics indicator
    - ↑ Uploaded chunks and metadata
    - ↓ Downloaded chunks and metadata
- Progress indicator
    - 📥 Unprocessed transferred items
    - 📄 Database operations in progress
    - 💾 Storage write processes in progress
    - ⏳ Storage read processes in progress
    - 🛫 Pending storage read processes
    - 📬 Batched storage read processes
    - ⚙️ Hidden-file storage processes in progress or pending
    - 🧩 Chunks awaiting processing
    - 🔌 Customization items in progress (settings, snippets, and plugins)
To prevent file and database corruption, wait until all progress indicators have disappeared before stopping Obsidian (though the plugin will also try to resume). This is especially important if you have deleted or renamed files.
## Tips and Troubleshooting
If you are having problems getting the plugin working, see: [Tips and Troubleshooting](docs/troubleshooting_es.md).
## Acknowledgements
The project has progressed and stayed in harmony thanks to:
- Many [Contributors](https://github.com/vrtmrz/obsidian-livesync/graphs/contributors)
- Many [GitHub Sponsors](https://github.com/sponsors/vrtmrz#sponsors)
- JetBrains Community Programs / Support for Open-Source Projects <img src="https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png" alt="JetBrains logo." height="24">
May those who have contributed be honoured and remembered for their kindness and generosity.
## License
Licensed under the MIT License.

devs.md

@@ -1,140 +0,0 @@
# Self-hosted LiveSync Development Guide
## Project Overview
Self-hosted LiveSync is an Obsidian plugin for synchronising vaults across devices using CouchDB, MinIO/S3, or peer-to-peer WebRTC. The codebase uses a modular architecture with TypeScript, Svelte, and PouchDB.
## Architecture
### Module System
The plugin uses a dynamic module system to reduce coupling and improve maintainability:
- **Service Hub**: Central registry for services using dependency injection
- Services are registered and accessed via `this.services` (in most modules)
- **Module Loading**: All modules extend `AbstractModule` or `AbstractObsidianModule` (which extends `AbstractModule`). These modules are loaded in main.ts (and, in some cases, by other modules)
- **Module Categories** (by directory):
- `core/` - Platform-independent core functionality
- `coreObsidian/` - Obsidian-specific core (e.g., `ModuleFileAccessObsidian`)
- `essential/` - Required modules (e.g., `ModuleMigration`, `ModuleKeyValueDB`)
- `features/` - Optional features (e.g., `ModuleLog`, `ModuleObsidianSettings`)
- `extras/` - Development/testing tools (e.g., `ModuleDev`, `ModuleIntegratedTest`)
### Key Architectural Components
- **LiveSyncLocalDB** (`src/lib/src/pouchdb/`): Local PouchDB database wrapper
- **Replicators** (`src/lib/src/replication/`): CouchDB, Journal, and MinIO sync engines
- **Service Hub** (`src/modules/services/`): Central service registry using dependency injection
- **Common Library** (`src/lib/`): Platform-independent sync logic, shared with other tools
### File Structure Conventions
- **Platform-specific code**: Use `.platform.ts` suffix (replaced with `.obsidian.ts` in production builds via esbuild)
- **Development code**: Use `.dev.ts` suffix (replaced with `.prod.ts` in production)
- **Path aliases**: `@/*` maps to `src/*`, `@lib/*` maps to `src/lib/src/*`
## Build & Development Workflow
### Commands
```bash
npm run check # TypeScript and Svelte type checking
npm run dev # Development build with auto-rebuild (uses .env for test vault paths)
npm run build # Production build
npm run buildDev # Development build (one-time)
npm run bakei18n # Pre-build step: compile i18n resources (YAML → JSON → TS)
npm test # Run vitest tests (requires Docker services)
```
### Environment Setup
- Create a `.env` file with `PATHS_TEST_INSTALL` pointing to test vault plug-in directories (`:` separated on Unix, `;` on Windows); see the example below
- Development builds auto-copy to these paths on build
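A hypothetical example (the paths are illustrative, not real defaults):

```bash
# .env: copy built artefacts into two test vaults (Unix, colon-separated)
PATHS_TEST_INSTALL=/home/me/TestVault1/.obsidian/plugins/obsidian-livesync:/home/me/TestVault2/.obsidian/plugins/obsidian-livesync
```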
### Testing Infrastructure
- **Deno Tests**: Unit tests for platform-independent code (e.g., `HashManager.test.ts`)
- **Vitest** (`vitest.config.ts`): E2E tests run through a browser-based harness using Playwright
- **Docker Services**: Tests require CouchDB, MinIO (S3), and P2P services:
```bash
npm run test:docker-all:start # Start all test services
npm run test:full # Run tests with coverage
npm run test:docker-all:stop # Stop services
```
If some services are not needed, start only the required ones (e.g., `test:docker-couchdb:start`).
Note that if the services are already running, the start script will fail; please stop them first.
- **Test Structure**:
- `test/suite/` - Integration tests for sync operations
- `test/unit/` - Unit tests (via vitest, as harness is browser-based)
- `test/harness/` - Mock implementations (e.g., `obsidian-mock.ts`)
## Code Conventions
### Internationalisation (i18n)
- **Translation Workflow**:
1. Edit YAML files in `src/lib/src/common/messagesYAML/` (human-editable)
2. Run `npm run bakei18n` to compile: YAML → JSON → TypeScript constants
3. Use `$t()`, `$msg()` functions for translations
You can also use `$f` for formatted messages with Tagged Template Literals.
- **Usage**:
```typescript
$msg("dialog.someKey"); // Typed key with autocomplete
$t("Some message"); // Direct translation
$f`Hello, ${userName}`; // Formatted message
```
- **Supported languages**: `def` (English), `de`, `es`, `ja`, `ko`, `ru`, `zh`, `zh-tw`
### File Path Handling
- Use tagged types from `types.ts`: `FilePath`, `FilePathWithPrefix`, `DocumentID`
- Prefix constants: `CHeader` (chunks), `ICHeader`/`ICHeaderEnd` (internal data)
- Path utilities in `src/lib/src/string_and_binary/path.ts`: `addPrefix()`, `stripAllPrefixes()`, `shouldBeIgnored()`
### Logging & Debugging
- Use `this._log(msg, LOG_LEVEL_INFO)` in modules (it automatically prefixes the message with the module name); see the sketch after this list
- Log levels: `LOG_LEVEL_DEBUG`, `LOG_LEVEL_VERBOSE`, `LOG_LEVEL_INFO`, `LOG_LEVEL_NOTICE`, `LOG_LEVEL_URGENT`
- LOG_LEVEL_NOTICE and above are reported to the user via Obsidian notices
- LOG_LEVEL_DEBUG is for debug only and not shown in default builds
- Dev mode creates `ls-debug/` folder in `.obsidian/` for debug outputs (e.g., missing translations)
- This incurs a significant performance overhead.
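A minimal sketch (the base class and log-level constants come from the sections above; the method and messages are illustrative):

```typescript
// Illustrative only: emitting logs at different levels from a module.
export class ModuleSample extends AbstractObsidianModule {
    async performMaintenance(): Promise<void> {
        this._log("Starting maintenance", LOG_LEVEL_VERBOSE); // hidden unless verbose logging is enabled
        // ... actual work ...
        this._log("Maintenance finished", LOG_LEVEL_NOTICE); // surfaced to the user as an Obsidian notice
    }
}
```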
## Common Patterns
### Module Implementation
```typescript
export class ModuleExample extends AbstractObsidianModule {
    async _everyOnloadStart(): Promise<boolean> {
        /* ... */
    }
    onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
        services.appLifecycle.handleOnInitialise(this._everyOnloadStart.bind(this));
    }
}
```
### Settings Management
- Settings defined in `src/lib/src/common/types.ts` (`ObsidianLiveSyncSettings`)
- Configuration metadata in `src/lib/src/common/settingConstants.ts`
- Use `this.services.setting.saveSettingData()` instead of calling plugin methods directly (see the sketch below)
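A minimal sketch, assuming a module context (`syncOnSave` is a hypothetical flag; see `ObsidianLiveSyncSettings` for the real fields):

```typescript
// Illustrative: mutate a setting, then persist it via the service hub.
this.settings.syncOnSave = true; // hypothetical property
await this.services.setting.saveSettingData();
```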
### Database Operations
- Local database operations through `LiveSyncLocalDB` (wraps PouchDB)
- Document types: `EntryDoc` (files), `EntryLeaf` (chunks), `PluginDataEntry` (plugin sync)
## Important Files
- [main.ts](src/main.ts) - Plugin entry point, module registration
- [esbuild.config.mjs](esbuild.config.mjs) - Build configuration with platform/dev file replacement
- [package.json](package.json) - Scripts reference and dependencies
## Contribution Guidelines
- Follow existing code style and conventions
- Please bump dependencies with care: check the artifacts after updates with diff tools, and ensure only expected changes appear in the build output (to avoid unexpected vulnerabilities).
- When adding new features, please make sure an OSS implementation exists, and avoid proprietary services or APIs that may limit usage.
    - For example, any functionality to connect to a new type of server is expected either to have an OSS implementation available for that server, or to be managed under defined responsibilities and/or limitations without disrupting existing functionality, with the scope for surveillance reduced by some means (e.g., client-side encryption, or auditing the server ourselves).


@@ -25,10 +25,10 @@ npm run buildDev
## Make messages to be translated
1. Find the message that you want to be translated.
2. Change the literal to use `$tf`, like below.
2. Change the literal to a tagged template literal using `$f`, like below.
```diff
- Logger("Could not determine passphrase to save data.json! You probably make the configuration sure again!", LOG_LEVEL_URGENT);
+ Logger($tf('someKeyForPassphraseError'), LOG_LEVEL_URGENT);
+ Logger($f`Could not determine passphrase to save data.json! You probably make the configuration sure again!`, LOG_LEVEL_URGENT);
```
3. Make the PR to `https://github.com/vrtmrz/obsidian-livesync`.
4. Follow the steps of "Add translations for already defined terms" to add the translations.

Binary file not shown (deleted image; previous size: 7.3 KiB)


@@ -1,122 +0,0 @@
# [WITHDRAWN] Chunk Aggregation by Prefix
## Goal
To address the "document explosion" and storage bloat issues caused by the current chunking mechanism, while preserving the benefits of content-addressable storage and efficient delta synchronisation. This design aims to significantly reduce the number of documents in the database and simplify Garbage Collection (GC).
## Motivation
Our current synchronisation solution splits files into content-defined chunks, with each chunk stored as a separate document in CouchDB, identified by its hash. This architecture effectively leverages CouchDB's replication for automatic deduplication and efficient transfer.
However, this approach faces significant challenges as the number of files and edits increases:
1. **Document Explosion:** A large vault can generate millions of chunk documents, severely degrading CouchDB's performance, particularly during view building and replication.
2. **Storage Bloat & GC Difficulty:** Obsolete chunks generated during edits are difficult to identify and remove. Since CouchDB's deletion (`_deleted: true`) is a soft delete, and compaction is a heavy, space-intensive operation, unused chunks perpetually consume storage, making GC impractical for many users.
3. **The "Eden" Problem:** A previous attempt, "Keep newborn chunks in Eden", aimed to mitigate this by embedding volatile chunks within the parent document. While it reduced the number of standalone chunks, it introduced a new issue: the parent document's history (`_revs_info`) became excessively large, causing its own form of database bloat and making compaction equally necessary but difficult to manage.
This new design addresses the root cause—the sheer number of documents—by aggregating chunks into sets.
## Prerequisites
- The new implementation must maintain the core benefit of deduplication to ensure efficient synchronisation.
- The solution must not introduce a single point of bottleneck and should handle concurrent writes from multiple clients gracefully.
- The system must provide a clear and feasible strategy for Garbage Collection.
- The design should be forward-compatible, allowing for a smooth migration path for existing users.
## Outlined Methods and Implementation Plans
### Abstract
This design introduces a two-tiered document structure to manage chunks: **Index Documents** and **Data Documents**. Chunks are no longer stored as individual documents. Instead, they are grouped into `Data Documents` based on a common hash prefix. The existence and location of each chunk are tracked by `Index Documents`, which are also grouped by the same prefix. This approach dramatically reduces the total document count.
### Detailed Implementation
**1. Document Structure:**
- **Index Document:** Maps chunk hashes to their corresponding Data Document ID. Identified by a prefix of the chunk hash.
- `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
- Content:
```json
{
"_id": "idx:a9f1b",
"_rev": "...",
"chunks": {
"a9f1b12...": "dat:a9f1b-001",
"a9f1b34...": "dat:a9f1b-001",
"a9f1b56...": "dat:a9f1b-002"
}
}
```
- **Data Document:** Contains the actual chunk data as base64-encoded strings. Identified by a prefix and a sequential number.
- `_id`: `dat:{prefix}-{sequence}` (e.g., `dat:a9f1b-001`)
- Content:
```json
{
"_id": "dat:a9f1b-001",
"_rev": "...",
"chunks": {
"a9f1b12...": "...", // base64 data
"a9f1b34...": "..." // base64 data
}
}
```
**2. Configuration:**
- `chunk_prefix_length`: The number of characters from the start of a chunk hash to use as a prefix (e.g., `5`). This determines the granularity of aggregation.
- `data_doc_size_limit`: The maximum size for a single Data Document, to prevent it from becoming too large (e.g., 1 MB). When this limit is reached, a new Data Document with an incremented sequence number is created. A typed sketch of these settings follows.
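A sketch of these settings as a typed structure (the names come from this document; the values shown are only the examples above):

```typescript
interface ChunkAggregationConfig {
    chunk_prefix_length: number; // hash-prefix length used for aggregation (e.g., 5 for "a9f1b")
    data_doc_size_limit: number; // soft size cap for a single Data Document, in bytes (e.g., 1 MB)
}

const exampleConfig: ChunkAggregationConfig = { chunk_prefix_length: 5, data_doc_size_limit: 1024 * 1024 };
```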
**3. Write/Save Operation Flow:**
When a client creates new chunks:
1. For each new chunk, determine its hash prefix.
2. Read the corresponding `Index Document` (e.g., `idx:a9f1b`).
3. From the index, determine which of the new chunks already exist in the database.
4. For the **truly new chunks only**:
a. Read the last `Data Document` for that prefix (e.g., `dat:a9f1b-005`).
b. If it is nearing its size limit, create a new one (`dat:a9f1b-006`).
c. Add the new chunk data to the Data Document and save it.
5. Update the `Index Document` with the locations of the newly added chunks.
**4. Handling Write Conflicts:**
Concurrent writes to the same `Index Document` or `Data Document` from multiple clients will cause conflicts (409 Conflict). This is expected and must be handled gracefully. Since additions are incremental, the client application must implement a **retry-and-merge loop** (see the sketch after this list):
1. Attempt to save the document.
2. On a conflict, re-fetch the latest version of the document from the server.
3. Merge its own changes into the latest version.
4. Attempt to save again.
5. Repeat until successful or a retry limit is reached.
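A minimal client-side sketch of this loop with PouchDB (the document shape follows the examples above; `maxRetries` and the fresh-document fallback are illustrative):

```typescript
type IndexDoc = { _id: string; _rev?: string; chunks: Record<string, string> };

// Merge newly added chunk locations into an Index Document, retrying on 409 conflicts.
async function saveIndexWithRetry(
    db: PouchDB.Database,
    docId: string,
    newEntries: Record<string, string>,
    maxRetries = 5
): Promise<void> {
    for (let attempt = 0; attempt < maxRetries; attempt++) {
        // Re-fetch the latest revision (or start fresh if the document does not exist yet).
        const doc = await db.get<IndexDoc>(docId).catch(() => ({ _id: docId, chunks: {} } as IndexDoc));
        // Merge our incremental additions into the latest version.
        doc.chunks = { ...doc.chunks, ...newEntries };
        try {
            await db.put(doc);
            return; // saved successfully
        } catch (e: any) {
            if (e?.status !== 409) throw e; // only conflicts are retried
        }
    }
    throw new Error(`Conflict persisted after ${maxRetries} attempts for ${docId}`);
}
```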
**5. Garbage Collection (GC):**
GC becomes a manageable, periodic batch process (a sketch follows this list):
1. Scan all file metadata documents to build a master set of all *currently referenced* chunk hashes.
2. Iterate through all `Index Documents`. For each chunk listed:
a. If the chunk hash is not in the master reference set, it is garbage.
b. Remove the garbage entry from the `Index Document`.
c. Remove the corresponding data from its `Data Document`.
3. If a `Data Document` becomes empty after this process, it can be deleted.
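A compact sketch of this mark-and-sweep pass (document shapes follow this design; building `referencedHashes` from the file metadata, and conflict handling per section 4, are omitted):

```typescript
type PackDoc = { _id: string; _rev?: string; chunks: Record<string, string> };

async function collectGarbage(db: PouchDB.Database, referencedHashes: Set<string>): Promise<void> {
    // Step 2: iterate all Index Documents via an allDocs range query over the "idx:" prefix.
    const res = await db.allDocs<PackDoc>({ startkey: "idx:", endkey: "idx:\ufff0", include_docs: true });
    for (const row of res.rows) {
        const idx = row.doc!;
        let dirty = false;
        for (const [hash, dataDocId] of Object.entries(idx.chunks)) {
            if (referencedHashes.has(hash)) continue; // still referenced: keep it
            delete idx.chunks[hash]; // step 2b: drop the garbage entry from the index
            dirty = true;
            const dat = await db.get<PackDoc>(dataDocId); // step 2c: drop the data itself
            delete dat.chunks[hash];
            // Step 3: delete the Data Document entirely once it becomes empty.
            await (Object.keys(dat.chunks).length > 0 ? db.put(dat) : db.remove(dat as Required<PackDoc>));
        }
        if (dirty) await db.put(idx); // persist the cleaned index
    }
}
```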
## Test Strategy
1. **Unit Tests:** Implement tests for the conflict resolution logic (retry-and-merge loop) to ensure robustness.
2. **Integration Tests:**
- Verify that concurrent writes from multiple simulated clients result in a consistent, merged state without data loss.
- Run a full synchronisation scenario and confirm the resulting database has a significantly lower document count compared to the previous implementation.
3. **GC Test:** Simulate a scenario where files are deleted, run the GC process, and verify that orphaned chunks are correctly removed from both Index and Data documents, and that storage is reclaimed after compaction.
4. **Migration Test:** Develop and test a "rebuild" process for existing users, which migrates their chunk data into the new aggregated structure.
## Documentation Strategy
- This design document will be published to explain the new architecture.
- The configuration options (`chunk_prefix_length`, etc.) will be documented for advanced users.
- A guide for the migration/rebuild process will be provided.
## Future Work
The separation of index and data opens up a powerful possibility. While this design initially implements both within CouchDB, the `Data Documents` could be offloaded to a dedicated object storage service such as **S3, MinIO, or Cloudflare R2**.
In such a hybrid model, CouchDB would handle only the lightweight `Index Documents` and file metadata, serving as a high-speed synchronisation and coordination layer. The bulky chunk data would reside in a more cost-effective and scalable blob store. This would represent the ultimate evolution of this architecture, combining the best of both worlds.
## Consideration and Conclusion
This design directly addresses the scalability limitations of the original chunk-per-document model. By aggregating chunks into sets, it significantly reduces the document count, which in turn improves database performance and makes maintenance feasible. The explicit handling of write conflicts and a clear strategy for garbage collection make this a robust and sustainable long-term solution. It effectively resolves the problems identified in previous approaches, including the "Eden" experiment, by tackling the root cause of database bloat. This architecture provides a solid foundation for future growth and scalability.


@@ -1,127 +0,0 @@
# [WIP] The design intent explanation for using metadata and chunks
## Abstract
## Goal
- To explain the following:
- What metadata and chunks are
- The design intent of using metadata and chunks
## Background and Motivation
We are using PouchDB and CouchDB for storing files and synchronising them. PouchDB is a JavaScript database that stores data on the device (browser, and of course, Obsidian), while CouchDB is a NoSQL database that stores data on the server. The two databases can be synchronised to keep data consistent across devices via the CouchDB replication protocol. This is a powerful and flexible way to store and synchronise data, including conflict management, but it is not well suited for files. Therefore, we needed to manage how to store files and synchronise them.
## Terminology
- Password:
    - A string used to authenticate the user.
- Passphrase:
    - A string used to encrypt and decrypt data.
    - This is not a password.
- Encrypt:
    - To convert data into a format that is unreadable to anyone.
    - Can be decrypted by the user who has the passphrase.
    - Should be 1:n, containing random data to ensure that even the same data, when encrypted, results in different outputs.
- Obfuscate:
    - To convert data into a format that is not easily readable.
    - Can be decrypted by the user who has the passphrase.
    - Should be 1:1, containing no random data; the same data is always obfuscated to the same result. It is not necessarily unreadable.
- Hash:
    - To convert data into a fixed-length string that is not easily readable.
    - Cannot be decrypted.
    - Should be 1:1, containing no random data, and the same data is always hashed to the same result.
## Designs
### Principles
- To synchronise and handle conflicts, we should keep the history of modifications.
- No data should be lost. Even though some extra data may be stored, it should be removed later, safely.
- Each stored data item should be as small as possible to transfer efficiently, but not so small as to be inefficient.
- Any type of file should be supported, including binary files.
- Encryption should be supported efficiently.
- This method should not depart too far from the PouchDB/CouchDB philosophy. It needs to leave room for other `remote`s, to benefit from custom replicators.
As a result, we have adopted the following design.
- Files are stored as one metadata entry and multiple chunks.
- Chunks are content-addressable, and the metadata contains the ids of the chunks.
- Chunks may be referenced from multiple metadata entries. They should be efficiently managed to avoid redundancy.
### Metadata Design
The metadata contains the following information (a typed sketch follows the table):
| Field | Type | Description | Note |
| -------- | -------------------- | ---------------------------- | ----------------------------------------------------------------------------------------------------- |
| _id | string | The id of the metadata | It is created from the file path |
| _rev | string | The revision of the metadata | It is created by PouchDB |
| children | [string] | The ids of the chunks | |
| path | string | The path of the file | If `Obfuscate path` is enabled, this field is encrypted |
| size | number | The size of the metadata | Not respected; for troubleshooting |
| ctime | string | The creation timestamp | Not used to compare files, but applied when writing to storage |
| mtime | string | The modification timestamp | This will be used to compare files, and will be written to storage |
| type | `plain` \| `newnote` | The type of the file | Children of type `plain` will not be base64 encoded, while `newnote` will be |
| e_ | boolean | The file is encrypted | Encryption is processed during transfer to the remote. In local storage, this property does not exist |
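As a sketch, the table above corresponds roughly to this shape (types as documented here; the real source types may differ):

```typescript
type MetadataEntry = {
    _id: string; // derived from the file path (see the decision rule below)
    _rev: string; // assigned by PouchDB
    children: string[]; // ids of the chunks composing the file
    path: string; // encrypted when path obfuscation is enabled
    size: number; // informational only
    ctime: string; // creation timestamp (applied when writing to storage)
    mtime: string; // modification timestamp (used to compare files)
    type: "plain" | "newnote"; // "newnote" children are base64 encoded
    e_?: boolean; // present only while encrypted in transit to the remote
};
```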
#### Decision Rule for `_id` of Metadata
```ts
// Note: this is pseudo code illustrating the decision rule.
function metadataIdForPath(PATH: string): string {
    let _id = PATH;
    if (!HANDLE_FILES_AS_CASE_SENSITIVE) {
        _id = _id.toLowerCase();
    }
    if (_id.startsWith("_")) {
        _id = "/" + _id; // avoid ids reserved by CouchDB (leading underscore)
    }
    if (OBFUSCATE_PATH_ENABLED) {
        _id = `f:${obfuscatePath(_id, E2EE_PASSPHRASE)}`;
    }
    return _id;
}
```
#### Expected Questions
- Why do we need to handle files as case-insensitive?
    - Some filesystems are case-sensitive, while others are not. For example, Windows is case-insensitive, while Linux is case-sensitive. Therefore, we handle files as case-insensitive by default to manage conflicts consistently across devices.
    - The trade-off is that you will not be able to manage files whose names differ only by case, so this behaviour can be disabled if all of your devices use case-sensitive filesystems.
- Why obfuscate the path?
    - E2EE only encrypts the content of the file, not its metadata. Hence, E2EE alone is not enough to protect the vault completely. The path is also part of the metadata, so it should be obfuscated. This is a trade-off between security and performance. However, if you title notes with sensitive information, you should obfuscate the path.
- What is `f:`?
    - It is a prefix indicating that the path is obfuscated, used to distinguish between normal paths and obfuscated paths. For file enumeration, Self-hosted LiveSync must scan documents to find metadata entries, excluding chunks and other information.
- Why does an unobfuscated path not start with `f:`?
    - For compatibility. Self-hosted LiveSync, by its nature, must also be able to handle files created with newer versions as far as possible.
### Chunk Design
#### Chunk Structure
The chunk contains the following information (an id-derivation sketch appears below):
| Field | Type | Description | Note |
| ----- | ------------ | ------------------------- | ----------------------------------------------------------------------------------------------------- |
| _id | `h:{string}` | The id of the chunk | It is created from the hash of the chunk content |
| _rev | string | The revision of the chunk | It is created by PouchDB |
| data | string | The content of the chunk | |
| type | `leaf` | Fixed | |
| e_ | boolean | The chunk is encrypted | Encryption is processed during transfer to the remote. In local storage, this property does not exist |
**SORRY, TO BE WRITTEN, BUT WE HAVE IMPLEMENTED `v2`, WHICH REQUIRES MORE INFORMATION.**
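As a hypothetical illustration of the content addressing above (the actual hash algorithm and encoding used by the plugin may differ):

```typescript
// Derive a chunk _id ("h:{hash}") from its content. SHA-256/hex is an assumption for illustration.
async function chunkIdForData(data: string): Promise<string> {
    const digest = await crypto.subtle.digest("SHA-256", new TextEncoder().encode(data));
    const hex = Array.from(new Uint8Array(digest), (b) => b.toString(16).padStart(2, "0")).join("");
    return `h:${hex}`;
}
```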
### How they are unified
## Deduplication and Optimisation
## Synchronisation Strategy
## Performance Considerations
## Security and Privacy
## Edge Cases


@@ -1,117 +0,0 @@
# [IN DESIGN] Tiered Chunk Storage with Live Compaction
**VERY IMPORTANT NOTE: This design must be used with the new journal synchronisation method. Otherwise, we risk introducing the bloat of changes from hot-pack into the Bucket. (CouchDB/PouchDB can synchronise only the most recent changes, or resolve conflicts.) The previous Journal Sync IS NOT such a method. Please proceed with caution.**
## Goal
To establish a highly efficient, robust, and scalable synchronisation architecture by introducing a tiered storage system inspired by Log-Structured Merge-Trees (LSM-Trees). This design aims to address the challenges of real-time synchronisation, specifically the massive generation of transient data, while minimising storage bloat and ensuring high performance.
## Motivation
Our previous designs, including "Chunk Aggregation by Prefix", successfully addressed the "document explosion" problem. However, the introduction of real-time editor synchronisation exposed a new, critical challenge: the constant generation of short-lived "garbage" chunks during user input. This "garbage storm" places immense pressure on storage, I/O, and the Garbage Collection (GC) process.
A simple aggregation strategy is insufficient because it treats all data equally, mixing valuable, stable chunks with transient, garbage chunks in permanent storage. This leads to storage bloat and inefficient compaction. We require a system that can intelligently distinguish between "hot" (volatile) and "cold" (stable) data, processing them in the most efficient manner possible.
## Outlined Methods and Implementation Plans
### Abstract
This design implements a two-tiered storage system within CouchDB.
1. **Level 0 Hot Storage:** A set of "Hot-Packs", one for each active client. These act as fast, append-only logs for all newly created chunks. They serve as a temporary staging area, absorbing the "garbage storm" of real-time editing.
2. **Level 1 Cold Storage:** The permanent, immutable storage for stable chunks, consisting of **Index Documents** for fast lookups and **Data Documents (Cold-Packs)** for storing chunk data.
A background "Compaction" process continuously promotes stable chunks from Hot Storage to Cold Storage, while automatically discarding garbage. This keeps the permanent storage clean and highly optimised.
### Detailed Implementation
**1. Document Structure:**
- **Hot-Pack Document (Level 0):** A per-client, append-only log.
- `_id`: `hotpack:{client_id}` (`client_id` could be the same as the `deviceNodeID` used in the `accepted_nodes` in MILESTONE_DOC; enables database 'lockout' for safe synchronisation)
- Content: A log of chunk creation events.
```json
{
"_id": "hotpack:a9f1b12...",
"_rev": "...",
"log": [
{ "hash": "abc...", "data": "...", "ts": ..., "file_id": "file1" },
{ "hash": "def...", "data": "...", "ts": ..., "file_id": "file2" }
]
}
```
- **Index Document (Level 1):** A fast, prefix-based lookup table for stable chunks.
- `_id`: `idx:{prefix}` (e.g., `idx:a9f1b`)
- Content: Maps a chunk hash to the ID of the Cold-Pack it resides in.
```json
{
"_id": "idx:a9f1b",
"chunks": { "a9f1b12...": "dat:1678886400" }
}
```
- **Cold-Pack Document (Level 1):** An immutable data block created by the compaction process.
- `_id`: `dat:{timestamp_or_uuid}` (e.g., `dat:1678886400123`)
- Content: A collection of stable chunks.
```json
{
"_id": "dat:1678886400123",
"chunks": { "a9f1b12...": "...", "c3d4e5f...": "..." }
}
```
- **Hot-Pack List Document:** A central registry of all active Hot-Packs. This might be a computed document that clients maintain in memory on startup.
- `_id`: `hotpack_list`
- Content: `{"active_clients": ["hotpack:a9f1b12...", "hotpack:c3d4e5f..."]}`
**2. Write/Save Operation Flow (Real-time Editing):**
1. A client generates a new chunk.
2. It **immediately appends** the chunk object (`{hash, data, ts, file_id}`) to its **own** Hot-Pack document's `log` array within its local PouchDB. This operation is extremely fast.
3. The PouchDB synchronisation process replicates this change to the remote CouchDB and other clients in the background. No other Hot-Packs are consulted during this write operation.
**3. Read/Load Operation Flow:**
To find a chunk's data (a sketch follows this list):
1. The client first consults its in-memory list of active Hot-Pack IDs (see section 5).
2. It searches for the chunk hash in all **Hot-Pack documents**, starting from its own, then others. It reads them in reverse log order (newest first).
3. If not found, it consults the appropriate **Index Document (`idx:...`)** to get the ID of the Cold-Pack.
4. It then reads the chunk data from the corresponding **Cold-Pack document (`dat:...`)**.
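A condensed sketch of this lookup order (document shapes follow the examples above; `activeHotPackIds` and the prefix length are assumptions):

```typescript
type HotPack = { _id: string; log: { hash: string; data: string; ts: number; file_id: string }[] };
type PrefixIndex = { _id: string; chunks: Record<string, string> };
type ColdPack = { _id: string; chunks: Record<string, string> };

declare function activeHotPackIds(): string[]; // assumed helper: own hot-pack id first (see section 5)

async function readChunk(db: PouchDB.Database, hash: string, prefixLen = 5): Promise<string | undefined> {
    // Steps 1-2: search the hot-packs first, newest log entries first.
    for (const id of activeHotPackIds()) {
        const hot = await db.get<HotPack>(id).catch(() => undefined);
        const entry = hot?.log.slice().reverse().find((e) => e.hash === hash);
        if (entry) return entry.data;
    }
    // Step 3: consult the prefix index to locate the cold-pack.
    const idx = await db.get<PrefixIndex>(`idx:${hash.slice(0, prefixLen)}`).catch(() => undefined);
    const coldId = idx?.chunks[hash];
    if (!coldId) return undefined;
    // Step 4: read the chunk data from the cold-pack.
    return (await db.get<ColdPack>(coldId)).chunks[hash];
}
```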
**4. Compaction & Promotion Process (The "GC"):**
This is a background task run periodically by clients, or triggered when the number of unprocessed log entries exceeds a threshold (to maintain the ability to synchronise with the remote database, which has a limited document size).
1. The client takes its own Hot-Pack (`hotpack:{client_id}`) and scans its `log` array from the beginning (oldest first).
2. For each chunk in the log, it checks if the chunk is still referenced in the latest revision of any file.
- **If not referenced (Garbage):** The log entry is simply discarded.
- **If referenced (Stable):** The chunk is added to a "promotion batch".
3. After scanning a certain number of log entries, the client takes the "promotion batch".
4. It creates one or more new, immutable **Cold-Pack (`dat:...`)** documents to store the chunk data from the batch.
5. It updates the corresponding **Index (`idx:...`)** documents to point to the new Cold-Pack(s).
6. Once the promotion is successfully saved to the database, it **removes the processed entries from its Hot-Pack's `log` array**. This is a critical step to prevent reprocessing and keep the Hot-Pack small.
**5. Hot-Pack List Management:**
To know which Hot-Packs to read, clients will do the following (step 2 is sketched after this list):
1. On startup, load the `hotpack_list` document into memory.
2. Use PouchDB's live `changes` feed to monitor the creation of new `hotpack:*` documents.
3. Upon detecting an unknown Hot-Pack, the client updates its in-memory list and attempts to update the central `hotpack_list` document (on a best-effort basis, with conflict resolution).
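A sketch of step 2 using PouchDB's live changes feed (the in-memory bookkeeping is illustrative):

```typescript
declare const db: PouchDB.Database;
const knownHotPacks = new Set<string>(); // seeded from the hotpack_list document on startup

// Watch for hot-pack documents we have not seen yet.
db.changes({ live: true, since: "now", filter: (doc) => doc._id.startsWith("hotpack:") })
    .on("change", (change) => {
        if (!knownHotPacks.has(change.id)) {
            knownHotPacks.add(change.id);
            // A best-effort update of the central hotpack_list document would go here,
            // using a retry-and-merge loop on conflict.
        }
    });
```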
## Planned Test Strategy
1. **Unit Tests:** Test the Compaction/Promotion logic extensively. Ensure garbage is correctly identified and stable chunks are promoted correctly.
2. **Integration Tests:** Simulate a multi-client real-time editing session.
- Verify that writes are fast and responsive.
- Confirm that transient garbage chunks do not pollute the Cold Storage.
- Confirm that after a period of inactivity, compaction runs and the Hot-Packs shrink.
3. **Stress Tests:** Simulate many clients joining and leaving to test the robustness of the `hotpack_list` management.
## Documentation Strategy
- This design document will serve as the core architectural reference.
- The roles of each document type (Hot-Pack, Index, Cold-Pack, List) will be clearly explained for future developers.
- The logic of the Compaction/Promotion process will be detailed.
## Consideration and Conclusion
This tiered storage design is a direct evolution, born from the lessons of previous architectures. It embraces the ephemeral nature of data in real-time applications. By creating a "staging area" (Hot-Packs) for volatile data, it protects the integrity and performance of the permanent "cold" storage. The Compaction process acts as a self-cleaning mechanism, ensuring that only valuable, stable data is retained long-term. This is not just an optimisation; it is a fundamental shift that enables robust, high-performance, and scalable real-time synchronisation on top of CouchDB.


@@ -1,97 +0,0 @@
# [IN DESIGN] Tiered Chunk Storage for Bucket Sync
## Goal
To evolve the "Journal Sync" mechanism by integrating the Tiered Storage architecture. This design aims to drastically reduce the size and number of sync packs, minimise storage consumption on the backend bucket, and establish a clear, efficient process for Garbage Collection, all while remaining protocol-agnostic.
## Motivation
The original "Journal Sync" liberates us from CouchDB's protocol, but it still packages and transfers entire document changes, including bulky and often transient chunk data. In a real-time or frequent-editing scenario, this results in:
1. **Bloated Sync Packs:** Packs become large with redundant or short-lived chunk data, increasing upload and download times.
2. **Inefficient Storage:** The backend bucket stores numerous packs containing overlapping and obsolete chunk data, wasting space.
3. **Impractical Garbage Collection:** Identifying and purging obsolete *chunk data* from within the pack-based journal history is extremely difficult.
This new design addresses these problems by fundamentally changing *what* is synchronised in the journal packs. We will synchronise lightweight metadata and logs, while handling bulk data separately.
## Outlined methods and implementation plans
### Abstract
This design adapts the Tiered Storage model for a bucket-based backend. The backend bucket is partitioned into distinct areas for different data types. The "Journal Sync" process is now responsible for synchronising only the "hot" volatile data and lightweight metadata. A separate, asynchronous "Compaction" process, which can be run by any client, is responsible for migrating stable data into permanent, deduplicated "cold" storage.
### Detailed Implementation
**1. Bucket Structure:**
The backend bucket will have four distinct logical areas (prefixes):
- `packs/`: For "Journal Sync" packs, containing the journal of metadata and Hot-Log changes.
- `hot_logs/`: A dedicated area for each client's "Hot-Log," containing newly created, volatile chunks.
- `indices/`: For prefix-based Index files, mapping chunk hashes to their permanent location in Cold Storage.
- `cold_chunks/`: For deduplicated, stable chunk data, stored by content hash.
**2. Data Structures (Client-side PouchDB & Backend Bucket):**
- **Client Metadata:** Standard file metadata documents, kept in the client's PouchDB.
- **Hot-Log (in `hot_logs/`):** A per-client, append-only log file on the bucket.
- Path: `hot_logs/{client_id}.jsonlog`
- Content: A sequence of JSON objects, one per line, representing chunk creation events. `{"hash": "...", "data": "...", "ts": ..., "file_id": "..."}`
- **Index File (in `indices/`):** A JSON file for a given hash prefix.
- Path: `indices/{prefix}.json`
- Content: Records which chunk hashes already exist in Cold Storage (each hash is also its key in `cold_chunks/`). `{"hash_abc...": true, "hash_def...": true}`
- **Cold Chunk (in `cold_chunks/`):** The raw, immutable, deduplicated chunk data.
- Path: `cold_chunks/{chunk_hash}`
**3. "Journal Sync" - Send/Receive Operation (Not Live):**
This process is now extremely lightweight.
1. **Send:**
a. The client takes all newly generated chunks and **appends them to its own Hot-Log file (`hot_logs/{client_id}.jsonlog`)** on the bucket.
b. The client updates its local file metadata in PouchDB.
c. It then creates a "Journal Sync" pack containing **only the PouchDB journal of the file metadata changes.** This pack is very small as it contains no chunk data.
d. The pack is uploaded to `packs/`.
2. **Receive:**
a. The client downloads new packs from `packs/` and applies the metadata journal to its local PouchDB.
b. It downloads the latest versions of all **other clients' Hot-Log files** from `hot_logs/`.
c. Now the client has a complete, up-to-date view of all metadata and all "hot" chunks.
**4. Read/Load Operation Flow:**
To find a chunk's data (a sketch follows this list):
1. The client searches for the chunk hash in its local copy of all **Hot-Logs**.
2. If not found, it downloads and consults the appropriate **Index file (`indices/{prefix}.json`)**.
3. If the index confirms existence, it downloads the data from **`cold_chunks/{chunk_hash}`**.
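A sketch of this three-step lookup against the bucket (the `getObject` helper and the in-memory hot-log copies are assumptions based on the layout above):

```typescript
// Hypothetical bucket accessor: resolves to the object body, or undefined if the key is missing.
declare function getObject(path: string): Promise<string | undefined>;

type HotLogEntry = { hash: string; data: string; ts: number; file_id: string };

async function readChunkFromBucket(hash: string, hotLogs: HotLogEntry[][], prefixLen = 5): Promise<string | undefined> {
    // Step 1: search the local copies of all hot-logs first.
    for (const log of hotLogs) {
        const hit = log.find((e) => e.hash === hash);
        if (hit) return hit.data;
    }
    // Step 2: consult the prefix index to confirm the chunk exists in cold storage.
    const idxRaw = await getObject(`indices/${hash.slice(0, prefixLen)}.json`);
    if (!idxRaw || !JSON.parse(idxRaw)[hash]) return undefined;
    // Step 3: download the raw chunk data.
    return getObject(`cold_chunks/${hash}`);
}
```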
**5. Compaction & Promotion Process (Asynchronous "GC"):**
This is a deliberate, offline-capable process that any client can choose to run.
1. The client "leases" its own Hot-Log for compaction.
2. It reads its entire `hot_logs/{client_id}.jsonlog`.
3. For each chunk in the log, it checks if the chunk is referenced in the *current, latest state* of the file metadata.
- **If not referenced (Garbage):** The log entry is discarded.
- **If referenced (Stable):** The chunk is added to a "promotion batch."
4. For each chunk in the promotion batch:
a. It checks the corresponding `indices/{prefix}.json` to see if the chunk already exists in Cold Storage.
b. If it does not exist, it **uploads the chunk data to `cold_chunks/{chunk_hash}`** and updates the `indices/{prefix}.json` file.
5. Once the entire Hot-Log has been processed, the client **deletes its `hot_logs/{client_id}.jsonlog` file** (or truncates it to empty), effectively completing the cycle.
## Test strategy
1. **Component Tests:** Test the Compaction process independently. Ensure it correctly identifies stable versus garbage chunks and populates the `cold_chunks/` and `indices/` areas correctly.
2. **Integration Tests:**
- Simulate a multi-client sync cycle. Verify that sync packs in `packs/` are small.
- Confirm that `hot_logs/` are correctly created and updated.
- Run the Compaction process and verify that data migrates correctly to cold storage and the hot log is cleared.
3. **Conflict Tests:** Simulate two clients trying to compact the same index file simultaneously and ensure the outcome is consistent (for example, via a locking mechanism or last-write-wins).
## Documentation strategy
- This design document will be the primary reference for the bucket-based architecture.
- The structure of the backend bucket (`packs/`, `hot_logs/`, etc.) will be clearly defined.
- A detailed description of how to run the Compaction process will be provided to users.
## Consideration and Conclusion
By applying the Tiered Storage model to "Journal Sync", we transform it into a remarkably efficient system. The synchronisation of everyday changes becomes extremely fast and lightweight, as only metadata journals are exchanged. The heavy lifting of data deduplication and permanent storage is offloaded to a separate, asynchronous Compaction process. This clear separation of concerns makes the system highly scalable, minimises storage costs, and finally provides a practical, robust solution for Garbage Collection in a protocol-agnostic, bucket-based environment.


@@ -1,6 +1,6 @@
# Keep newborn chunks in Eden
# Keep newborn chunks in Eden.
Notice: deprecated. Please refer to the results section of this document.
NOTE: This is the planned feature design document. This is planned, but not yet implemented (v0.23.3). It has not reached design freeze and will be added to from time to time.
## Goal
@@ -19,18 +19,15 @@ Reduce the number of chunks which in volatile, and reduce the usage of storage o
- The problem is that this unnecessary chunking slows down both local and remote operations.
## Prerequisite
- The implementation must be able to control the size of the document appropriately so that it does not become non-transferable (1).
- The implementation must be such that data corruption can be avoided even if forward compatibility is not maintained; due to the nature of Self-hosted LiveSync, backward version connexions are expected.
- Viewed as a feature:
- This feature should be disabled for migration users.
- This feature should be enabled for new users and after rebuilds of migrated users.
- Therefore, returning to the implementation view: ideally, the implementation should be such that data recovery can be achieved by upgrading immediately after replication.
## Outlined methods and implementation plans
### Abstract
To store and transfer only stable chunks independently and share them between multiple documents after stabilisation, new chunks, i.e., chunks that are considered non-stable, are instead stored inside the document and transferred with it. In this case, care should be taken not to exceed prerequisite (1).
If this is achieved, a non-stable chunk will not be transferred on its own; and even if it is, the chunk is stored within the document, so the size can be reduced by compaction.
@@ -43,11 +40,11 @@ Details are given below.
type EntryWithEden = {
    eden: {
        [key: DocumentID]: {
            data: string;
            epoch: number; // The document revision at which this chunk was born.
        };
    };
};
```
2. The following configuration items are added:
Note: These configurations should be shared as `Tweaks value` between each client.
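As an illustration of how such configuration items could interact, here is a hedged sketch of a graduation check. The threshold names mirror the setting keys documented elsewhere in this repository (`maxChunksInEden`, `maxTotalLengthInEden`, `maxAgeInEden`); the function itself is not the actual implementation:
```typescript
type EdenEntry = { data: string; epoch: number };

// Illustrative only: decides whether a newborn chunk may stay in Eden,
// based on count, total size, and age (in document revisions).
function canStayInEden(
    eden: Record<string, EdenEntry>,
    currentEpoch: number,
    settings: { maxChunksInEden: number; maxTotalLengthInEden: number; maxAgeInEden: number }
): boolean {
    const entries = Object.values(eden);
    if (entries.length >= settings.maxChunksInEden) return false;
    const totalLength = entries.reduce((sum, e) => sum + e.data.length, 0);
    if (totalLength >= settings.maxTotalLengthInEden) return false;
    // Chunks that have outlived the incubation period must graduate.
    if (entries.some((e) => currentEpoch - e.epoch > settings.maxAgeInEden)) return false;
    return true;
}
```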
@@ -66,7 +63,6 @@ Details are given below.
5. In End-to-End Encryption, property `eden` of documents will also be encrypted.
### Note
- When this feature has been enabled, forward compatibility is temporarily lost. However, it is detected as missing chunks, and this data is not reflected in the storage in the old version. Therefore, no data loss will occur.
## Test strategy
@@ -81,26 +77,5 @@ Details are given below.
- Indeed, we lack a complete configuration table. Efforts will be made and, once it can be produced, this document will reference it. However, it is not required while this remains an experimental or beta feature.
- However, this might be an essential feature. Further efforts are desired.
## Results from actual operation
After implementing this feature, we have been using it for a while. The following results were obtained.
- The drawbacks were thought not to be a problem, but they actually were:
- A document with `Eden` has a considerably larger history compared to a document without `Eden`.
- Self-hosted LiveSync does not perform compaction aggressively, which results in the remote database becoming partially bloated.
- Compaction of the Remote Database (CouchDB) requires the same amount of free space as the size of the database. Therefore, compaction cannot be performed once the remote database has reached its maximum size. By the time we detect this, it is too late.
- We have mentioned that `We need compaction` in previous sections. However, it is very hard to determine whether compaction is required until the database has already bloated. (Of course, compaction takes some time and, literally, some documents lose their history. It is not a good idea to perform it frequently and meaninglessly. A manual decision is needed, but that is genuinely difficult for ordinary users.)
### Consideration and Conclusion
This feature has two aspects:
- For users who are familiar with CouchDB, this feature is somewhat useful. They can watch and handle the database by themselves.
- For users who are not familiar with CouchDB, i.e., ordinary users, this feature is not very useful. They are not familiar with the database and do not know how to handle it. Therefore, they cannot decide whether compaction is required or not.
Hence, this feature will be kept as an experimental feature, but it is not enabled by default. In addition, it is marked as deprecated. A detailed notice would only be noise for users who are not familiar with CouchDB. Details will be kept in this document, for the future.
Using this feature is not recommended unless you are familiar with CouchDB and database management.
Vorotamoroz has written this document. Bias: I am the first author of this plug-in, familiar with CouchDB.
Research and development has been frozen on 2025-04-11. But, bugs will be fixed if they are found. Please feel free to report them.
To be described after implementation, testing, and release.

View File

@@ -1,759 +1,267 @@
NOTE: This document is not completed. I will improve this doc in a while, but your contributions are always welcome.
NOTE: This document has surely become outdated. I will improve this doc in a while, but your contributions are always welcome.
# Settings of Self-hosted LiveSync
# Settings of this plugin
There are many settings in Self-hosted LiveSync. This document describes each setting in detail (not how-to). Configuration and settings are divided into several categories and indicated by icons. The icon is as follows:
The settings dialog has been quite long, so I split each configuration into tabs.
If you notice anything, please feel free to inform me.
| Icon | Description |
| :--: | ------------------------------------------------------------------ |
| 💬 | [0. Change Log](#0-change-log) |
| 🧙‍♂️ | [1. Setup](#1-setup) |
| ⚙️ | [2. General Settings](#2-general-settings) |
| 🛰️ | [3. Remote Configuration](#3-remote-configuration) |
| 🔄 | [4. Sync Settings](#4-sync-settings) |
| 🚦 | [5. Selector (Advanced)](#5-selector-advanced) |
| 🔌 | [6. Customization sync (Advanced)](#6-customization-sync-advanced) |
| 🧰 | [7. Hatch](#7-hatch) |
| 🔧 | [8. Advanced (Advanced)](#8-advanced-advanced) |
| 💪 | [9. Power users (Power User)](#9-power-users-power-user) |
| 🩹 | [10. Patches (Edge Case)](#10-patches-edge-case) |
| 🎛️ | [11. Maintenance](#11-maintenance) |
| icon | description |
| :---: | ----------------------------------------------------------------- |
| 🛰️ | [Remote Database Configurations](#remote-database-configurations) |
| 📦 | [Local Database Configurations](#local-database-configurations) |
| ⚙️ | [General Settings](#general-settings) |
| 🔁 | [Sync Settings](#sync-settings) |
| 🔧 | [Miscellaneous](#miscellaneous) |
| 🧰 | [Hatch](#miscellaneous) |
| 🔌 | [Plugin and its settings](#plugin-and-its-settings) |
| 🚑 | [Corrupted data](#corrupted-data) |
## 0. Change Log
## Remote Database Configurations
Configure the settings of the synchronisation server. If any synchronisation is enabled, you cannot edit this section. Please disable all synchronisation before changing it.
This pane shows version-update information. You can check what has been changed in recent versions.
### URI
URI of CouchDB. In the case of Cloudant, it is the "External Endpoint (preferred)".
**Do not end it up with a slash** when it doesn't contain the database name.
## 1. Setup
### Username
Your CouchDB's Username. Administrator's privilege is preferred.
This pane is used for setting up Self-hosted LiveSync. There are several options to set up Self-hosted LiveSync.
### Password
Your CouchDB's Password.
Note: This password is saved in your Obsidian vault in plain text.
### 1. Quick Setup
### Database Name
The Database name to synchronize.
If it does not exist, it will be created automatically.
The most preferred method to set up Self-hosted LiveSync. You can set it up with a few clicks.
#### Connect with Setup URI
### End to End Encryption
Encrypt your database. It affects only the database; your files are left as plain.
Setup the Self-hosted LiveSync with the `setup URI` which is [copied from another device](#copy-current-settings-as-a-new-setup-uri) or the setup script.
The encryption algorithm is AES-GCM.
#### Manual setup
Note: If you want to use "Plugins and their settings", you have to enable this.
Step-by-step setup for Self-hosted LiveSync. You can set up Self-hosted LiveSync manually with minimal setting items.
### Passphrase
The passphrase used as the encryption key. Please use a long text.
#### Enable LiveSync
### Apply
Set the End to End encryption enabled and its passphrase for use in replication.
If you change the passphrase of an existing database, overwriting the remote database is strongly recommended.
This button only appears when the setup has not been completed. If you have completed the setup manually, you can enable LiveSync on this device with this button.
### 2. To setup other devices
### Overwrite remote database
Overwrite the remote database with the local database using the passphrase you applied.
#### Copy the current settings to a Setup URI
You can copy the current settings as a new setup URI. And this URI can be used to setup the other devices as [Use the copied setup URI](#use-the-copied-setup-uri).
### Rebuild
Rebuild remote and local databases with local files. It will delete all document history and retained chunks, and shrink the database.
### 3. Reset
### Test Database connection
You can check the connection by clicking this button.
#### Discard existing settings and databases
### Check database configuration
You can check and modify your CouchDB configuration from here directly.
Reset the Self-hosted LiveSync settings and databases.
**Hazardous operation. Please be careful when using this.**
### Lock remote database.
Other devices are banned from the database once you have locked it.
If something has gone wrong on other devices, you can protect the vault and the remote database from your device.
### 4. Enable extra and advanced features
## Local Database Configurations
"Local Database" is created inside your obsidian.
To keep the set-up dialogue simple, some panes are hidden in default. You can enable them here.
### Batch database update
Delay database updates until replication is raised, another file is opened, window visibility changes, or a file event other than a modification occurs.
This option cannot be used together with LiveSync.
#### Enable advanced features
Setting key: useAdvancedMode
### Fetch rebuilt DB.
If one device rebuilds or locks the remote database, every other device will be locked out from the remote database until it fetches the rebuilt DB.
The following panes will be shown when you enable this setting.
| Icon | Description |
| :--: | ------------------------------------------------------------------ |
| 🚦 | [5. Selector (Advanced)](#5-selector-advanced) |
| 🔌 | [6. Customization sync (Advanced)](#6-customization-sync-advanced) |
| 🔧 | [8. Advanced (Advanced)](#8-advanced-advanced) |
### minimum chunk size and LongLine threshold
The configuration of chunk splitting.
#### Enable poweruser features
Self-hosted LiveSync splits the note into chunks for efficient synchronization. This chunk should be longer than the "Minimum chunk size".
Setting key: usePowerUserMode
Specifically, the length of the chunk is determined in the following order.
The following panes will be shown when you enable this setting.
| Icon | Description |
| :--: | ------------------------------------------------------------------ |
| 💪 | [9. Power users (Power User)](#9-power-users-power-user) |
1. Find the nearest newline character, and if it is farther than LongLineThreshold, this piece becomes an independent chunk.
#### Enable edge case treatment features
2. If not, find the nearest of these items:
1. A newline character
2. An empty line (Windows style)
3. An empty line (non-Windows style)
3. Compare the farthest of these three positions with the next position of a newline followed by `#`, and pick the shorter piece as a chunk.
Setting key: useEdgeCaseMode
This rule was made empirically from my dataset. If this rule behaves badly on your data, please give me the information.
The following panes will be shown when you enable this setting.
| Icon | Description |
| :--: | ------------------------------------------------------------------ |
| 🩹 | [10. Patches (Edge Case)](#10-patches-edge-case) |
You can dump the saved note structure with `Dump informations of this doc`. Replace every character with `x` except newlines and `#` when sending the information to me.
## 2. General Settings
The default values are 20 letters and 250 letters.
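For illustration, here is one possible reading of the rules above as code. This is a hedged sketch, not the plugin's actual splitter; edge-case handling is simplified:
```typescript
// Returns the index just past the next split point, starting from `from`.
// Defaults: minimumChunkSize = 20 letters, longLineThreshold = 250 letters.
function findNextSplit(
    text: string,
    from: number,
    minimumChunkSize = 20,
    longLineThreshold = 250
): number {
    const base = from + minimumChunkSize; // a chunk is never shorter than this
    const nl = text.indexOf("\n", base);
    if (nl === -1) return text.length;
    // Rule 1: if the nearest newline is farther than the threshold,
    // this long piece becomes an independent chunk.
    if (nl - from > longLineThreshold) return nl + 1;
    // Rule 2: the nearest newline, empty line (Windows style: CRLF),
    // and empty line (non-Windows style: LF).
    const candidates = [nl, text.indexOf("\r\n\r\n", base), text.indexOf("\n\n", base)]
        .filter((pos) => pos !== -1);
    const farthest = Math.max(...candidates);
    // Rule 3: compare with the next "newline + #" position and pick the shorter piece.
    const heading = text.indexOf("\n#", base);
    const cut = heading === -1 ? farthest : Math.min(farthest, heading);
    return cut + 1;
}
```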
### 1. Appearance
## General Settings
#### Display Language
### Do not show low-priority log
If you enable this option, only entries accompanied by a popup will be logged.
Setting key: displayLanguage
### Verbose log
You can change the display language. It is independent of the system language and/or Obsidian's language.
Note: Not all messages have been translated. And, please revert to "Default" when reporting errors. Of course, your contribution to translation is always welcome!
## Sync Settings
#### Show status inside the editor
### LiveSync
Do LiveSync.
Setting key: showStatusOnEditor
It is one of the raisons d'être of this plugin.
We can show the status of synchronisation inside the editor.
Useful, but this method drains the battery on mobile devices and uses a non-negligible amount of data transfer.
Reflected after a reboot.
This method is mutually exclusive with the other synchronisation methods.
#### Show status as icons only
### Periodic Sync
Synchronize periodically.
Setting key: showOnlyIconsOnEditor
### Periodic Sync Interval
Unit is seconds.
Show status as icons only. This is useful when you want to save space on the status bar.
### Sync on Save
Synchronize when the note has been modified or created.
#### Show status on the status bar
### Sync on File Open
Synchronize when the note is opened.
Setting key: showStatusOnStatusbar
### Sync on Start
Synchronize when Obsidian started.
We can show the status of synchronisation on the status bar. (Default: On)
### Use Trash for deleted files
When the file has been deleted on remote devices, deletion will be replicated to the local device and the file will be deleted.
### 2. Logging
If this option is enabled, deleted files are moved into the trash instead of being actually deleted.
#### Show only notifications
### Do not delete empty folder
Self-hosted LiveSync will delete a folder when it becomes empty. If this option is enabled, it will be left as an empty folder.
Setting key: lessInformationInLog
### Use newer file if conflicted (beta)
Always use the newer file to resolve and overwrite when a conflict has occurred.
Prevents logging and shows only notifications. Please disable this when you report logs.
#### Verbose Log
### Experimental.
### Sync hidden files
Setting key: showVerboseLog
Synchronize hidden files.
Show verbose log. Please enable this when you report logs.
- Scan hidden files before replication.
If you enable this option, all hidden files are scanned once before replication.
## 3. Remote Configuration
- Scan hidden files periodically.
If you enable this option, all hidden files will be scanned every [n] seconds.
### 1. Remote Server
Hidden files are not actively detected, so we need scanning.
#### Remote Type
Each scan stores the file with its modification time. If a file has disappeared, that fact is also stored. Then, when the entry of a hidden file has been replicated, it will be reflected in the storage if the entry is newer than the stored one.
Setting key: remoteType
Therefore, the clock must be adjusted. If the modification time is determined to be older, the changeset will be skipped or cancelled (that means **deleted**), even if the file appeared in a hidden folder.
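A tiny sketch of the newer-wins rule described above (illustrative, not the plugin's actual code):
```typescript
// An entry replicated from another device is applied only when its
// modification time is newer than what the storage already holds.
function shouldApplyHiddenFileEntry(entryMtime: number, storageMtime?: number): boolean {
    if (storageMtime === undefined) return true; // nothing stored yet
    return entryMtime > storageMtime;            // older changesets are skipped
}
```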
Remote server type
### Advanced settings
Self-hosted LiveSync uses PouchDB and synchronises with the remote by [this protocol](https://docs.couchdb.org/en/stable/replication/protocol.html).
So, it splits every entry into chunks to stay acceptable to a database with limited payload and document sizes.
### 2. Notification
However, it was not enough.
According to [2.4.2.5.2. Upload Batch of Changed Documents](https://docs.couchdb.org/en/stable/replication/protocol.html#upload-batch-of-changed-documents) in [Replicate Changes](https://docs.couchdb.org/en/stable/replication/protocol.html#replicate-changes), it might become a bigger request.
#### Notify when the estimated remote storage size exceeds on start up
Unfortunately, there is no way to cap every request by size automatically.
Therefore, I made this configurable.
Setting key: notifyThresholdOfRemoteStorageSize
Note: If you set these values to lower numbers, the number of requests will increase.
Therefore, if you are far from the server, the total throughput will be low and the traffic will increase.
MB (0 to disable). We can get a notification when the estimated remote storage size exceeds this value.
### Batch size
Number of change feed items to process at a time. Defaults to 250.
### 3. Privacy & Encryption
### Batch limit
Number of batches to process at a time. Defaults to 40. This along with batch size controls how many docs are kept in memory at a time.
#### End-to-End Encryption
## Miscellaneous
Setting key: encrypt
### Show status inside editor
Show information inside the editor pane.
It would be useful for mobile.
Enable end-to-end encryption. Enabling this is recommended. If you change the passphrase, you need to rebuild the databases (you will be informed).
### Check integrity on saving
Check that all chunks are correctly saved when saving.
#### Passphrase
### Presets
You can set synchronization method at once as these pattern:
- LiveSync
- LiveSync : enabled
- Batch database update : disabled
- Periodic Sync : disabled
- Sync on Save : disabled
- Sync on File Open : disabled
- Sync on Start : disabled
- Periodic w/ batch
- LiveSync : disabled
- Batch database update : enabled
- Periodic Sync : enabled
- Sync on Save : disabled
- Sync on File Open : enabled
- Sync on Start : enabled
- Disable all sync
- LiveSync : disabled
- Batch database update : disabled
- Periodic Sync : disabled
- Sync on Save : disabled
- Sync on File Open : disabled
- Sync on Start : disabled
Setting key: passphrase
Encrypting passphrase. If you change the passphrase, you need to rebuild databases (You will be informed).
## Hatch
From here, everything is under the hood. Please handle it with care.
#### Path Obfuscation
When there are problems with synchronization, a warning message is shown under this section header.
Setting key: usePathObfuscation
- Pattern 1
![CorruptedData](../images/lock_pattern1.png)
This message is shown when the remote database is locked and your device is not marked as "resolved".
Mostly this happens when End-to-End encryption has been enabled or the history has been dropped.
If you enabled End-to-End encryption, you can unlock the remote database automatically with "Apply and receive", or with "Drop and receive" if you dropped the history. If you want to unlock manually, click "mark this device as resolved".
By default, the path of the file is not obfuscated, to improve performance. If you enable this, the path of the file will be obfuscated. This is useful when you want to hide the path of the file.
- Pattern 2
![CorruptedData](../images/lock_pattern2.png)
The remote database indicates that the lock from Pattern 1 has been released.
When you mark all devices as resolved, you can unlock the database.
But, there's no problem even if you leave it as it is.
#### Use dynamic iteration count (Experimental)
### Verify and repair all files
Read all files in the vault, and update them in the database if there is a difference or they could not be read from the database.
Setting key: useDynamicIterationCount
### Suspend file watching
If this option is enabled, Self-hosted LiveSync dismisses every file change or deletion event.
This is an experimental feature and not recommended. If you enable this, the iteration count of the encryption will be dynamically determined. This is useful when you want to improve the performance.
From here, these commands are used internally when applying encryption passphrases or dropping histories.
---
Usually, you will not use these much, but sometimes they can be handy.
**now writing from here onwards, sorry**
## Plugins and settings (beta)
---
### Enable plugin synchronization
If you want to use this feature, you have to activate it with this switch.
### 4. Fetch settings
### Sweep plugins automatically
Plugin sweep will run before replication automatically.
#### Fetch config from remote server
### Sweep plugins periodically
Plugin sweep will run every minute.
Fetch the necessary settings from an already-configured remote server.
### Notify updates
When replication is complete, you will be notified if a newer version of a plugin applied to this device is configured on another device.
### 5. Minio,S3,R2
### Device and Vault name
To save the plugins, you have to set a unique name for each device.
#### Endpoint URL
### Open
Open the "Plugins and their settings" dialog.
Setting key: endpoint
### Corrupted or missing data
![CorruptedData](../images/corrupted_data.png)
#### Access Key
Setting key: accessKey
#### Secret Key
Setting key: secretKey
#### Region
Setting key: region
#### Bucket Name
Setting key: bucket
#### Use Custom HTTP Handler
Setting key: useCustomRequestHandler
Enable this if your Object Storage does not support CORS.
#### Test Connection
#### Apply Settings
### 6. CouchDB
#### Server URI
Setting key: couchDB_URI
#### Username
Setting key: couchDB_USER
username
#### Password
Setting key: couchDB_PASSWORD
password
#### Database Name
Setting key: couchDB_DBNAME
#### Test Database Connection
Open database connection. If the remote database is not found and you have permission to create a database, the database will be created.
#### Validate Database Configuration
Checks and fixes any potential issues with the database config.
#### Apply Settings
## 4. Sync Settings
### 1. Synchronization Preset
#### Presets
Setting key: preset
Apply preset configuration
### 2. Synchronization Method
#### Sync Mode
Setting key: syncMode
#### Periodic Sync interval
Setting key: periodicReplicationInterval
Interval (sec)
#### Sync on Save
Setting key: syncOnSave
Starts synchronisation when a file is saved.
#### Sync on Editor Save
Setting key: syncOnEditorSave
When you save a file in the editor, start a sync automatically
#### Sync on File Open
Setting key: syncOnFileOpen
Forces the file to be synced when opened.
#### Sync on Startup
Setting key: syncOnStart
Automatically Sync all files when opening Obsidian.
#### Sync after merging file
Setting key: syncAfterMerge
Sync automatically after merging files
### 3. Update thinning
#### Batch database update
Setting key: batchSave
Reducing the frequency with which on-disk changes are reflected into the DB
#### Minimum delay for batch database updating
Setting key: batchSaveMinimumDelay
Seconds. Saving to the local database will be delayed until this value after we stop typing or saving.
#### Maximum delay for batch database updating
Setting key: batchSaveMaximumDelay
Saving will be performed forcefully after this number of seconds.
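The interplay of the minimum and maximum delays can be sketched as a debounce with a hard deadline. This is a hedged illustration, not the plugin's actual implementation:
```typescript
// Restarts a quiet-period timer on every change, but forces a save once
// the maximum delay has elapsed since the first unsaved change.
function makeBatchSaver(save: () => void, minimumDelayMs: number, maximumDelayMs: number) {
    let timer: ReturnType<typeof setTimeout> | undefined;
    let deadline: ReturnType<typeof setTimeout> | undefined;
    const flush = () => {
        if (timer) clearTimeout(timer);
        if (deadline) clearTimeout(deadline);
        timer = deadline = undefined;
        save();
    };
    return () => {
        if (timer) clearTimeout(timer);
        timer = setTimeout(flush, minimumDelayMs);    // wait for typing to stop...
        if (!deadline) deadline = setTimeout(flush, maximumDelayMs); // ...but not forever
    };
}
```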
### 4. Deletion Propagation (Advanced)
#### Use the trash bin
Setting key: trashInsteadDelete
Move remotely deleted files to the trash, instead of deleting.
#### Keep empty folder
Setting key: doNotDeleteFolder
Should we keep folders that don't have any files inside?
### 5. Conflict resolution (Advanced)
#### (BETA) Always overwrite with a newer file
Setting key: resolveConflictsByNewerFile
Testing only - Resolve file conflicts by syncing newer copies of the file; this can overwrite modified files. Be warned.
#### Delay conflict resolution of inactive files
Setting key: checkConflictOnlyOnOpen
Should we only check for conflicts when a file is opened?
#### Delay merge conflict prompt for inactive files.
Setting key: showMergeDialogOnlyOnActive
Should we prompt you about conflicting files when a file is opened?
### 6. Sync settings via markdown (Advanced)
#### Filename
Setting key: settingSyncFile
Save settings to a markdown file. You will be notified when new settings arrive. You can set different files by the platform.
#### Write credentials in the file
Setting key: writeCredentialsForSettingSync
(Not recommended) If set, credentials will be stored in the file.
#### Notify all setting files
Setting key: notifyAllSettingSyncFile
### 7. Hidden Files (Advanced)
#### Hidden file synchronization
#### Enable Hidden files sync
#### Scan for hidden files before replication
Setting key: syncInternalFilesBeforeReplication
#### Scan hidden files periodically
Setting key: syncInternalFilesInterval
Seconds, 0 to disable
## 5. Selector (Advanced)
### 1. Normal Files
#### Synchronising files
(RegExp) Empty to sync all files. Set filter as a regular expression to limit synchronising files.
#### Non-Synchronising files
(RegExp) If this is set, any changes to local and remote files that match this will be skipped.
#### Maximum file size
Setting key: syncMaxSizeInMB
(MB) If this is set, changes to local and remote files that are larger than this will be skipped. If the file becomes smaller again, a newer one will be used.
#### (Beta) Use ignore files
Setting key: useIgnoreFiles
If this is set, changes to local files which are matched by the ignore files will be skipped. Remote changes are determined using local ignore files.
#### Ignore files
Setting key: ignoreFiles
Comma separated `.gitignore, .dockerignore`
### 2. Hidden Files (Advanced)
#### Ignore patterns
#### Add default patterns
## 6. Customization sync (Advanced)
### 1. Customization Sync
#### Device name
Setting key: deviceAndVaultName
Unique name between all synchronized devices. To edit this setting, please disable customization sync once.
#### Per-file-saved customization sync
Setting key: usePluginSyncV2
If enabled, per-file efficient customization sync will be used. A small migration is needed when enabling this, and all devices should be updated to v0.23.18. Once this is enabled, compatibility with old versions is lost.
#### Enable customization sync
Setting key: usePluginSync
#### Scan customization automatically
Setting key: autoSweepPlugins
Scan customization before replicating.
#### Scan customization periodically
Setting key: autoSweepPluginsPeriodic
Scan customization every 1 minute.
#### Notify customized
Setting key: notifyPluginOrSettingUpdated
Notify when another device has new customisations.
#### Open
Open the dialog
## 7. Hatch
### 1. Reporting Issue
#### Make report to inform the issue
#### Write logs into the file
Setting key: writeLogToTheFile
Warning! This will have a serious impact on performance. And the logs will not be synchronised under the default name. Please be careful with logs; they often contain your confidential information.
### 2. Scram Switches
#### Suspend file watching
Setting key: suspendFileWatching
Stop watching for file changes.
#### Suspend database reflecting
Setting key: suspendParseReplicationResult
Stop reflecting database changes to storage files.
### 3. Recovery and Repair
#### Recreate missing chunks for all files
This will recreate chunks for all files. If there were missing chunks, this may fix the errors.
#### Resolve All conflicted files by the newer one
Resolve all conflicted files by the newer one. Caution: This will overwrite the older one, and cannot resurrect the overwritten one.
#### Verify and repair all files
Compare the content of files between the local database and the storage. If they do not match, you will be asked which one you want to keep.
#### Check and convert non-path-obfuscated files
### 4. Reset
#### Back to non-configured
#### Delete all customization sync data
## 8. Advanced (Advanced)
### 1. Memory cache
#### Memory cache size (by total items)
Setting key: hashCacheMaxCount
#### Memory cache size (by total characters)
Setting key: hashCacheMaxAmount
(Mega chars)
### 2. Local Database Tweak
#### Enhance chunk size
Setting key: customChunkSize
#### Use splitting-limit-capped chunk splitter
Setting key: enableChunkSplitterV2
If enabled, chunks will be split into no more than 100 items. However, dedupe is slightly weaker.
#### Use Segmented-splitter
Setting key: useSegmenter
If this is enabled, chunks will be split into semantically meaningful segments. Not all platforms support this feature.
### 3. Transfer Tweak
#### Fetch chunks on demand
Setting key: readChunksOnline
(ex. Read chunks online) If this option is enabled, LiveSync reads chunks online directly instead of replicating them locally. Increasing Custom chunk size is recommended.
#### Batch size of on-demand fetching
Setting key: concurrencyOfReadChunksOnline
#### The delay for consecutive on-demand fetches
Setting key: minimumIntervalOfReadChunksOnline
## 9. Power users (Power User)
### 1. Remote Database Tweak
#### Incubate Chunks in Document (Beta)
Setting key: useEden
If enabled, newly created chunks are temporarily kept within the document, and graduated to become independent chunks once stabilised.
#### Maximum Incubating Chunks
Setting key: maxChunksInEden
The maximum number of chunks that can be incubated within the document. Chunks exceeding this number will immediately graduate to independent chunks.
#### Maximum Incubating Chunk Size
Setting key: maxTotalLengthInEden
The maximum total size of chunks that can be incubated within the document. Chunks exceeding this size will immediately graduate to independent chunks.
#### Maximum Incubation Period
Setting key: maxAgeInEden
The maximum duration for which chunks can be incubated within the document. Chunks exceeding this period will graduate to independent chunks.
#### Data Compression (Experimental)
Setting key: enableCompression
### 2. CouchDB Connection Tweak
#### Batch size
Setting key: batch_size
Number of changes to sync at a time. Defaults to 50. Minimum is 2.
#### Batch limit
Setting key: batches_limit
Number of batches to process at a time. Defaults to 40. Minimum is 2. This along with batch size controls how many docs are kept in memory at a time.
#### Use timeouts instead of heartbeats
Setting key: useTimeouts
If this option is enabled, PouchDB will hold the connection open for 60 seconds, and if no change arrives in that time, close and reopen the socket, instead of holding it open indefinitely. Useful when a proxy limits request duration but can increase resource usage.
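For reference, `batch_size` and `batches_limit` are standard PouchDB replication options; here is a minimal sketch of how such tweaks map onto them (the database names are placeholders, and the exact wiring inside the plugin is not shown):
```typescript
import PouchDB from "pouchdb";

const local = new PouchDB("vault");
const remote = new PouchDB("https://example.com/obsidiannotes");

local.replicate.to(remote, {
    batch_size: 50,    // number of changes bundled into one request
    batches_limit: 40, // batches kept in memory at a time
});
```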
### 3. Configuration Encryption
#### Encrypting sensitive configuration items
Setting key: configPassphraseStore
#### Passphrase of sensitive configuration items
Setting key: configPassphrase
This passphrase will not be copied to another device. It will be set to `Default` until you configure it again.
### 4. Developer
#### Enable Developers' Debug Tools.
Setting key: enableDebugTools
Requires restart of Obsidian
## 10. Patches (Edge Case)
### 1. Compatibility (Metadata)
#### Do not keep metadata of deleted files.
Setting key: deleteMetadataOfDeletedFiles
#### Delete old metadata of deleted files on start-up
Setting key: automaticallyDeleteMetadataOfDeletedFiles
(Days passed, 0 to disable automatic-deletion)
### 2. Compatibility (Conflict Behaviour)
#### Always prompt merge conflicts
Setting key: disableMarkdownAutoMerge
Should we prompt you for every single merge, even if we can safely merge automatically?
#### Apply Latest Change if Conflicting
Setting key: writeDocumentsIfConflicted
Enable this option to automatically apply the most recent change to documents even when it conflicts
### 3. Compatibility (Database structure)
#### (Obsolete) Use an old adapter for compatibility (obsolete)
Setting key: useIndexedDBAdapter
Before v0.17.16, we used an old adapter for the local database. Now the new adapter is preferred. However, it needs the local database to be rebuilt. Please disable this toggle when you have enough time. If you leave it enabled, you will also be asked to disable it while fetching from the remote database.
#### Compute revisions for chunks (Previous behaviour)
Setting key: doNotUseFixedRevisionForChunks
If this is enabled, all chunks will be stored with a revision derived from their content (previous behaviour).
#### Handle files as Case-Sensitive
Setting key: handleFilenameCaseSensitive
If this is enabled, all files are handled as case-sensitive (previous behaviour).
### 4. Compatibility (Internal API Usage)
#### Scan changes on customization sync
Setting key: watchInternalFileChanges
Do not use internal API
### 5. Edge case addressing (Database)
#### Database suffix
Setting key: additionalSuffixOfDatabaseName
LiveSync cannot handle multiple vaults which have the same name without a different suffix. This should be configured automatically.
#### The Hash algorithm for chunk IDs (Experimental)
Setting key: hashAlg
### 6. Edge case addressing (Behaviour)
#### Fetch database with previous behaviour
Setting key: doNotSuspendOnFetching
#### Keep empty folder
Setting key: doNotDeleteFolder
Should we keep folders that don't have any files inside?
### 7. Edge case addressing (Processing)
#### Do not split chunks in the background
Setting key: disableWorkerForGeneratingChunks
If disabled (toggled), chunks will be split on the UI thread (previous behaviour).
#### Process small files in the foreground
Setting key: processSmallFilesInUIThread
If enabled, files under 1 KB will be processed in the UI thread.
### 8. Compatibility (Trouble addressed)
#### Do not check configuration mismatch before replication
Setting key: disableCheckingConfigMismatch
## 11. Maintenance
### 1. Scram!
#### Lock Server
Lock the remote server to prevent synchronization with other devices.
#### Emergency restart
Disables all synchronisation and restarts.
### 2. Syncing
#### Resend
Resend all chunks to the remote.
#### Reset journal received history
Initialise the journal received history. On the next sync, every item except those sent by this device will be downloaded again.
#### Reset journal sent history
Initialise the journal sent history. On the next sync, every item except those received by this device will be sent again.
### 3. Rebuilding Operations (Local)
#### Fetch from remote
Restore or reconstruct local database from remote.
#### Fetch rebuilt DB (Save local documents before)
Restore or reconstruct local database from remote database but use local chunks.
### 4. Total Overhaul
#### Rebuild everything
Rebuild local and remote database with local files.
### 5. Rebuilding Operations (Remote Only)
#### Perform cleanup
Reduces storage space by discarding all non-latest revisions. This requires the same amount of free space on the remote server and the local client.
#### Overwrite remote
Overwrite remote with local DB and passphrase.
#### Reset all journal counter
Initialise all journal history. On the next sync, every item will be received and sent.
#### Purge all journal counter
Purge all download/upload cache.
#### Fresh Start Wipe
Delete all data on the remote server.
### 6. Deprecated
#### Run database cleanup
Attempt to shrink the database by deleting unused chunks. This may not work consistently. Use the 'Rebuild everything' under Total Overhaul.
### 7. Reset
#### Delete local database to reset or uninstall Self-hosted LiveSync
When Self-hosted LiveSync cannot write a file to the storage, the files are shown here. If you have the old data in your vault, change it once and it will be cured. Or you can use the "File History" plugin.

View File

@@ -5,15 +5,10 @@
- [Setup a CouchDB server](#setup-a-couchdb-server)
- [Table of Contents](#table-of-contents)
- [1. Prepare CouchDB](#1-prepare-couchdb)
- [A. Using Docker](#a-using-docker)
- [A. Using Docker container](#a-using-docker-container)
- [1. Prepare](#1-prepare)
- [2. Run docker container](#2-run-docker-container)
- [B. Using Docker Compose](#b-using-docker-compose)
- [1. Prepare](#1-prepare-1)
- [2. Creating Compose file](#2-create-a-docker-composeyml-file-with-the-following-added-to-it)
- [3. Boot check](#3-run-the-docker-compose-file-to-boot-check)
- [4. Starting Docker Compose in background](#4-run-the-docker-compose-file-in-the-background)
- [C. Install CouchDB directly](#c-install-couchdb-directly)
- [B. Install CouchDB directly](#b-install-couchdb-directly)
- [2. Run couchdb-init.sh for initialise](#2-run-couchdb-initsh-for-initialise)
- [3. Expose CouchDB to the Internet](#3-expose-couchdb-to-the-internet)
- [4. Client Setup](#4-client-setup)
@@ -26,95 +21,43 @@
---
## 1. Prepare CouchDB
### A. Using Docker
### A. Using Docker container
#### 1. Prepare
```bash
# Adding environment variables.
# Prepare environment variables.
export hostname=localhost:5984
export username=goojdasjdas #Please change as you like.
export password=kpkdasdosakpdsa #Please change as you like
# Creating the save data & configuration directories.
# Prepare directories for saving data and configurations.
mkdir couchdb-data
mkdir couchdb-etc
```
#### 2. Run docker container
1. Boot Check.
```
$ docker run --name couchdb-for-ols --rm -it -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
```
> [!WARNING]
> If your container threw an error or exited unexpectedly, please check the permission of couchdb-data, and couchdb-etc.
> Once CouchDB starts, these directories will be owned by uid:`5984`. Please chown it for that uid again.
If your container has exited, please check the permissions of couchdb-data and couchdb-etc.
Once CouchDB runs, these directories will be owned by uid `5984`. Please chown them for that uid again.
2. Enable it in the background
2. Enable it in background
```
$ docker run --name couchdb-for-ols -d --restart always -e COUCHDB_USER=${username} -e COUCHDB_PASSWORD=${password} -v ${PWD}/couchdb-data:/opt/couchdb/data -v ${PWD}/couchdb-etc:/opt/couchdb/etc/local.d -p 5984:5984 couchdb
```
Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise)
### B. Using Docker Compose
#### 1. Prepare
```
# Creating the save data & configuration directories.
mkdir couchdb-data
mkdir couchdb-etc
```
#### 2. Create a `docker-compose.yml` file with the following added to it
```
services:
couchdb:
image: couchdb:latest
container_name: couchdb-for-ols
user: 5984:5984
environment:
- COUCHDB_USER=<INSERT USERNAME HERE> #Please change as you like.
- COUCHDB_PASSWORD=<INSERT PASSWORD HERE> #Please change as you like.
volumes:
- ./couchdb-data:/opt/couchdb/data
- ./couchdb-etc:/opt/couchdb/etc/local.d
ports:
- 5984:5984
restart: unless-stopped
```
#### 3. Run the Docker Compose file to boot check
```
docker compose up
# Or if using the old version
docker-compose up
```
> [!WARNING]
> If your container threw an error or exited unexpectedly, please check the permission of couchdb-data, and couchdb-etc.
> Once CouchDB starts, these directories will be owned by uid:`5984`. Please chown it for that uid again.
#### 4. Run the Docker Compose file in the background
If all went well and didn't throw any errors, `CTRL+C` out of it, and then run this command
```
docker compose up -d
# Or if using the old version
docker-compose up -d
```
Congrats, move on to [step 2](#2-run-couchdb-initsh-for-initialise)
### C. Install CouchDB directly
Please refer to the [official document](https://docs.couchdb.org/en/stable/install/index.html). However, we do not have to configure it fully. Just the administrator needs to be configured.
### B. Install CouchDB directly
Please refer to the [official document](https://docs.couchdb.org/en/stable/install/index.html). However, we do not have to configure it fully. Just the administrator needs to be configured.
## 2. Run couchdb-init.sh for initialise
```
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | bash
$ curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | bash
```
If it results like the following:
If it results like the following:
```
-- Configuring CouchDB by REST APIs... -->
{"ok":true}
@@ -132,25 +75,15 @@ If it results like the following:
Your CouchDB has been initialised successfully. If you want to do this manually, please read the script.
If you are using Docker Compose and the above command does not work or displays `ERROR: Hostname missing`, you can try running the following command, replacing the placeholders with your own values:
```
curl -s https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/couchdb/couchdb-init.sh | hostname=http://<YOUR SERVER IP>:5984 username=<INSERT USERNAME HERE> password=<INSERT PASSWORD HERE> bash
```
## 3. Expose CouchDB to the Internet
- You can skip this instruction if you are using it only on an intranet and only with desktop devices.
- For mobile devices, Obsidian requires a valid SSL certificate. Usually, this means exposing the server to the internet.
We can use whatever solution works. For simplicity, the following sample uses Cloudflare Zero Trust for testing.
```
cloudflared tunnel --url http://localhost:5984
```
You will then get the following output:
We can use whatever solution works. For simplicity, the following sample uses Cloudflare Zero Trust for testing.
```
$ cloudflared tunnel --url http://localhost:5984
2024-02-14T10:35:25Z INF Thank you for trying Cloudflare Tunnel. Doing so, without a Cloudflare account, is a quick way to experiment and try it out. However, be aware that these account-less Tunnels have no uptime guarantee. If you intend to use Tunnels in production you should use a pre-created named tunnel by following: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps
2024-02-14T10:35:25Z INF Requesting new quick Tunnel on trycloudflare.com...
2024-02-14T10:35:26Z INF +--------------------------------------------------------------------------------------------+
@@ -161,33 +94,19 @@ You will then get the following output:
:
:
```
Now `https://tiles-photograph-routine-groundwater.trycloudflare.com` is our server. Make it into the background once, please.
Now `https://tiles-photograph-routine-groundwater.trycloudflare.com` is our server. Please put it into the background once.
## 4. Client Setup
> [!TIP]
> Now manual configuration is not recommended for several reasons. However, if you want to do so, please use the `Setup wizard`. The recommended extra configurations will also be set.
### 1. Generate the setup URI on a desktop device or server
```bash
export hostname=https://tiles-photograph-routine-groundwater.trycloudflare.com #Point to your vault
export database=obsidiannotes #Please change as you like
export passphrase=dfsapkdjaskdjasdas #Please change as you like
export username=johndoe
export password=abc123
deno run -A https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/flyio/generate_setupuri.ts
```
> [!TIP]
> What is the `passphrase`? Is it different from `uri_passphrase`?
> Yes, the `passphrase` we have exported now is the End-to-End Encryption passphrase.
> And `uri_passphrase`, which is used by `generate_setupuri.ts`, is a different one: it is for decrypting the Setup URI when using it.
> Why: I (vorotamoroz) think that the passphrase of the Setup-URI should be different from the E2EE passphrase, to prevent exposure caused by operational errors or the possibility of malice in our environment. On top of that, I believe that the Setup-URI passphrase should be random. The Setup-URI is inevitably long, so it goes through the clipboard. Its passphrase should not go through the same path, so it should essentially be typed manually.
> Hence, if we leave `uri_passphrase` empty, `generate_setupuri.ts` generates an adjective-noun-random-number passphrase so that we can remember it without going through the clipboard.
You will then get the following output:
```bash
$ export hostname=https://tiles-photograph-routine-groundwater.trycloudflare.com #Point to your vault
$ export database=obsidiannotes #Please change as you like
$ export passphrase=dfsapkdjaskdjasdas #Please change as you like
$ deno run -A https://raw.githubusercontent.com/vrtmrz/obsidian-livesync/main/utils/flyio/generate_setupuri.ts
obsidian://setuplivesync?settings=%5B%22tm2DpsOE74nJAryprZO2M93wF%2Fvg.......4b26ed33230729%22%5D
Your passphrase of Setup-URI is: patient-haze
@@ -287,4 +206,4 @@ entryPoints:
address: ":443"
...
```
```

View File

@@ -1,24 +1,10 @@
# Notes on Terminology, Spelling, Vocabulary Conventions
# Terms used in this project
## Spelling and Vocabulary conventions
## Terms
1. Almost all of the English words are written in British English. For example, "organisation" instead of "organization", "synchronisation" instead of "synchronization", etc. This convention originated from the author's personal preference but is now maintained for consistency.
### Chunks
<!-- TBW, sorry for the draft! -->
2. Idiomatic terms, such as those used in HTML, CSS, and JavaScript, are usually aligned with the language used in the technology. For example, "color" instead of "colour", "program" instead of "programme", etc. Terms which are used for attributes, properties, and methods are especially notable.
3. We use `dialogue` in documentation for consistency. While `dialog` may appear in source code, particularly in class names, method names, and attributes (following technical conventions in No. 2), we consistently use `dialogue` for user-facing messages and general documentation text. This approach balances No. 1 with No. 2.
4. Contractions are not used. For example, "do not" instead of "don't", "cannot" instead of "can't", etc. especially `'d`.
- We may encounter difficulties with tenses.
5. However, try using affirmative forms, `Discard` instead of `Do not keep`, `Continue` instead of `Do not stop`, etc.
- Some languages, such as Japanese, have a different meaning for `yes` and `no` between affirmative and negative questions.
## Terminology
- Self-hosted LiveSync
- This plug-in name. `Self-hosted` is one word.
- LiveSync
- Very confusing term.
- As a shortened form of `Self-hosted LiveSync`.
- As the name of a synchronisation mode. This should be changed to `Continuous`, in contrast to `Periodic`.
<!-- Please feel free to write any terms that should be mentioned. And please make pull request. I would love to fill the rest. -->
<!-- ### Chunks -->

View File

@@ -1,81 +0,0 @@
---
title: "JWT Authentication on CouchDB"
livesync-version: 0.25.24
tags:
- tips
- CouchDB
- JWT
authors:
- vorotamoroz
---
# JWT Authentication on CouchDB
When using CouchDB as a backend for Self-hosted LiveSync, it is possible to enhance security by employing JWT (JSON Web Token) Authentication. In particular, using asymmetric keys (ES256 and ES512) provides greater security against token interception.
## Setting up JWT Authentication (Asymmetrical Key Example)
### 1. Generate a key pair
We can use `openssl` to generate an EC key pair as follows:
```bash
# Generate private key
# ES512 for secp521r1 curve, we can also use ES256 for prime256v1 curve
openssl ecparam -name secp521r1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
# openssl ecparam -name prime256v1 -genkey -noout | openssl pkcs8 -topk8 -inform PEM -nocrypt -out private_key.pem
# Generate public key in SPKI format
openssl ec -in private_key.pem -pubout -outform PEM -out public_key.pem
```
> [!TIP]
> A key generator will be provided again in a future version of the user interface.
### 2. Configure CouchDB to accept JWT tokens
The following configuration is required:
| Key | Value | Note |
| ------------------------------ | ----------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| chttpd/authentication_handlers | {chttpd_auth, jwt_authentication_handler} | In total, it may be `{chttpd_auth, jwt_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}`, or something similar. |
| jwt_auth/required_claims | "exp" | |
| jwt_keys/ec:your_key_id | Your public key in PEM (SPKI) format | Replace `your_key_id` with your actual key ID. You can decide as you like. Note that you can add multiple keys if needed. If you want to use HSxxx, you should set `jwt_keys/hmac:your_key_id` with your HMAC secret. |
Note: When configuring CouchDB via the web interface (Fauxton), newlines in the public key should be replaced with `\n` for the header and footer lines (so weird, but true; I have tested it), as follows:
```
-----BEGIN PUBLIC KEY-----
\nMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBq0irb/+K0Qzo7ayIHj0Xtthcntjz
r665J5UYdEQMiTtku5rnp95RuN97uA2pPOJOacMBAoiVUnZ1pqEBz9xH9yoAixji
Ju...........................................................gTt
/xtqrJRwrEy986oRZRQ=
\n-----END PUBLIC KEY-----
```
For detailed information, please refer to the [CouchDB JWT Authentication Documentation](https://docs.couchdb.org/en/stable/api/server/authn.html#jwt-authentication).
### 3. Configure Self-hosted LiveSync to use JWT Authentication
| Setting | Description |
| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| Use JWT Authentication | Enable this option to use JWT Authentication. |
| JWT Algorithm | Select the JWT signing algorithm (e.g., ES256, ES512) that matches your key pair. |
| JWT Key | Paste your private key in PEM (pkcs8) format. |
| JWT Expiration Duration | Set the token expiration time in minutes. Locally cached tokens are also invalidated after this duration. |
| JWT Key ID (kid) | Enter the key ID that you used when configuring CouchDB, i.e., the one that replaced `your_key_id`. |
| JWT Subject (sub) | Set your user ID; this overrides the original `Username` setting. If you have detected access with `Username`, you have failed to authorise with JWT. |
> [!IMPORTANT]
> Self-hosted LiveSync requests to CouchDB treat the user as `_admin`. If you want to restrict access, configure `jwt_auth/roles_claim_name` to a custom claim name. (Self-hosted LiveSync always sets `_couchdb.roles` with the value `["_admin"]`).
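As a concrete illustration, a token with these claims could be minted with the `jose` library (an assumed dependency here; the key ID, subject, and duration are placeholders):
```typescript
import { SignJWT, importPKCS8 } from "jose";

// Mints a short-lived ES512 token that CouchDB will accept as `_admin`,
// matching the claims described above.
async function mintCouchDbToken(
    privateKeyPem: string, // PEM (pkcs8) private key
    keyId: string,         // must match `your_key_id` in jwt_keys/ec:your_key_id
    userId: string,        // becomes the `sub` claim
    minutes: number        // expiration duration
): Promise<string> {
    const key = await importPKCS8(privateKeyPem, "ES512");
    return await new SignJWT({ "_couchdb.roles": ["_admin"] })
        .setProtectedHeader({ alg: "ES512", kid: keyId })
        .setSubject(userId)
        .setIssuedAt()
        .setExpirationTime(`${minutes}m`)
        .sign(key);
}
```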
### 4. Test the configuration
Just try to `Test Settings and Continue` in the remote setup dialogue. If you have successfully authenticated, you are all set.
## Additional Notes
This feature is still experimental. Please ensure to test thoroughly in your environment before deploying to production.
However, we think that this is a great step towards enhancing security when using CouchDB with Self-hosted LiveSync. We shall enable this setting by default in future releases.
We would love to hear your feedback and any issues you encounter.

View File

@@ -1,29 +0,0 @@
---
title: "Peer-to-Peer Synchronisation Tips"
livesync-version: 0.25.24
tags:
- tips
- p2p
authors:
- vorotamoroz
---
# Peer-to-Peer Synchronisation Tips
> [!IMPORTANT]
> Peer-to-peer synchronisation is still an experimental feature. Although we have made every effort to ensure its reliability, it may not function correctly in all environments.
## Difficulties with Peer-to-Peer Synchronisation
It is often the case that peer-to-peer connections do not function correctly, for instance, when using mobile data services.
In such circumstances, we recommend connecting all devices to a single Virtual Private Network (VPN). It is advisable to select a service, such as Tailscale, which facilitates direct communication between peers wherever possible.
Should one be in an environment where even Tailscale is unable to connect, or where it cannot be lawfully installed, please continue reading.
## A More Detailed Explanation
The failure of a Peer-to-Peer connection via WebRTC can be attributed to several factors. These may include an unsuccessful UDP hole-punching attempt, or an intermediary gateway intentionally terminating the connection. Troubleshooting this matter is not a simple undertaking. Furthermore, and rather unfortunately, gateway administrators are typically aware of this type of network behaviour. Whilst a legitimate purpose for such traffic can be cited, such as for web conferencing, this is often insufficient to prevent it from being blocked.
This situation, however, is the primary reason that our project does not provide a TURN server. Although it is said that a TURN server within WebRTC does not decrypt communications, the project holds the view that the risk of a malicious party impersonating a TURN server must be avoided. Consequently, configuring a TURN server for relay communication is not currently possible through the user interface. Furthermore, there is no official project TURN server, which is to say, one that could be monitored by a third party.
We request that you provide your own server, using your own Fully Qualified Domain Name (FQDN), and subsequently enter its details into the advanced settings.
For testing purposes, Cloudflare's Real-Time TURN Service is exceedingly convenient and offers a generous amount of free data. However, it must be noted that because it is a well-known destination, such traffic is highly conspicuous. There is also a significant possibility that it may be blocked by default. We advise proceeding with caution.

View File

@@ -1,16 +1,8 @@
<!-- 2024-02-15 -->
# Tips and Troubleshooting
- [Tips and Troubleshooting](#tips-and-troubleshooting)
- [Tips](#tips)
- [CORS avoidance](#cors-avoidance)
- [CORS configuration with reverse proxy](#cors-configuration-with-reverse-proxy)
- [Nginx](#nginx)
- [Nginx and subdirectory](#nginx-and-subdirectory)
- [Caddy](#caddy)
- [Caddy and subdirectory](#caddy-and-subdirectory)
- [Apache](#apache)
- [Show all setting panes](#show-all-setting-panes)
- [How to resolve `Tweaks Mismatched of Changed`](#how-to-resolve-tweaks-mismatched-of-changed)
- [Notable bugs and fixes](#notable-bugs-and-fixes)
- [Binary files get bigger on iOS](#binary-files-get-bigger-on-ios)
- [Some setting name has been changed](#some-setting-name-has-been-changed)
@@ -22,145 +14,25 @@
- [Why are the logs volatile and ephemeral?](#why-are-the-logs-volatile-and-ephemeral)
- [Some network logs are not written into the file.](#some-network-logs-are-not-written-into-the-file)
- [If a file were deleted or trimmed, the capacity of the database should be reduced, right?](#if-a-file-were-deleted-or-trimmed-the-capacity-of-the-database-should-be-reduced-right)
- [How to launch the DevTools](#how-to-launch-the-devtools)
- [On Desktop Devices](#on-desktop-devices)
- [On Android](#on-android)
- [On iOS, iPadOS devices](#on-ios-ipados-devices)
- [How can I use the DevTools?](#how-can-i-use-the-devtools)
- [Checking the network log](#checking-the-network-log)
- [Troubleshooting](#troubleshooting)
- [While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.](#while-using-cloudflare-tunnels-often-obsidian-api-fallback-and-524-error-occurs)
- [On the mobile device, cannot synchronise on the local network!](#on-the-mobile-device-cannot-synchronise-on-the-local-network)
- [I think that something bad happening on the vault...](#i-think-that-something-bad-happening-on-the-vault)
- [Tips](#tips)
- [How to resolve `Tweaks Mismatched of Changed`](#how-to-resolve-tweaks-mismatched-of-changed)
- [Old tips](#old-tips)
<!-- - -->
## Tips
### CORS avoidance
If we are unable to configure CORS properly for any reason (for example, if we cannot configure non-administered network devices), we may choose to ignore CORS.
To use the Obsidian API (also known as the Non-Native API) to bypass CORS, we can enable the toggle ``Use Request API to avoid `inevitable` CORS problem``.
<!-- Add **Long explanation of CORS** here for integrity -->
### CORS configuration with reverse proxy
- IMPORTANT: CouchDB handles CORS by itself. Do not process CORS on the reverse
proxy.
- Do not process `Option` requests on the reverse proxy!
- Make sure `host` and `X-Forwarded-For` headers are forwarded to the CouchDB.
- If you are using a subdirectory, make sure to handle it properly. More
detailed information is in the
[CouchDB documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html).
Minimal configurations are as follows:
#### Nginx
```nginx
location / {
proxy_pass http://localhost:5984;
proxy_redirect off;
proxy_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```
#### Nginx and subdirectory
```nginx
location /couchdb {
rewrite ^ $request_uri;
rewrite ^/couchdb/(.*) /$1 break;
proxy_pass http://localhost:5984$uri;
proxy_redirect off;
proxy_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /_session {
proxy_pass http://localhost:5984/_session;
proxy_redirect off;
proxy_buffering off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
```
#### Caddy
```caddyfile
domain.com {
reverse_proxy localhost:5984
}
```
#### Caddy and subdirectory
```caddyfile
domain.com {
reverse_proxy /couchdb/* localhost:5984
reverse_proxy /_session/* localhost:5984/_session
}
```
#### Apache
Sorry, Apache is not recommended for CouchDB. The configuration is omitted here.
Please refer to the
[Official documentation](https://docs.couchdb.org/en/stable/best-practices/reverse-proxies.html#reverse-proxying-with-apache-http-server).
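That said, if Apache is unavoidable, a minimal sketch along the lines of the official guidance might look like the following (untested here; verify against the linked documentation, and note that `mod_proxy` and `mod_proxy_http` must be enabled):
```apache
# Minimal sketch only; adapted from the official CouchDB guidance.
<VirtualHost *:80>
    ServerName domain.com
    AllowEncodedSlashes On
    ProxyPreserveHost On
    # nocanon keeps encoded slashes in document ids intact
    ProxyPass / http://localhost:5984 nocanon
    ProxyPassReverse / http://localhost:5984
</VirtualHost>
```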
### Show all setting panes
Not all setting panes are shown by default. To show all panes, please toggle everything in `🧙‍♂️ Wizard` -> `Enable extra and advanced features`.
For your information, all the panes are as follows:
![All Panes](all_toggles.png)
### How to resolve `Tweaks Mismatched of Changed`
(Since v0.23.17)
If you have changed some configurations or tweaks which should be unified between the devices, you will be asked at the next synchronisation how (or whether) to reflect them to the other devices. This also occurs on the device where the changes were made, to prevent unexpected configuration changes from propagating unwantedly.\
(We may be thankful for this behaviour if we have synchronised, or backed up and restored, Self-hosted LiveSync. At least, it has been so for me.)
The following dialogue will be shown: ![Dialogue](tweak_mismatch_dialogue.png)
- If we want to propagate the settings of this device, we should choose `Update with mine`.
- On the other devices, we should choose `Use configured` to accept and use the configured settings.
- `Dismiss` can postpone the decision. However, we cannot synchronise until we have decided.
Rest assured that in most cases we can choose `Use configured` (unless you are certain that you have not changed the configuration).
If we see it for the first time, it reflects the settings of the device that has been synchronised with the remote for the first time since the upgrade. Probably, we can accept that.
<!-- Add here -->
## Notable bugs and fixes
### Binary files get bigger on iOS
- Reported at: v0.20.x
- Fixed at: v0.21.2 (Fixed but not reviewed)
- Required action: larger files will not be fixed automatically; please perform `Verify and repair all files`. If our local database and storage do not match, we will be asked which one to apply.
### Some setting name has been changed
- Fixed at: v0.22.6
| Previous name | New name |
@@ -174,191 +46,103 @@ Probably, we can accept that.
### Why `Use an old adapter for compatibility` is somehow enabled in my vault?
Because you are a compassionate and experienced user. Before v0.17.16, we used an old adapter for the local database. At that time, the current default adapter was not yet stable. The new adapter has better performance and new features such as purging. Therefore, we should use the new adapter, and the current default is so.
However, switching from the old adapter to the new one requires some conversion or rebuilding of the local database, and that takes some time. It was a long time ago now, but we once inconvenienced everyone in a hurry when we changed the format of our database. For these reasons, this toggle is automatically on if we have upgraded from a vault which used the old adapter.
When you rebuild everything or fetch from the remote again, you will be asked to switch this.
Therefore, experienced users (especially those stable enough not to have to rebuild the database) may have this toggle enabled in their vault. Please disable it when you have enough time.
### ZIP (or any extensions) files were not synchronised. Why?
It depends on what Obsidian detects. Toggling `Detect all extensions` in `File and links` (a setting of Obsidian) may help us.
### I want to report an issue, but you said you need a `Report`. How do I make one?
We can copy the report to the clipboard by pressing the `Make report` button on the `Hatch` pane.
![Screenshot](../images/hatch.png)
### Where can I check the log?
We can launch the log pane with `Show log` on the command palette. And if something has been troubling you, please enable `Verbose Log` on the `General Setting` pane.
However, the logs are not kept for long and are cleared on restart. If you want to check the logs later, please enable `Write logs into the file` temporarily.
![ScreenShot](../images/write_logs_into_the_file.png)
> [!IMPORTANT]
>
> - Writing logs into the file will impact the performance.
> - Please make sure that you have erased all your confidential information before reporting an issue.
### Why are the logs volatile and ephemeral?
To avoid unexpected exposure of our confidential information.
### Some network logs are not written into the file.
Especially, CORS errors are reported to the plug-in as general errors for security reasons, so we cannot detect and log them. We are only able to investigate them by [Checking the network log](#checking-the-network-log).
### If a file were deleted or trimmed, the capacity of the database should be reduced, right?
No. Even if files are deleted, chunks are not deleted. Self-hosted LiveSync splits files into multiple chunks and transfers only the newly created ones. This behaviour lets us use less traffic, and the chunks are shared between files to reduce the total usage of the database.
And one more thing: we can handle conflicts on any device, even if they happened on other devices. This means that conflicts may surface from the past, after the point to which we have synchronised. Hence we cannot collect and delete unused chunks, even if they are not currently referenced.
To shrink the database size, only `Rebuild everything` works reliably and effectively. But do not worry: if we have synchronised well, we have the actual, real files. It only takes a bit of time and traffic.
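To illustrate why deletion does not free space, here is a minimal TypeScript sketch of content-addressed chunking (hypothetical names and a per-line split; not the plugin's actual code): chunks are keyed by a hash of their content, so several files and historical revisions can reference the same chunk, and none can be dropped while anything might still reference it.
```typescript
import { createHash } from "node:crypto";

// chunkId -> chunk body. A chunk is never deleted here, only (re)used.
const chunkStore = new Map<string, string>();

function storeNote(content: string): string[] {
    // Hypothetically chunking per line; the real plugin splits differently.
    return content.split("\n").map((piece) => {
        const id = createHash("sha1").update(piece).digest("hex");
        chunkStore.set(id, piece); // already-known chunks are simply reused
        return id;
    });
}

const noteA = storeNote("shared preamble\nbody of note A");
const noteB = storeNote("shared preamble\nbody of note B");
// "shared preamble" is stored exactly once. Deleting note A only discards
// its id list; the chunks must remain, because note B, or an old revision
// still held by another device, may reference them.
```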
### How to launch the DevTools
#### On Desktop Devices
We can launch the DevTools by pressing `ctrl`+`shift`+`i` (`Cmd`+`Option`+`i` on macOS).
#### On Android
Please refer to [Remote debug Android devices](https://developer.chrome.com/docs/devtools/remote-debugging/).
Once the DevTools have been launched, everything operates the same as on a PC.
#### On iOS, iPadOS devices
If we have a Mac, we can inspect from Safari on the Mac. Please refer to [Inspecting iOS and iPadOS](https://developer.apple.com/documentation/safari-developer-tools/inspecting-ios).
### How can I use the DevTools?
#### Checking the network log
1. Open the network pane.
2. Find the requests marked in red.\
   ![Errored](../images/devtools1.png)
3. Capture the `Headers`, `Payload`, and `Response`. **Please be sure to keep important information confidential**. If the `Response` contains secrets, you can omit it. Note: the headers contain some credentials. **The path of the request URL, Remote Address, authority, and authorization must be concealed.**\
   ![Concealed sample](../images/devtools2.png)
## Troubleshooting
<!-- Add here -->
### While using Cloudflare Tunnels, often Obsidian API fallback and `524` error occurs.
A `524` error occurs when a request to the server is not completed within a `specified time`. This is a timeout error from Cloudflare; from the reported issue, it seems to be 100 seconds (#627).
This error is returned by Cloudflare, not by the server. Hence, the response contains no CORS headers, and that is what makes the Obsidian API fallback happen.
However, even after the Obsidian API fallback, the request is still not completed within the `specified time` of 100 seconds.
To solve this issue, we need to configure the timeout settings. Please enable the toggle in `💪 Power users` -> `CouchDB Connection Tweak` -> `Use timeouts instead of heartbeats`.
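For background, this toggle maps onto CouchDB's changes-feed parameters: with a heartbeat, one request is held open indefinitely while only keep-alive newlines flow, which trips Cloudflare's limit; with a timeout, CouchDB ends each request itself within the window, so every request completes well inside 100 seconds. Roughly (hypothetical host and database names):
```bash
# Heartbeat style: the connection is held open indefinitely;
# Cloudflare kills it at ~100 s with a 524.
curl "https://tunnel.example.com/vaultdb/_changes?feed=continuous&heartbeat=30000"

# Timeout style: CouchDB closes the response itself after 60 s,
# so each request finishes inside Cloudflare's limit and reconnects.
curl "https://tunnel.example.com/vaultdb/_changes?feed=longpoll&timeout=60000"
```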
### On the mobile device, cannot synchronise on the local network!
Obsidian Mobile cannot connect to non-secure endpoints, such as those starting with `http://`. Make sure of the URI of your CouchDB. A self-signed certificate cannot be used, either.
### I think that something bad happening on the vault...
Place `redflag.md` at the top of the vault, and restart Obsidian. The simplest way is to create a new note and rename it to `redflag`. Of course, we can put it there without Obsidian.
If `redflag.md` exists, Self-hosted LiveSync suspends all database and storage processes.
There are some variants of `redflag.md`:
| Filename | Human-Friendly Name | Description |
| ------------- | ------------------- | ------------------------------------------------------------------------------------ |
| `redflag.md` | - | Suspends all processes. |
| `redflag2.md` | `flag_rebuild.md` | Suspends all processes, and rebuild both local and remote databases by local files. |
| `redflag3.md` | `flag_fetch.md` | Suspends all processes, discard the local database, and fetch from the remote again. |
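For example, these files can be created from a shell (the vault path is a placeholder):
```bash
# Suspend all processing at the next launch:
touch ~/Vaults/MyVault/redflag.md

# Rebuild both local and remote databases from local files:
touch ~/Vaults/MyVault/flag_rebuild.md   # alias of redflag2.md

# Discard the local database and fetch from the remote again:
touch ~/Vaults/MyVault/flag_fetch.md     # alias of redflag3.md
```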
When fetching everything from the remote or performing a rebuild, Obsidian is restarted once for safety reasons. At that time, Self-hosted LiveSync uses these files to determine whether the process should be carried out. (Using normal markdown files is a trick that lets us cancel externally if the rebuild or fetch function itself is faulty, especially on mobile devices.) This mechanism is also used during set-up. And just for information, these files are not subject to synchronisation.
However, occasionally the deletion of these files may fail; this should generally resolve itself after restarting Obsidian (as far as I can observe).
## Old tips
- Rarely, a file in the database could be corrupted. The plugin will not write to local storage when a file looks corrupted. If a local version of the file is on your device, the corruption can be fixed by editing the local file and synchronising it. But if the file does not exist on any of your devices, it cannot be rescued. In this case, you can delete these items from the settings dialog.
- To stop the boot-up sequence (e.g., for fixing problems on databases), you can put a `redflag.md` file (or directory) at the root of your vault.
  Tip for iOS: a redflag directory can be created at the root of the vault using the Files application.
- Also, with `redflag2.md` placed, we can automatically rebuild both the local and the remote databases during the boot-up sequence. With `redflag3.md`, we can discard only the local database and fetch from the remote again.
- Q: The database is growing; how can I shrink it down?
  A: Each doc is saved with its past 100 revisions for detecting and resolving conflicts. Picture one device being offline for a while and then coming online again: it has to compare its notes with the remotely saved ones. If there is a historic revision in which the note used to be identical, it can be updated safely (like a git fast-forward). Even if there is not, we only have to check the differences after the last revision that both devices have in common. This is like git's conflict-resolving method. So, if we want to solve the root of the problem, we have to make the database again, as with an enlarged git repo (see the sketch after this list).
- More technical information is in the [Technical Information](tech_info.md).
- If you want to synchronise files without Obsidian, you can use [filesystem-livesync](https://github.com/vrtmrz/filesystem-livesync).
- WebClipper is also available on the Chrome Web Store: [obsidian-livesync-webclip](https://chrome.google.com/webstore/detail/obsidian-livesync-webclip/jfpaflmpckblieefkegjncjoceapakdf)
Repo is here: [obsidian-livesync-webclip](https://github.com/vrtmrz/obsidian-livesync-webclip). (Docs are a work in progress.)
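As a rough illustration of the fast-forward check described in the Q&A above, here is a minimal TypeScript sketch (hypothetical shapes and names, not the plugin's actual code):
```typescript
// Hypothetical revision ids, e.g. "104-ab3f...". Histories are ordered
// oldest -> newest and, as noted above, only the last 100 are kept.
type Revision = string;

// If the remote head already exists in our local history, accepting it is
// safe: nothing has diverged (like a git fast-forward).
function canFastForward(localHistory: Revision[], remoteHead: Revision): boolean {
    return localHistory.includes(remoteHead);
}

// Otherwise, find the newest revision both sides share; only the changes
// made after it need to be compared, like git's merge-base.
function lastCommonRevision(localHistory: Revision[], remoteHistory: Revision[]): Revision | undefined {
    const remote = new Set(remoteHistory);
    for (let i = localHistory.length - 1; i >= 0; i--) {
        if (remote.has(localHistory[i])) return localHistory[i];
    }
    return undefined; // no common ancestor within the kept revisions
}
```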

View File

@@ -4,73 +4,75 @@ import esbuild from "esbuild";
import process from "process";
import builtins from "builtin-modules";
import sveltePlugin from "esbuild-svelte";
import { sveltePreprocess } from "svelte-preprocess";
import sveltePreprocess from "svelte-preprocess";
import fs from "node:fs";
// import terser from "terser";
import { minify } from "terser";
import inlineWorkerPlugin from "esbuild-plugin-inline-worker";
import { terserOption } from "./terser.config.mjs";
import path from "node:path";
const banner = `/*
THIS IS A GENERATED/BUNDLED FILE BY ESBUILD AND TERSER
if you want to view the source, please visit the github repository of this plugin
*/
`;
const prod = process.argv[2] === "production" || process.env?.BUILD_MODE === "production";
const keepTest = true; //!prod;
const prod = process.argv[2] === "production";
const dev = process.argv[2] === "dev";
const keepTest = !prod || dev;
const terserOpt = {
sourceMap: !prod
? {
url: "inline",
}
: {},
format: {
indent_level: 2,
beautify: true,
comments: "some",
ecma: 2018,
preamble: banner,
webkit: true,
},
parse: {
// parse options
},
compress: {
// compress options
defaults: false,
evaluate: true,
inline: 3,
join_vars: true,
loops: true,
passes: prod ? 4 : 1,
reduce_vars: true,
reduce_funcs: true,
arrows: true,
collapse_vars: true,
comparisons: true,
lhs_constants: true,
hoist_props: true,
side_effects: true,
if_return: true,
ecma: 2018,
unused: true,
},
ecma: 2018, // specify one of: 5, 2015, 2016, etc.
enclose: false, // or specify true, or "args:values"
keep_classnames: true,
keep_fnames: true,
ie8: false,
module: false,
// nameCache: null, // or specify a name cache object
safari10: false,
toplevel: false,
};
const manifestJson = JSON.parse(fs.readFileSync("./manifest.json") + "");
const packageJson = JSON.parse(fs.readFileSync("./package.json") + "");
const updateInfo = JSON.stringify(fs.readFileSync("./updates.md") + "");
const PATHS_TEST_INSTALL = process.env?.PATHS_TEST_INSTALL || "";
const PATH_TEST_INSTALL = PATHS_TEST_INSTALL.split(path.delimiter).map(p => p.trim()).filter(p => p.length);
if (!prod) {
if (PATH_TEST_INSTALL) {
console.log(`Built files will be copied to ${PATH_TEST_INSTALL}`);
} else {
console.log("Development build: You can install the plug-in to Obsidian for testing by exporting the PATHS_TEST_INSTALL environment variable with the paths to your vault plugins directories separated by your system path delimiter (':' on Unix, ';' on Windows).");
}
} else {
console.log("Production build");
}
const moduleAliasPlugin = {
name: "module-alias",
setup(build) {
build.onResolve({ filter: /.(dev)(.ts|)$/ }, (args) => {
// console.log(args.path);
if (prod) {
let prodTs = args.path.replace(".dev", ".prod");
const statFile = prodTs.endsWith(".ts") ? prodTs : prodTs + ".ts";
const realPath = path.join(args.resolveDir, statFile);
console.log(`Checking ${statFile}`);
if (fs.existsSync(realPath)) {
console.log(`Replaced ${args.path} with ${prodTs}`);
return {
path: realPath,
namespace: "file",
};
}
}
return null;
});
build.onResolve({ filter: /.(platform)(.ts|)$/ }, (args) => {
// console.log(args.path);
if (prod) {
let prodTs = args.path.replace(".platform", ".obsidian");
const statFile = prodTs.endsWith(".ts") ? prodTs : prodTs + ".ts";
const realPath = path.join(args.resolveDir, statFile);
console.log(`Checking ${statFile}`);
if (fs.existsSync(realPath)) {
console.log(`Replaced ${args.path} with ${prodTs}`);
return {
path: realPath,
namespace: "file",
};
}
}
return null;
});
},
};
/** @type esbuild.Plugin[] */
const plugins = [
{
@@ -79,27 +81,15 @@ const plugins = [
let count = 0;
build.onEnd(async (result) => {
if (count++ === 0) {
console.log("first build:");
if (prod) {
console.log("MetaFile:");
if (result.metafile) {
fs.writeFileSync("meta.json", JSON.stringify(result.metafile));
let text = await esbuild.analyzeMetafile(result.metafile, {
verbose: true,
});
// console.log(text);
}
}
console.log("first build:", result);
} else {
console.log("subsequent build:");
}
const filename = `meta-${prod ? "prod" : "dev"}.json`;
await fs.promises.writeFile(filename, JSON.stringify(result.metafile, null, 2));
if (prod) {
console.log("Performing terser");
const src = fs.readFileSync("./main_org.js").toString();
// @ts-ignore
const ret = await minify(src, terserOption);
const ret = await minify(src, terserOpt);
if (ret && ret.code) {
fs.writeFileSync("./main.js", ret.code);
}
@@ -107,45 +97,15 @@ const plugins = [
} else {
fs.copyFileSync("./main_org.js", "./main.js");
}
if (PATH_TEST_INSTALL) {
for (const installPath of PATH_TEST_INSTALL) {
const realPath = path.resolve(installPath);
console.log(`Copying built files to ${realPath}`);
if (!fs.existsSync(realPath)) {
console.warn(`Test install path ${installPath} does not exist`);
continue;
}
const manifestX = JSON.parse(fs.readFileSync("./manifest.json") + "");
manifestX.version = manifestJson.version + "." + Date.now();
fs.writeFileSync(path.join(installPath, "manifest.json"), JSON.stringify(manifestX, null, 2));
fs.copyFileSync("./main.js", path.join(installPath, "main.js"));
fs.copyFileSync("./styles.css", path.join(installPath, "styles.css"));
}
}
});
},
},
];
const externals = [
"obsidian",
"electron",
"crypto",
"@codemirror/autocomplete",
"@codemirror/collab",
"@codemirror/commands",
"@codemirror/language",
"@codemirror/lint",
"@codemirror/search",
"@codemirror/state",
"@codemirror/view",
"@lezer/common",
"@lezer/highlight",
"@lezer/lr",
];
const externals = ["obsidian", "electron", "crypto", "@codemirror/autocomplete", "@codemirror/collab", "@codemirror/commands", "@codemirror/language", "@codemirror/lint", "@codemirror/search", "@codemirror/state", "@codemirror/view", "@lezer/common", "@lezer/highlight", "@lezer/lr"];
const context = await esbuild.context({
banner: {
js: "// Leave it all to terser",
js: banner,
},
entryPoints: ["src/main.ts"],
bundle: true,
@@ -161,9 +121,8 @@ const context = await esbuild.context({
target: "es2018",
logLevel: "info",
platform: "browser",
metafile: true,
sourcemap: prod ? false : "inline",
treeShaking: false,
treeShaking: true,
outfile: "main_org.js",
mainFields: ["browser", "module", "main"],
minifyWhitespace: false,
@@ -173,7 +132,6 @@ const context = await esbuild.context({
dropLabels: prod && !keepTest ? ["TEST", "DEV"] : [],
// keepNames: true,
plugins: [
moduleAliasPlugin,
inlineWorkerPlugin({
external: externals,
treeShaking: true,
@@ -186,7 +144,7 @@ const context = await esbuild.context({
],
});
if (prod) {
if (prod || dev) {
await context.rebuild();
process.exit(0);
} else {

View File

@@ -1,102 +0,0 @@
import typescriptEslint from "@typescript-eslint/eslint-plugin";
import svelte from "eslint-plugin-svelte";
import _import from "eslint-plugin-import";
import { fixupPluginRules } from "@eslint/compat";
import tsParser from "@typescript-eslint/parser";
import path from "node:path";
import { fileURLToPath } from "node:url";
import js from "@eslint/js";
import { FlatCompat } from "@eslint/eslintrc";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const compat = new FlatCompat({
baseDirectory: __dirname,
recommendedConfig: js.configs.recommended,
allConfig: js.configs.all,
});
export default [
{
ignores: [
"**/node_modules/*",
"**/jest.config.js",
"src/lib/coverage",
"src/lib/browsertest",
"**/test.ts",
"**/tests.ts",
"**/**test.ts",
"**/**.test.ts",
"**/esbuild.*.mjs",
"**/terser.*.mjs",
"**/node_modules",
"**/build",
"**/.eslintrc.js.bak",
"src/lib/src/patches/pouchdb-utils",
"**/esbuild.config.mjs",
"**/rollup.config.js",
"modules/octagonal-wheels/rollup.config.js",
"modules/octagonal-wheels/dist/**/*",
"src/lib/test",
"src/lib/src/cli",
"**/main.js",
"src/apps/**/*",
".prettierrc.*.mjs",
".prettierrc.mjs",
"*.config.mjs"
],
},
...compat.extends(
"eslint:recommended",
"plugin:@typescript-eslint/eslint-recommended",
"plugin:@typescript-eslint/recommended"
),
{
plugins: {
"@typescript-eslint": typescriptEslint,
svelte,
import: fixupPluginRules(_import),
},
languageOptions: {
parser: tsParser,
ecmaVersion: 5,
sourceType: "module",
parserOptions: {
project: ["tsconfig.json"],
},
},
rules: {
"no-unused-vars": "off",
"@typescript-eslint/no-unused-vars": [
"error",
{
args: "none",
},
],
"no-unused-labels": "off",
"@typescript-eslint/ban-ts-comment": "off",
"no-prototype-builtins": "off",
"@typescript-eslint/no-empty-function": "off",
"require-await": "error",
"@typescript-eslint/require-await": "warn",
"@typescript-eslint/no-misused-promises": "warn",
"@typescript-eslint/no-floating-promises": "warn",
"no-async-promise-executor": "warn",
"@typescript-eslint/no-explicit-any": "off",
"@typescript-eslint/no-unnecessary-type-assertion": "error",
"no-constant-condition": [
"error",
{
checkLoops: false,
},
],
},
},
];

View File

@@ -1 +0,0 @@
PATHS_TEST_INSTALL=your-vault-plugin-path:and-another-path

View File

@@ -1,10 +0,0 @@
{
"id": "obsidian-livesync",
"name": "Self-hosted LiveSync",
"version": "0.25.24.beta3",
"minAppVersion": "0.9.12",
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"author": "vorotamoroz",
"authorUrl": "https://github.com/vrtmrz",
"isDesktopOnly": false
}

View File

@@ -1,7 +1,7 @@
{
"id": "obsidian-livesync",
"name": "Self-hosted LiveSync",
"version": "0.25.41",
"version": "0.23.19",
"minAppVersion": "0.9.12",
"description": "Community implementation of self-hosted livesync. Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"author": "vorotamoroz",

package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,102 +1,45 @@
{
"name": "obsidian-livesync",
"version": "0.25.41",
"version": "0.23.19",
"description": "Reflect your vault changes to some other devices immediately. Please make sure to disable other synchronize solutions to avoid content corruption or duplication.",
"main": "main.js",
"type": "module",
"scripts": {
"bakei18n": "npm run i18n:yaml2json && npm run i18n:bakejson",
"i18n:bakejson": "npx tsx ./src/lib/_tools/bakei18n.ts",
"i18n:yaml2json": "npx tsx ./src/lib/_tools/yaml2json.ts",
"i18n:json2yaml": "npx tsx ./src/lib/_tools/json2yaml.ts",
"prettyjson": "prettier --config ./.prettierrc.mjs ./src/lib/src/common/messagesJson/*.json --write --log-level error",
"postbakei18n": "prettier --config ./.prettierrc.mjs ./src/lib/src/common/messages/*.ts --write --log-level error",
"posti18n:yaml2json": "npm run prettyjson",
"predev": "npm run bakei18n",
"dev": "node --env-file=.env esbuild.config.mjs",
"prebuild": "npm run bakei18n",
"dev": "node esbuild.config.mjs",
"build": "node esbuild.config.mjs production",
"buildDev": "node esbuild.config.mjs dev",
"lint": "eslint src",
"svelte-check": "svelte-check --tsconfig ./tsconfig.json",
"tsc-check": "tsc --noEmit",
"pretty": "npm run prettyNoWrite -- --write --log-level error",
"prettyCheck": "npm run prettyNoWrite -- --check",
"prettyNoWrite": "prettier --config ./.prettierrc.mjs \"**/*.js\" \"**/*.ts\" \"**/*.json\" ",
"check": "npm run lint && npm run svelte-check",
"unittest": "deno test -A --no-check --coverage=cov_profile --v8-flags=--expose-gc --trace-leaks ./src/",
"test": "vitest run",
"test:unit": "vitest run --config vitest.config.unit.ts",
"test:unit:coverage": "vitest run --config vitest.config.unit.ts --coverage",
"test:install-playwright": "npx playwright install chromium",
"test:install-dependencies": "npm run test:install-playwright",
"test:coverage": "vitest run --coverage",
"test:docker-couchdb:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-start.sh",
"test:docker-couchdb:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-init.sh",
"test:docker-couchdb:start": "npm run test:docker-couchdb:up && sleep 5 && npm run test:docker-couchdb:init",
"test:docker-couchdb:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/couchdb-stop.sh",
"test:docker-couchdb:stop": "npm run test:docker-couchdb:down",
"test:docker-s3:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-start.sh",
"test:docker-s3:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-init.sh",
"test:docker-s3:start": "npm run test:docker-s3:up && sleep 3 && npm run test:docker-s3:init",
"test:docker-s3:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/minio-stop.sh",
"test:docker-s3:stop": "npm run test:docker-s3:down",
"test:docker-p2p:up": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-start.sh",
"test:docker-p2p:init": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-init.sh",
"test:docker-p2p:down": "npx dotenv-cli -e .env -e .test.env -- ./test/shell/p2p-stop.sh",
"test:docker-p2p:stop": "npm run test:docker-p2p:down",
"test:docker-all:up": "npm run test:docker-couchdb:up && npm run test:docker-s3:up && npm run test:docker-p2p:up",
"test:docker-all:init": "npm run test:docker-couchdb:init && npm run test:docker-s3:init && npm run test:docker-p2p:init",
"test:docker-all:down": "npm run test:docker-couchdb:down && npm run test:docker-s3:down && npm run test:docker-p2p:down",
"test:docker-all:start": "npm run test:docker-all:up && sleep 5 && npm run test:docker-all:init",
"test:docker-all:stop": "npm run test:docker-all:down",
"test:full": "npm run test:docker-all:start && vitest run --coverage && npm run test:docker-all:stop"
"lint": "eslint src"
},
"keywords": [],
"author": "vorotamoroz",
"license": "MIT",
"devDependencies": {
"@chialab/esbuild-plugin-worker": "^0.18.1",
"@eslint/compat": "^1.2.7",
"@eslint/eslintrc": "^3.3.0",
"@eslint/js": "^9.21.0",
"@sveltejs/vite-plugin-svelte": "^6.2.1",
"@tsconfig/svelte": "^5.0.5",
"@types/deno": "^2.3.0",
"@tsconfig/svelte": "^5.0.4",
"@types/diff-match-patch": "^1.0.36",
"@types/node": "^22.13.8",
"@types/node": "^20.14.10",
"@types/pouchdb": "^6.4.2",
"@types/pouchdb-adapter-http": "^6.1.6",
"@types/pouchdb-adapter-idb": "^6.1.7",
"@types/pouchdb-browser": "^6.1.5",
"@types/pouchdb-core": "^7.0.15",
"@types/pouchdb-core": "^7.0.14",
"@types/pouchdb-mapreduce": "^6.1.10",
"@types/pouchdb-replication": "^6.4.7",
"@types/transform-pouch": "^1.0.6",
"@typescript-eslint/eslint-plugin": "8.46.2",
"@typescript-eslint/parser": "8.46.2",
"@vitest/browser": "^4.0.16",
"@vitest/browser-playwright": "^4.0.16",
"@vitest/coverage-v8": "^4.0.16",
"builtin-modules": "5.0.0",
"dotenv": "^17.2.3",
"dotenv-cli": "^11.0.0",
"esbuild": "0.25.0",
"esbuild-plugin-inline-worker": "^0.1.1",
"esbuild-svelte": "^0.9.3",
"eslint": "^9.38.0",
"eslint-plugin-import": "^2.32.0",
"eslint-plugin-svelte": "^3.12.4",
"@typescript-eslint/eslint-plugin": "^7.16.0",
"@typescript-eslint/parser": "^7.16.0",
"builtin-modules": "^4.0.0",
"esbuild": "0.23.0",
"esbuild-svelte": "^0.8.1",
"eslint": "^8.57.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-plugin-import": "^2.29.1",
"events": "^3.3.0",
"glob": "^11.0.3",
"obsidian": "^1.8.7",
"playwright": "^1.57.0",
"postcss": "^8.5.3",
"obsidian": "^1.5.7",
"postcss": "^8.4.39",
"postcss-load-config": "^6.0.1",
"pouchdb-adapter-http": "^9.0.0",
"pouchdb-adapter-idb": "^9.0.0",
"pouchdb-adapter-indexeddb": "^9.0.0",
"pouchdb-adapter-memory": "^9.0.0",
"pouchdb-core": "^9.0.0",
"pouchdb-errors": "^9.0.0",
"pouchdb-find": "^9.0.0",
@@ -104,34 +47,25 @@
"pouchdb-merge": "^9.0.0",
"pouchdb-replication": "^9.0.0",
"pouchdb-utils": "^9.0.0",
"prettier": "3.5.2",
"svelte": "5.41.1",
"svelte-check": "^4.3.3",
"svelte-preprocess": "^6.0.3",
"terser": "^5.39.0",
"svelte": "^4.2.18",
"svelte-preprocess": "^6.0.2",
"terser": "^5.31.2",
"transform-pouch": "^2.0.0",
"tslib": "^2.8.1",
"tsx": "^4.20.6",
"typescript": "5.9.3",
"vite": "^7.3.0",
"vitest": "^4.0.16",
"webdriverio": "^9.23.0",
"yaml": "^2.8.0"
"tslib": "^2.6.3",
"typescript": "^5.5.3"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.808.0",
"@smithy/fetch-http-handler": "^5.0.2",
"@smithy/md5-js": "^4.0.2",
"@smithy/middleware-apply-body-checksum": "^4.1.0",
"@smithy/protocol-http": "^5.1.0",
"@smithy/querystring-builder": "^4.0.2",
"@aws-sdk/client-s3": "^3.621.0",
"@smithy/fetch-http-handler": "^3.2.1",
"@smithy/protocol-http": "^4.0.3",
"@smithy/querystring-builder": "^3.0.3",
"diff-match-patch": "^1.0.5",
"esbuild-plugin-inline-worker": "^0.1.1",
"fflate": "^0.8.2",
"idb": "^8.0.3",
"minimatch": "^10.0.2",
"octagonal-wheels": "^0.1.45",
"qrcode-generator": "^1.4.4",
"trystero": "^0.22.0",
"idb": "^8.0.0",
"minimatch": "^10.0.1",
"octagonal-wheels": "^0.1.13",
"xxhash-wasm": "0.4.2",
"xxhash-wasm-102": "npm:xxhash-wasm@^1.0.2"
}
}

View File

@@ -1,24 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

View File

@@ -1,30 +0,0 @@
# A pseudo client for Self-hosted LiveSync Peer-to-Peer Sync mode
## What is it for?
This is a pseudo client for the Self-hosted LiveSync Peer-to-Peer Sync mode. It is a simple, purely client-side web application that can connect to Self-hosted LiveSync peer-to-peer.
It starts up as long as you have a browser, so if you leave it open on some device, it can replace your existing remote server such as CouchDB.
> [!IMPORTANT]
> Of course, it has not been fully tested. Rather, it was created to be tested.
This pseudo client actually receives the data from other devices, and sends it if some device requests it. However, it does not store **files** in local storage. If you want to purge the data, please purge the browser's cache, IndexedDB, local storage, etc.
## How to use it?
We can build the application by running the following command:
```bash
$ deno task build
```
Then, open `dist/index.html` in the browser. It can be configured the same way as Self-hosted LiveSync (the same components are used[^1]).
## Some notes
I will publish this application on GitHub Pages later, so you will be able to use it without building it. However, that deployment shares its origin with everyone; hence, an application that you have built and deployed yourself would be more secure.
[^1]: Congrats! I made it modular. Finally...

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="icon.svg" />
<link rel="manifest" href="manifest.json" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Peer-to-Peer Daemon on Browser</title>
</head>
<body>
<div id="app"></div>
<script type="module" src="./src/main.ts"></script>
</body>
</html>

View File

@@ -1,25 +0,0 @@
{
"name": "webpeer",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview",
"check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json"
},
"dependencies": {},
"devDependencies": {
"eslint-plugin-svelte": "^3.12.4",
"@sveltejs/vite-plugin-svelte": "^6.2.1",
"@tsconfig/svelte": "^5.0.5",
"svelte": "5.41.1",
"svelte-check": "^4.3.3",
"typescript": "5.9.3",
"vite": "^7.3.0"
},
"imports": {
"../../src/worker/bgWorker.ts": "../../src/worker/bgWorker.mock.ts"
}
}

View File

@@ -1,52 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="512"
height="512"
viewBox="0 0 511.99998 511.99998"
version="1.1"
id="svg1"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs1" />
<g
id="layer1"
transform="translate(-22.694448,-28.922305)">
<g
id="g4"
transform="matrix(4.6921194,0,0,4.6921194,-266.26061,-494.11652)">
<rect
style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.2366;stroke-opacity:1"
id="rect2"
width="109.11913"
height="109.11913"
x="61.583057"
y="111.47176" />
<g
id="g3"
transform="matrix(0.77702959,0,0,0.77702959,22.523192,34.973874)">
<path
d="m 104.50787,75.245039 h -3.77394 l -10.90251,-29.352906 c 25.15963,-14.257127 33.96551,-46.12600067 20.12771,-71.285639 -14.25713,-25.159637 -46.126,-33.96551 -71.28564,-20.12771 -12.16049,6.709237 -21.38569,18.450401 -24.74031,31.868875 l -38.99744,-4.193274 c -2.93529,-18.4504 -20.12771,-31.449546 -38.578109,-28.514255 -16.773091,2.515964 -28.933582,16.773091 -28.933582,33.546184 0,5.8705823 1.677309,11.7411643 4.6126,17.1924183 l -46.964659,46.1260007 c -8.80587,-6.709236 -19.70838,-10.483182 -31.03022,-10.483182 -28.93358,0 -52.41591,23.482328 -52.41591,52.415908 0,28.933581 23.48233,52.415911 52.41591,52.415911 l 10.48319,62.89909 c -17.19242,7.54789 -25.15964,27.6756 -17.61175,44.86802 7.54789,17.19242 27.6756,25.15964 44.86802,17.61175 15.93444,-6.70924 23.90165,-24.32098 19.28905,-41.09408 l 36.900806,-19.28905 c 23.901654,26.41762 64.9957237,28.51425 91.832674,4.6126 13.41847,-12.16049 21.38569,-29.77224 21.38569,-47.80331 0,-4.6126 -0.41933,-9.64453 -1.67731,-14.25713 l 40.67475,-20.96636 c 12.57982,14.25713 33.96551,15.51511 48.22264,2.93529 14.25712,-12.57982 15.51511,-33.96551 2.93529,-48.222641 -7.96722,-6.70924 -17.19242,-10.90251 -26.83695,-10.90251 z m -223.92077,140.055311 c -5.45125,-5.45125 -12.99914,-8.80587 -20.54703,-9.64452 l -10.48319,-62.8991 c 10.06386,-3.35461 18.86973,-9.64452 25.15964,-18.03107 l 38.997438,20.54704 c -6.289909,16.77309 -5.031927,35.64282 3.354619,51.57725 z m 41.094077,-85.54276 -38.578107,-20.12771 c 1.67731,-5.45126 2.51596,-10.902511 2.51596,-16.773091 0,-10.90251 -3.35462,-21.38569 -9.64453,-30.191565 l 46.964658,-45.706673 c 15.934437,9.644527 36.900802,5.031927 46.545332,-10.9025087 1.25798,-2.096637 2.09663,-4.193273 2.93529,-6.28990997 l 38.99744,4.19327297 c 0.83865,15.0957817 8.38654,29.3529097 20.54703,38.5781097 l -34.3848403,64.157075 c -27.2562697,-10.902511 -58.7058147,-1.25798 -75.8982327,23.063 z m 148.861183,-20.12771 c 0,2.51596 0.41933,5.03193 1.25798,7.54789 l -38.57811,20.12771 c -4.6126,-9.2252 -11.74116,-16.77309 -20.12771,-22.64367 l 34.38484,-64.157077 c 5.45126,2.096637 11.32184,2.935291 17.19242,2.935291 3.35462,0 6.70924,-0.419327 10.06385,-0.838654 l 10.90251,29.352909 c -10.06385,5.87058 -15.51511,16.35376 -15.09578,27.675601 z"
id="path1-1"
style="overflow:hidden;stroke-width:4.19327"
transform="matrix(0.26458333,0,0,0.26458333,130.85167,139.42444)" />
<path
id="path1-8-1-7"
style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.073263;stroke-opacity:1"
d="m 140.38615,132.15285 c -0.55386,0 -1.00708,0.45307 -1.00708,1.00708 v 12.05537 c 0,0.5546 0.45322,1.00707 1.00708,1.00707 h 0.504 v 1.51154 h 2.01461 v -1.51154 h 10.07399 v 1.51154 h 2.01461 v -1.51154 h 0.50354 c 0.55461,0 1.00754,-0.45247 1.00754,-1.00707 v -12.05537 c 0,-0.55401 -0.45293,-1.00708 -1.00754,-1.00708 z m 0.504,1.51108 h 14.10321 v 11.04783 h -14.10321 z m 1.00753,1.00754 v 9.03321 h 12.0886 v -9.03321 z m 3.52524,1.99776 c 1.3854,0 2.51906,1.13321 2.51906,2.51861 0,1.38467 -1.13366,2.51816 -2.51906,2.51816 -1.38467,0 -2.51771,-1.13349 -2.51771,-2.51816 0,-1.3854 1.13304,-2.51861 2.51771,-2.51861 z m 7.05183,0 c 0.27767,0 0.504,0.22706 0.504,0.504 v 4.02968 c 0,0.27694 -0.22633,0.50309 -0.504,0.50309 -0.27693,0 -0.50354,-0.22615 -0.50354,-0.50309 v -4.02968 c 0,-0.27694 0.22661,-0.504 0.50354,-0.504 z m -7.55492,0.504 v 0.60461 c -0.42786,0.15092 -0.75526,0.50306 -0.90692,0.90601 h -0.60461 v 1.00753 h 0.60461 c 0.15166,0.42859 0.47906,0.756 0.90692,0.90692 v 0.60461 h 1.00753 v -0.60461 c 0.42786,-0.15092 0.75509,-0.50397 0.90601,-0.90692 h 0.60462 v -1.00753 h -0.60462 c -0.15092,-0.42786 -0.47815,-0.75509 -0.90601,-0.90601 v -0.60461 z" />
<path
id="path1-8-1-7-3"
style="fill:#ffffff;fill-opacity:1"
d="m 2576.8666,993.66142 c -7.56,0 -13.7458,6.19115 -13.7458,13.75308 v 164.5449 c 0,7.57 6.1858,13.7458 13.7458,13.7458 h 6.8838 v 20.6369 h 27.499 v -20.6369 h 137.5019 v 20.6369 h 27.4989 v -20.6369 h 6.8693 c 7.57,0 13.7531,-6.1758 13.7531,-13.7458 v -164.5449 c 0,-7.56193 -6.1831,-13.75308 -13.7531,-13.75308 z m 6.8838,20.62968 h 192.4998 v 150.799 h -192.4998 z m 13.7531,13.753 v 123.2929 h 164.9936 v -123.2929 z m 48.1141,27.2674 c 18.91,0 34.3827,15.4654 34.3827,34.3754 0,18.9 -15.4727,34.3755 -34.3827,34.3755 -18.9,0 -34.3682,-15.4755 -34.3682,-34.3755 0,-18.91 15.4682,-34.3754 34.3682,-34.3754 z m 96.2499,0 c 3.79,0 6.8838,3.0965 6.8838,6.8765 v 55.0051 c 0,3.78 -3.0938,6.8693 -6.8838,6.8693 -3.78,0 -6.8693,-3.0893 -6.8693,-6.8693 v -55.0051 c 0,-3.78 3.0893,-6.8765 6.8693,-6.8765 z m -103.1192,6.8765 v 8.2519 c -5.84,2.06 -10.3078,6.8705 -12.3778,12.3705 h -8.2518 v 13.7531 h 8.2518 c 2.07,5.85 6.5378,10.3178 12.3778,12.3778 v 8.2518 h 13.7531 v -8.2518 c 5.84,-2.06 10.3105,-6.8778 12.3705,-12.3778 h 8.2446 v -13.7531 h -8.2446 c -2.06,-5.84 -6.5305,-10.3105 -12.3705,-12.3705 v -8.2519 z"
transform="matrix(0.06289731,0,0,0.06289731,-82.022365,94.831671)" />
<path
id="path1-8-1"
style="fill:#ffffff;fill-opacity:1"
d="m 2576.8708,993.66021 c -7.56,0 -13.7505,6.18852 -13.7505,13.75049 v 164.5474 c 0,7.57 6.1905,13.7454 13.7505,13.7454 h 6.8778 v 20.6333 h 27.5009 v -20.6333 h 137.4996 v 20.6333 h 27.5009 v -20.6333 h 6.8727 c 7.57,0 13.7504,-6.1754 13.7504,-13.7454 v -164.5474 c 0,-7.56197 -6.1804,-13.75049 -13.7504,-13.75049 z m 6.8778,20.62819 h 192.5014 v 150.797 h -192.5014 z m 13.7504,13.7556 v 123.296 h 165.0005 v -123.296 z m 48.119,27.2668 c 18.91,0 34.3838,15.4687 34.3838,34.3787 0,18.9 -15.4738,34.3685 -34.3838,34.3685 -18.9,0 -34.3685,-15.4685 -34.3685,-34.3685 0,-18.91 15.4685,-34.3787 34.3685,-34.3787 z m 96.2533,0 c 3.79,0 6.8778,3.0977 6.8778,6.8777 v 55.0019 c 0,3.78 -3.0878,6.8676 -6.8778,6.8676 -3.78,0 -6.8727,-3.0876 -6.8727,-6.8676 v -55.0019 c 0,-3.78 3.0927,-6.8777 6.8727,-6.8777 z m -103.1209,6.8777 v 8.2523 c -5.84,2.06 -10.311,6.8709 -12.381,12.3709 h -8.2472 v 13.7505 h 8.2472 c 2.07,5.85 6.541,10.3159 12.381,12.3759 v 8.2523 h 13.7505 v -8.2523 c 5.84,-2.06 10.3108,-6.8759 12.3708,-12.3759 h 8.2473 v -13.7505 h -8.2473 c -2.06,-5.84 -6.5308,-10.3109 -12.3708,-12.3709 v -8.2523 z"
transform="matrix(0.08943055,0,0,0.08943055,-115.49313,85.768735)" />
</g>
</g>
</g>
</svg>


View File

@@ -1,26 +0,0 @@
{
"name": "WebPeer - Pseudo client for Self-hosted LiveSync Peer-to-Peer Replication",
"short_name": "WepPeer",
"description": "A web-based pseudo peer-to-peer replication client using as like server for background sync.",
"start_url": "./",
"display": "standalone",
"background_color": "#fff",
"theme_color": "#fff",
"orientation": "any",
"icons": [
{
"src": "./icon.svg",
"sizes": "512x512",
"type": "image/svg+xml",
"maskable": true
}
],
"additional_icons": [
{
"src": "./icon.svg",
"sizes": "512x512",
"type": "image/svg+xml",
"maskable": true
}
]
}

View File

@@ -1,5 +0,0 @@
<script lang="ts">
import SyncMain from "./SyncMain.svelte";
</script>
<SyncMain></SyncMain>

View File

@@ -1,23 +0,0 @@
import { LOG_LEVEL_VERBOSE } from "@lib/common/types";
import { defaultLoggerEnv, setGlobalLogFunction } from "@lib/common/logger";
import { writable } from "svelte/store";
export const logs = writable([] as string[]);
let _logs = [] as string[];
const maxLines = 10000;
setGlobalLogFunction((msg, level) => {
console.log(msg);
const msgstr = typeof msg === "string" ? msg : JSON.stringify(msg);
const strLog = `${new Date().toISOString()}\u2001${msgstr}`;
_logs.push(strLog);
if (_logs.length > maxLines) {
_logs = _logs.slice(_logs.length - maxLines);
}
logs.set(_logs);
});
defaultLoggerEnv.minLogLevel = LOG_LEVEL_VERBOSE;
export const storeP2PStatusLine = writable("");

View File

@@ -1,364 +0,0 @@
import { PouchDB } from "@lib/pouchdb/pouchdb-browser";
import {
type EntryDoc,
type LOG_LEVEL,
type P2PSyncSetting,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
P2P_DEFAULT_SETTINGS,
REMOTE_P2P,
} from "@lib/common/types";
import { eventHub } from "@lib/hub/hub";
import type { Confirm } from "@lib/interfaces/Confirm";
import { LOG_LEVEL_INFO, Logger } from "@lib/common/logger";
import { storeP2PStatusLine } from "./CommandsShim";
import {
EVENT_P2P_PEER_SHOW_EXTRA_MENU,
type CommandShim,
type PeerStatus,
type PluginShim,
} from "@lib/replication/trystero/P2PReplicatorPaneCommon";
import {
closeP2PReplicator,
openP2PReplicator,
P2PLogCollector,
type P2PReplicatorBase,
} from "@lib/replication/trystero/P2PReplicatorCore";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
import { reactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
import { EVENT_SETTING_SAVED } from "@lib/events/coreEvents";
import { unique } from "octagonal-wheels/collection";
import { BrowserServiceHub } from "@lib/services/BrowserServices";
import { TrysteroReplicator } from "@lib/replication/trystero/TrysteroReplicator";
import { SETTING_KEY_P2P_DEVICE_NAME } from "@lib/common/types";
import { ServiceContext } from "@lib/services/base/ServiceBase";
import type { InjectableServiceHub } from "@lib/services/InjectableServices";
import { Menu } from "@/lib/src/services/implements/browser/Menu";
function addToList(item: string, list: string) {
return unique(
list
.split(",")
.map((e) => e.trim())
.concat(item)
.filter((p) => p)
).join(",");
}
function removeFromList(item: string, list: string) {
return list
.split(",")
.map((e) => e.trim())
.filter((p) => p !== item)
.filter((p) => p)
.join(",");
}
export class P2PReplicatorShim implements P2PReplicatorBase, CommandShim {
storeP2PStatusLine = reactiveSource("");
plugin!: PluginShim;
// environment!: IEnvironment;
confirm!: Confirm;
// simpleStoreAPI!: ISimpleStoreAPI;
db?: PouchDB.Database<EntryDoc>;
services: InjectableServiceHub<ServiceContext>;
getDB() {
if (!this.db) {
throw new Error("DB not initialized");
}
return this.db;
}
_simpleStore!: SimpleStore<any>;
async closeDB() {
if (this.db) {
await this.db.close();
this.db = undefined;
}
}
constructor() {
const browserServiceHub = new BrowserServiceHub<ServiceContext>();
this.services = browserServiceHub;
this.services.vault.getVaultName.setHandler(() => "p2p-livesync-web-peer");
}
async init() {
// const { simpleStoreAPI } = await getWrappedSynchromesh();
// this.confirm = confirm;
this.confirm = this.services.UI.confirm;
// this.environment = environment;
if (this.db) {
try {
await this.closeDB();
} catch (ex) {
Logger("Error closing db", LOG_LEVEL_VERBOSE);
Logger(ex, LOG_LEVEL_VERBOSE);
}
}
const repStore = this.services.database.openSimpleStore<any>("p2p-livesync-web-peer");
this._simpleStore = repStore;
let _settings = (await repStore.get("settings")) || ({ ...P2P_DEFAULT_SETTINGS } as P2PSyncSetting);
this.plugin = {
saveSettings: async () => {
await repStore.set("settings", _settings);
eventHub.emitEvent(EVENT_SETTING_SAVED, _settings);
},
get settings() {
return _settings;
},
set settings(newSettings: P2PSyncSetting) {
_settings = { ..._settings, ...newSettings };
},
rebuilder: null,
services: this.services,
// $$scheduleAppReload: () => {},
// $$getVaultName: () => "p2p-livesync-web-peer",
};
// const deviceName = this.getDeviceName();
const database_name = this.settings.P2P_AppID + "-" + this.settings.P2P_roomID + "p2p-livesync-web-peer";
this.db = new PouchDB<EntryDoc>(database_name);
setTimeout(() => {
if (this.settings.P2P_AutoStart && this.settings.P2P_Enabled) {
void this.open();
}
}, 1000);
return this;
}
get settings() {
return this.plugin.settings;
}
_log(msg: any, level?: LOG_LEVEL): void {
Logger(msg, level);
}
_notice(msg: string, key?: string): void {
Logger(msg, LOG_LEVEL_NOTICE, key);
}
getSettings(): P2PSyncSetting {
return this.settings;
}
simpleStore(): SimpleStore<any> {
return this._simpleStore;
}
handleReplicatedDocuments(docs: EntryDoc[]): Promise<void> {
// No op. This is a client and does not need to process the docs
return Promise.resolve();
}
getPluginShim() {
return {};
}
getConfig(key: string) {
const vaultName = this.services.vault.getVaultName();
const dbKey = `${vaultName}-${key}`;
return localStorage.getItem(dbKey);
}
setConfig(key: string, value: string) {
const vaultName = this.services.vault.getVaultName();
const dbKey = `${vaultName}-${key}`;
localStorage.setItem(dbKey, value);
}
getDeviceName(): string {
return this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? this.plugin.services.vault.getVaultName();
}
getPlatform(): string {
return "pseudo-replicator";
}
m?: Menu;
afterConstructor(): void {
eventHub.onEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, ({ peer, event }) => {
if (this.m) {
this.m.hide();
}
this.m = new Menu()
.addItem((item) => item.setTitle("📥 Only Fetch").onClick(() => this.replicateFrom(peer)))
.addItem((item) => item.setTitle("📤 Only Send").onClick(() => this.replicateTo(peer)))
.addSeparator()
// .addItem((item) => {
// item.setTitle("🔧 Get Configuration").onClick(async () => {
// await this.getRemoteConfig(peer);
// });
// })
// .addSeparator()
.addItem((item) => {
const mark = peer.syncOnConnect ? "checkmark" : null;
item.setTitle("Toggle Sync on connect")
.onClick(async () => {
await this.toggleProp(peer, "syncOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = peer.watchOnConnect ? "checkmark" : null;
item.setTitle("Toggle Watch on connect")
.onClick(async () => {
await this.toggleProp(peer, "watchOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = peer.syncOnReplicationCommand ? "checkmark" : null;
item.setTitle("Toggle Sync on `Replicate now` command")
.onClick(async () => {
await this.toggleProp(peer, "syncOnReplicationCommand");
})
.setIcon(mark);
});
void this.m.showAtPosition({ x: event.x, y: event.y });
});
this.p2pLogCollector.p2pReplicationLine.onChanged((line) => {
storeP2PStatusLine.set(line.value);
});
}
_replicatorInstance?: TrysteroReplicator;
p2pLogCollector = new P2PLogCollector();
async open() {
await openP2PReplicator(this);
}
async close() {
await closeP2PReplicator(this);
}
enableBroadcastCastings() {
return this?._replicatorInstance?.enableBroadcastChanges();
}
disableBroadcastCastings() {
return this?._replicatorInstance?.disableBroadcastChanges();
}
async initialiseP2PReplicator(): Promise<TrysteroReplicator> {
await this.init();
try {
if (this._replicatorInstance) {
await this._replicatorInstance.close();
this._replicatorInstance = undefined;
}
if (!this.settings.P2P_AppID) {
this.settings.P2P_AppID = P2P_DEFAULT_SETTINGS.P2P_AppID;
}
const getInitialDeviceName = () =>
this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) || this.services.vault.getVaultName();
const getSettings = () => this.settings;
const store = () => this.simpleStore();
const getDB = () => this.getDB();
const getConfirm = () => this.confirm;
const getPlatform = () => this.getPlatform();
const env = {
get db() {
return getDB();
},
get confirm() {
return getConfirm();
},
get deviceName() {
return getInitialDeviceName();
},
get platform() {
return getPlatform();
},
get settings() {
return getSettings();
},
processReplicatedDocs: async (docs: EntryDoc[]): Promise<void> => {
await this.handleReplicatedDocuments(docs);
// No op. This is a client and does not need to process the docs
},
get simpleStore() {
return store();
},
};
this._replicatorInstance = new TrysteroReplicator(env);
return this._replicatorInstance;
} catch (e) {
this._log(
e instanceof Error ? e.message : "Something occurred on Initialising P2P Replicator",
LOG_LEVEL_INFO
);
this._log(e, LOG_LEVEL_VERBOSE);
throw e;
}
}
get replicator() {
return this._replicatorInstance!;
}
async replicateFrom(peer: PeerStatus) {
await this.replicator.replicateFrom(peer.peerId);
}
async replicateTo(peer: PeerStatus) {
await this.replicator.requestSynchroniseToPeer(peer.peerId);
}
async getRemoteConfig(peer: PeerStatus) {
Logger(
`Requesting remote config for ${peer.name}. Please input the passphrase on the remote device`,
LOG_LEVEL_NOTICE
);
const remoteConfig = await this.replicator.getRemoteConfig(peer.peerId);
if (remoteConfig) {
Logger(`Remote config for ${peer.name} is retrieved successfully`);
const DROP = "Yes, and drop local database";
const KEEP = "Yes, but keep local database";
const CANCEL = "No, cancel";
const yn = await this.confirm.askSelectStringDialogue(
`Do you really want to apply the remote config? This will overwrite your current config immediately and restart.
And you can also drop the local database to rebuild from the remote device.`,
[DROP, KEEP, CANCEL] as const,
{
defaultAction: CANCEL,
title: "Apply Remote Config ",
}
);
if (yn === DROP || yn === KEEP) {
if (yn === DROP) {
if (remoteConfig.remoteType !== REMOTE_P2P) {
const yn2 = await this.confirm.askYesNoDialog(
`Do you want to set the remote type to "P2P Sync" to rebuild by "P2P replication"?`,
{
title: "Rebuild from remote device",
}
);
if (yn2 === "yes") {
remoteConfig.remoteType = REMOTE_P2P;
remoteConfig.P2P_RebuildFrom = peer.name;
}
}
}
this.plugin.settings = remoteConfig;
await this.plugin.saveSettings();
if (yn === DROP) {
await this.plugin.rebuilder.scheduleFetch();
} else {
await this.plugin.services.appLifecycle.scheduleRestart();
}
} else {
Logger(`Cancelled\nRemote config for ${peer.name} is not applied`, LOG_LEVEL_NOTICE);
}
} else {
Logger(`Cannot retrieve remote config for ${peer.peerId}`);
}
}
async toggleProp(peer: PeerStatus, prop: "syncOnConnect" | "watchOnConnect" | "syncOnReplicationCommand") {
const settingMap = {
syncOnConnect: "P2P_AutoSyncPeers",
watchOnConnect: "P2P_AutoWatchPeers",
syncOnReplicationCommand: "P2P_SyncOnReplication",
} as const;
const targetSetting = settingMap[prop];
if (peer[prop]) {
this.plugin.settings[targetSetting] = removeFromList(peer.name, this.plugin.settings[targetSetting]);
await this.plugin.saveSettings();
} else {
this.plugin.settings[targetSetting] = addToList(peer.name, this.plugin.settings[targetSetting]);
await this.plugin.saveSettings();
}
}
}
export const cmdSyncShim = new P2PReplicatorShim();

View File

@@ -1,112 +0,0 @@
<script lang="ts">
import { storeP2PStatusLine, logs } from "./CommandsShim";
import P2PReplicatorPane from "@/features/P2PSync/P2PReplicator/P2PReplicatorPane.svelte";
import { onMount, tick } from "svelte";
import { cmdSyncShim } from "./P2PReplicatorShim";
import { eventHub } from "@lib/hub/hub";
import { EVENT_LAYOUT_READY } from "@lib/events/coreEvents";
let synchronised = $state(cmdSyncShim.init());
onMount(() => {
eventHub.emitEvent(EVENT_LAYOUT_READY);
return () => {
synchronised.then((e) => e.close());
};
});
let elP: HTMLDivElement;
logs.subscribe((log) => {
tick().then(() => elP?.scrollTo({ top: elP.scrollHeight }));
});
let statusLine = $state("");
storeP2PStatusLine.subscribe((status) => {
statusLine = status;
});
</script>
<main>
<div class="control">
{#await synchronised then cmdSync}
<P2PReplicatorPane plugin={cmdSync.plugin} {cmdSync}></P2PReplicatorPane>
{:catch error}
<p>{error.message}</p>
{/await}
</div>
<div class="log">
<div class="status">
{statusLine}
</div>
<div class="logslist" bind:this={elP}>
{#each $logs as log}
<p>{log}</p>
{/each}
</div>
</div>
</main>
<style>
main {
display: flex;
flex-direction: row;
flex-grow: 1;
max-height: 100vh;
box-sizing: border-box;
}
@media (max-width: 900px) {
main {
flex-direction: column;
}
}
@media (device-orientation: portrait) {
main {
flex-direction: column;
}
}
.log {
display: flex;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
padding: 1em;
min-width: 50%;
}
@media (max-width: 900px) {
.log {
max-height: 50vh;
}
}
@media (device-orientation: portrait) {
.log {
max-height: 50vh;
}
}
.control {
padding: 1em 1em;
overflow-y: scroll;
flex-grow: 1;
}
.status {
flex-grow: 0;
/* max-height: 40px; */
/* height: 40px; */
flex-shrink: 0;
}
.logslist {
display: flex;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
/* padding: 1em; */
width: 100%;
overflow-y: scroll;
flex-grow: 1;
flex-shrink: 1;
/* max-height: calc(100% - 40px); */
}
p {
margin: 0;
white-space: pre-wrap;
text-align: left;
word-break: break-all;
}
</style>

View File

@@ -1,74 +0,0 @@
<script lang="ts">
import { Menu } from "@/lib/src/services/implements/browser/Menu";
import { getDialogContext } from "@lib/services/implements/base/SvelteDialog";
let result = $state<string | boolean>("");
const context = getDialogContext();
async function testUI() {
const confirm = await context.services.confirm;
const ret = await confirm.askString("Your name", "What is your name?", "John Doe", false);
result = ret;
}
let resultPassword = $state<string | boolean>("");
async function testPassword() {
const confirm = await context.services.confirm;
const ret = await confirm.askString("passphrase", "?", "anythingonlyyouknow", true);
resultPassword = ret;
}
async function testMenu(event: MouseEvent) {
const m = new Menu()
.addItem((item) => item.setTitle("📥 Only Fetch").onClick(() => {}))
.addItem((item) => item.setTitle("📤 Only Send").onClick(() => {}))
.addSeparator()
.addItem((item) => {
item.setTitle("🔧 Get Configuration").onClick(async () => {
console.log("Get Configuration");
});
})
.addSeparator()
.addItem((item) => {
const mark = "checkmark";
item.setTitle("Toggle Sync on connect")
.onClick(async () => {
console.log("Toggle Sync on connect");
// await this.toggleProp(peer, "syncOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = null;
item.setTitle("Toggle Watch on connect")
.onClick(async () => {
console.log("Toggle Watch on connect");
// await this.toggleProp(peer, "watchOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = null;
item.setTitle("Toggle Sync on `Replicate now` command")
.onClick(async () => {})
.setIcon(mark);
});
m.showAtPosition({ x: event.x, y: event.y });
}
</script>
<main>
<h1>UI Test</h1>
<article>
<div>
<button onclick={() => testUI()}> String input </button>
{result}
</div>
<div>
<button onclick={() => testPassword()}> Password Input </button>
{resultPassword}
</div>
<div>
<button onclick={testMenu}>Menu</button>
</div>
</article>
</main>

View File

@@ -1,112 +0,0 @@
:root {
font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
line-height: 1.5;
font-weight: 400;
color-scheme: light dark;
color: rgba(255, 255, 255, 0.87);
background-color: #242424;
font-synthesis: none;
text-rendering: optimizeLegibility;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
--background-primary: #ffffff;
--background-primary-alt: #e9e9e9;
--size-4-1: 0.25em;
--tag-background: #f0f0f0;
--tag-border-width: 1px;
--tag-border-color: #cfffdd;
--background-modifier-success: #d4f3e9;
--background-secondary: #f0f0f0;
--background-modifier-error: #f8d7da;
--background-modifier-error-hover: #f5c6cb;
--interactive-accent: #007bff;
--interactive-accent-hover: #0056b3;
--text-normal: #333;
--text-warning: #f0ad4e;
}
a {
font-weight: 500;
color: #646cff;
text-decoration: inherit;
}
a:hover {
color: #535bf2;
}
body {
margin: 0;
padding: 0;
display: flex;
min-width: 320px;
min-height: 100vh;
box-sizing: border-box;
}
h1 {
font-size: 3.2em;
line-height: 1.1;
}
.card {
padding: 2em;
}
#app {
margin: 0;
padding: 0;
text-align: center;
display: flex;
flex-grow: 1;
}
button {
border-radius: 8px;
border: 1px solid transparent;
padding: 0.6em 1.2em;
font-size: 1em;
font-weight: 500;
font-family: inherit;
background-color: #1a1a1a;
cursor: pointer;
transition: border-color 0.25s;
}
button:hover {
border-color: #646cff;
}
button:focus,
button:focus-visible {
outline: 4px auto -webkit-focus-ring-color;
}
input,
select {
border-radius: 8px;
border: 1px solid #1a1a1a;
padding: 0.6em 1.2em;
font-size: 1em;
font-weight: 500;
font-family: inherit;
transition: border-color 0.25s;
}
@media (prefers-color-scheme: light) {
:root {
color: #213547;
background-color: #ffffff;
}
a:hover {
color: #747bff;
}
button {
background-color: #f9f9f9;
}
}

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="26.6" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 308"><path fill="#FF3E00" d="M239.682 40.707C211.113-.182 154.69-12.301 113.895 13.69L42.247 59.356a82.198 82.198 0 0 0-37.135 55.056a86.566 86.566 0 0 0 8.536 55.576a82.425 82.425 0 0 0-12.296 30.719a87.596 87.596 0 0 0 14.964 66.244c28.574 40.893 84.997 53.007 125.787 27.016l71.648-45.664a82.182 82.182 0 0 0 37.135-55.057a86.601 86.601 0 0 0-8.53-55.577a82.409 82.409 0 0 0 12.29-30.718a87.573 87.573 0 0 0-14.963-66.244"></path><path fill="#FFF" d="M106.889 270.841c-23.102 6.007-47.497-3.036-61.103-22.648a52.685 52.685 0 0 1-9.003-39.85a49.978 49.978 0 0 1 1.713-6.693l1.35-4.115l3.671 2.697a92.447 92.447 0 0 0 28.036 14.007l2.663.808l-.245 2.659a16.067 16.067 0 0 0 2.89 10.656a17.143 17.143 0 0 0 18.397 6.828a15.786 15.786 0 0 0 4.403-1.935l71.67-45.672a14.922 14.922 0 0 0 6.734-9.977a15.923 15.923 0 0 0-2.713-12.011a17.156 17.156 0 0 0-18.404-6.832a15.78 15.78 0 0 0-4.396 1.933l-27.35 17.434a52.298 52.298 0 0 1-14.553 6.391c-23.101 6.007-47.497-3.036-61.101-22.649a52.681 52.681 0 0 1-9.004-39.849a49.428 49.428 0 0 1 22.34-33.114l71.664-45.677a52.218 52.218 0 0 1 14.563-6.398c23.101-6.007 47.497 3.036 61.101 22.648a52.685 52.685 0 0 1 9.004 39.85a50.559 50.559 0 0 1-1.713 6.692l-1.35 4.116l-3.67-2.693a92.373 92.373 0 0 0-28.037-14.013l-2.664-.809l.246-2.658a16.099 16.099 0 0 0-2.89-10.656a17.143 17.143 0 0 0-18.398-6.828a15.786 15.786 0 0 0-4.402 1.935l-71.67 45.674a14.898 14.898 0 0 0-6.73 9.975a15.9 15.9 0 0 0 2.709 12.012a17.156 17.156 0 0 0 18.404 6.832a15.841 15.841 0 0 0 4.402-1.935l27.345-17.427a52.147 52.147 0 0 1 14.552-6.397c23.101-6.006 47.497 3.037 61.102 22.65a52.681 52.681 0 0 1 9.003 39.848a49.453 49.453 0 0 1-22.34 33.12l-71.664 45.673a52.218 52.218 0 0 1-14.563 6.398"></path></svg>


View File

@@ -1,9 +0,0 @@
import { mount } from "svelte";
import "./app.css";
import App from "./App.svelte";
const app = mount(App, {
target: document.getElementById("app")!,
});
export default app;

View File

@@ -1,9 +0,0 @@
import { mount } from "svelte";
import "./app.css";
import App from "./UITest.svelte";
const app = mount(App, {
target: document.getElementById("app")!,
});
export default app;

View File

@@ -1,2 +0,0 @@
/// <reference types="svelte" />
/// <reference types="vite/client" />

View File

@@ -1,7 +0,0 @@
import { vitePreprocess } from "@sveltejs/vite-plugin-svelte";
export default {
// Consult https://svelte.dev/docs#compile-time-svelte-preprocess
// for more information about preprocessors
preprocess: vitePreprocess(),
};

View File

@@ -1,25 +0,0 @@
{
"extends": "@tsconfig/svelte/tsconfig.json",
"compilerOptions": {
"sourceRoot": "../",
"target": "ESNext",
"useDefineForClassFields": true,
"module": "ESNext",
"resolveJsonModule": true,
/**
* Typecheck JS in `.svelte` and `.js` files by default.
* Disable checkJs if you'd like to use dynamic types in JS.
* Note that setting allowJs false does not prevent the use
* of JS in `.svelte` files.
*/
"allowJs": true,
"checkJs": true,
"isolatedModules": true,
"moduleDetection": "force",
"paths": {
"@/*": ["../../*"],
"@lib/*": ["../../lib/src/*"]
}
},
"include": ["src/**/*.ts", "src/**/*.js", "src/**/*.svelte"]
}

View File

@@ -1,4 +0,0 @@
{
"files": [],
"references": [{ "path": "./tsconfig.app.json" }, { "path": "./tsconfig.node.json" }]
}

View File

@@ -1,28 +0,0 @@
{
"compilerOptions": {
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
"target": "ES2022",
"lib": ["ES2023"],
"module": "ESNext",
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"isolatedModules": true,
"moduleDetection": "force",
"noEmit": true,
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true,
"paths": {
"@/*": ["../../*"],
"@lib/*": ["../../lib/src/*"]
}
},
"include": ["vite.config.ts"]
}

View File

@@ -1,16 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="icon.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Peer-to-Peer Daemon on Browser</title>
</head>
<body>
<div id="app"></div>
<script type="module" src="./src/uitest.ts"></script>
</body>
</html>

View File

@@ -1,24 +0,0 @@
import { defineConfig } from "vite";
import { svelte } from "@sveltejs/vite-plugin-svelte";
import path from "node:path";
// https://vite.dev/config/
export default defineConfig({
plugins: [svelte()],
resolve: {
alias: {
"@": path.resolve(__dirname, "../../"),
"@lib": path.resolve(__dirname, "../../lib/src"),
},
},
base: "./",
build: {
outDir: "dist",
emptyOutDir: true,
rollupOptions: {
input: {
index: "index.html",
// uitest: "uitest.html",
},
},
},
});

View File

@@ -1,99 +1,51 @@
import { deleteDB, type IDBPDatabase, openDB } from "idb";
import type { KeyValueDatabase } from "../lib/src/interfaces/KeyValueDatabase.ts";
import { serialized } from "octagonal-wheels/concurrency/lock";
import { Logger } from "octagonal-wheels/common/logger";
export interface KeyValueDatabase {
get<T>(key: IDBValidKey): Promise<T>;
set<T>(key: IDBValidKey, value: T): Promise<IDBValidKey>;
del(key: IDBValidKey): Promise<void>;
clear(): Promise<void>;
keys(query?: IDBValidKey | IDBKeyRange, count?: number): Promise<IDBValidKey[]>;
close(): void;
destroy(): Promise<void>;
}
const databaseCache: { [key: string]: IDBPDatabase<any> } = {};
export { OpenKeyValueDatabase } from "./KeyValueDBv2.ts";
export const _OpenKeyValueDatabase = async (dbKey: string): Promise<KeyValueDatabase> => {
export const OpenKeyValueDatabase = async (dbKey: string): Promise<KeyValueDatabase> => {
if (dbKey in databaseCache) {
databaseCache[dbKey].close();
delete databaseCache[dbKey];
}
const storeKey = dbKey;
let db: IDBPDatabase<any> | null = null;
const _openDB = () => {
return serialized("keyvaluedb-" + dbKey, async () => {
const dbInstance = await openDB(dbKey, 1, {
upgrade(db, _oldVersion, _newVersion, _transaction, _event) {
return db.createObjectStore(storeKey);
},
blocking(currentVersion, blockedVersion, event) {
Logger(
`Blocking database open for ${dbKey}: currentVersion=${currentVersion}, blockedVersion=${blockedVersion}`
);
databaseCache[dbKey]?.close();
delete databaseCache[dbKey];
},
blocked(currentVersion, blockedVersion, event) {
Logger(
`Database open blocked for ${dbKey}: currentVersion=${currentVersion}, blockedVersion=${blockedVersion}`
);
},
terminated() {
Logger(`Database connection terminated for ${dbKey}`);
},
});
databaseCache[dbKey] = dbInstance;
return dbInstance;
});
};
const closeDB = () => {
if (db) {
db.close();
delete databaseCache[dbKey];
db = null;
}
};
db = await _openDB();
const dbPromise = openDB(dbKey, 1, {
upgrade(db) {
db.createObjectStore(storeKey);
},
});
const db = await dbPromise;
databaseCache[dbKey] = db;
return {
async get<T>(key: IDBValidKey): Promise<T> {
if (!db) {
db = await _openDB();
databaseCache[dbKey] = db;
}
return await db.get(storeKey, key);
},
async set<T>(key: IDBValidKey, value: T) {
if (!db) {
db = await _openDB();
databaseCache[dbKey] = db;
}
return await db.put(storeKey, value, key);
},
async del(key: IDBValidKey) {
if (!db) {
db = await _openDB();
databaseCache[dbKey] = db;
}
return await db.delete(storeKey, key);
},
async clear() {
if (!db) {
db = await _openDB();
databaseCache[dbKey] = db;
}
return await db.clear(storeKey);
},
async keys(query?: IDBValidKey | IDBKeyRange, count?: number) {
if (!db) {
db = await _openDB();
databaseCache[dbKey] = db;
}
return await db.getAllKeys(storeKey, query, count);
},
close() {
delete databaseCache[dbKey];
return Promise.resolve(closeDB());
return db.close();
},
async destroy() {
delete databaseCache[dbKey];
// await closeDB();
await deleteDB(dbKey, {
blocked() {
console.warn(`Database delete blocked for ${dbKey}`);
},
});
db.close();
await deleteDB(dbKey);
},
};
};

View File

@@ -1,154 +0,0 @@
import { LOG_LEVEL_VERBOSE, Logger } from "@/lib/src/common/logger";
import type { KeyValueDatabase } from "@/lib/src/interfaces/KeyValueDatabase";
import { deleteDB, openDB, type IDBPDatabase } from "idb";
import { serialized } from "octagonal-wheels/concurrency/lock";
const databaseCache = new Map<string, IDBKeyValueDatabase>();
export async function OpenKeyValueDatabase(dbKey: string): Promise<KeyValueDatabase> {
return await serialized(`OpenKeyValueDatabase-${dbKey}`, async () => {
const cachedDB = databaseCache.get(dbKey);
if (cachedDB) {
if (!cachedDB.isDestroyed) {
return cachedDB;
}
await cachedDB.ensuredDestroyed;
databaseCache.delete(dbKey);
}
const newDB = new IDBKeyValueDatabase(dbKey);
try {
await newDB.getIsReady();
databaseCache.set(dbKey, newDB);
return newDB;
} catch (e) {
databaseCache.delete(dbKey);
throw e;
}
});
}
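// Hedged usage sketch (illustrative, not part of the original file): concurrent
// opens for the same key are serialized and resolve to one cached, ready instance.
async function exampleOpenKeyValueDatabase() {
const db = await OpenKeyValueDatabase("example-store"); // store name assumed
await db.set("lastSync", Date.now());
const last = await db.get<number>("lastSync");
console.log(`last sync at ${last}`);
}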
export class IDBKeyValueDatabase implements KeyValueDatabase {
protected _dbPromise: Promise<IDBPDatabase<any>> | null = null;
protected dbKey: string;
protected storeKey: string;
protected _isDestroyed: boolean = false;
protected destroyedPromise: Promise<void> | null = null;
get isDestroyed() {
return this._isDestroyed;
}
get ensuredDestroyed(): Promise<void> {
if (this.destroyedPromise) {
return this.destroyedPromise;
}
return Promise.resolve();
}
async getIsReady(): Promise<boolean> {
await this.ensureDB();
return this.isDestroyed === false;
}
protected ensureDB() {
if (this._isDestroyed) {
throw new Error("Database is destroyed");
}
if (this._dbPromise) {
return this._dbPromise;
}
this._dbPromise = openDB(this.dbKey, undefined, {
upgrade: (db, _oldVersion, _newVersion, _transaction, _event) => {
if (!db.objectStoreNames.contains(this.storeKey)) {
return db.createObjectStore(this.storeKey);
}
},
blocking: (currentVersion, blockedVersion, event) => {
Logger(
`Blocking database open for ${this.dbKey}: currentVersion=${currentVersion}, blockedVersion=${blockedVersion}`,
LOG_LEVEL_VERBOSE
);
// `blocking` fires on a previously opened connection, not the one being opened here; close it so the newer open can proceed (the `terminated` handler clears the cached promise).
void this.closeDB(true);
},
blocked: (currentVersion, blockedVersion, event) => {
Logger(
`Database open blocked for ${this.dbKey}: currentVersion=${currentVersion}, blockedVersion=${blockedVersion}`,
LOG_LEVEL_VERBOSE
);
},
terminated: () => {
Logger(`Database connection terminated for ${this.dbKey}`, LOG_LEVEL_VERBOSE);
this._dbPromise = null;
},
}).catch((e) => {
this._dbPromise = null;
throw e;
});
return this._dbPromise;
}
protected async closeDB(setDestroyed: boolean = false) {
if (this._dbPromise) {
const tempPromise = this._dbPromise;
this._dbPromise = null;
try {
const dbR = await tempPromise;
dbR.close();
} catch (e) {
Logger(`Error closing database`);
Logger(e, LOG_LEVEL_VERBOSE);
}
}
this._dbPromise = null;
if (setDestroyed) {
this._isDestroyed = true;
this.destroyedPromise = Promise.resolve();
}
}
get DB(): Promise<IDBPDatabase<any>> {
if (this._isDestroyed) {
return Promise.reject(new Error("Database is destroyed"));
}
return this.ensureDB();
}
constructor(dbKey: string) {
this.dbKey = dbKey;
this.storeKey = dbKey;
}
async get<U>(key: IDBValidKey): Promise<U> {
const db = await this.DB;
return await db.get(this.storeKey, key);
}
async set<U>(key: IDBValidKey, value: U): Promise<IDBValidKey> {
const db = await this.DB;
await db.put(this.storeKey, value, key);
return key;
}
async del(key: IDBValidKey): Promise<void> {
const db = await this.DB;
return await db.delete(this.storeKey, key);
}
async clear(): Promise<void> {
const db = await this.DB;
return await db.clear(this.storeKey);
}
async keys(query?: IDBValidKey | IDBKeyRange, count?: number): Promise<IDBValidKey[]> {
const db = await this.DB;
return await db.getAllKeys(this.storeKey, query, count);
}
async close(): Promise<void> {
await this.closeDB();
}
async destroy(): Promise<void> {
this._isDestroyed = true;
this.destroyedPromise = (async () => {
await this.closeDB();
await deleteDB(this.dbKey, {
blocked: () => {
Logger(`Database delete blocked for ${this.dbKey}`);
},
});
})();
await this.destroyedPromise;
}
}

View File

@@ -1,14 +1,17 @@
// This file is based on a file published by @remotely-save under the Apache 2 License.
// I would love to express my deepest gratitude to the original authors for their hard work and dedication. Without their contributions, this project would not have been possible.
//
//
// Original Implementation is here: https://github.com/remotely-save/remotely-save/blob/28b99557a864ef59c19d2ad96101196e401718f0/src/remoteForS3.ts
import { FetchHttpHandler, type FetchHttpHandlerOptions } from "@smithy/fetch-http-handler";
import {
FetchHttpHandler,
type FetchHttpHandlerOptions,
} from "@smithy/fetch-http-handler";
import { HttpRequest, HttpResponse, type HttpHandlerOptions } from "@smithy/protocol-http";
//@ts-ignore
import { requestTimeout } from "@smithy/fetch-http-handler/dist-es/request-timeout";
import { buildQueryString } from "@smithy/querystring-builder";
import { requestUrl, type RequestUrlParam } from "../../../deps.ts";
import { requestUrl, type RequestUrlParam } from "../deps.ts";
////////////////////////////////////////////////////////////////////////////////
// special handler using Obsidian requestUrl
////////////////////////////////////////////////////////////////////////////////
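// Hedged sketch (wiring assumed, not from the original file): an S3 client can be
// pointed at this handler so SDK traffic flows through Obsidian's requestUrl,
// avoiding the browser CORS restrictions that a plain fetch() transport would hit:
//   const client = new S3Client({ region, credentials, requestHandler: new ObsHttpHandler() });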
@@ -22,13 +25,20 @@ import { requestUrl, type RequestUrlParam } from "../../../deps.ts";
export class ObsHttpHandler extends FetchHttpHandler {
requestTimeoutInMs: number | undefined;
reverseProxyNoSignUrl: string | undefined;
constructor(options?: FetchHttpHandlerOptions, reverseProxyNoSignUrl?: string) {
constructor(
options?: FetchHttpHandlerOptions,
reverseProxyNoSignUrl?: string
) {
super(options);
this.requestTimeoutInMs = options === undefined ? undefined : options.requestTimeout;
this.requestTimeoutInMs =
options === undefined ? undefined : options.requestTimeout;
this.reverseProxyNoSignUrl = reverseProxyNoSignUrl;
}
// eslint-disable-next-line require-await
async handle(request: HttpRequest, { abortSignal }: HttpHandlerOptions = {}): Promise<{ response: HttpResponse }> {
async handle(
request: HttpRequest,
{ abortSignal }: HttpHandlerOptions = {}
): Promise<{ response: HttpResponse }> {
if (abortSignal?.aborted) {
const abortError = new Error("Request aborted");
abortError.name = "AbortError";
@@ -44,13 +54,18 @@ export class ObsHttpHandler extends FetchHttpHandler {
}
const { port, method } = request;
let url = `${request.protocol}//${request.hostname}${port ? `:${port}` : ""}${path}`;
if (this.reverseProxyNoSignUrl !== undefined && this.reverseProxyNoSignUrl !== "") {
let url = `${request.protocol}//${request.hostname}${port ? `:${port}` : ""
}${path}`;
if (
this.reverseProxyNoSignUrl !== undefined &&
this.reverseProxyNoSignUrl !== ""
) {
const urlObj = new URL(url);
urlObj.host = this.reverseProxyNoSignUrl;
url = urlObj.href;
}
const body = method === "GET" || method === "HEAD" ? undefined : request.body;
const body =
method === "GET" || method === "HEAD" ? undefined : request.body;
const transformedHeaders: Record<string, string> = {};
for (const key of Object.keys(request.headers)) {

View File

@@ -1,28 +0,0 @@
import { ItemView } from "@/deps.ts";
import { type mount, unmount } from "svelte";
export abstract class SvelteItemView extends ItemView {
abstract instantiateComponent(target: HTMLElement): ReturnType<typeof mount> | Promise<ReturnType<typeof mount>>;
component?: ReturnType<typeof mount>;
async onOpen() {
await super.onOpen();
this.contentEl.empty();
await this._dismountComponent();
this.component = await this.instantiateComponent(this.contentEl);
return;
}
async _dismountComponent() {
if (this.component) {
await unmount(this.component);
this.component = undefined;
}
}
async onClose() {
await super.onClose();
if (this.component) {
await unmount(this.component);
this.component = undefined;
}
return;
}
}

235
src/common/dialogs.ts Normal file
View File

@@ -0,0 +1,235 @@
import { ButtonComponent } from "obsidian";
import { App, FuzzySuggestModal, MarkdownRenderer, Modal, Plugin, Setting } from "../deps.ts";
import ObsidianLiveSyncPlugin from "../main.ts";
//@ts-ignore
import PluginPane from "../ui/PluginPane.svelte";
export class PluginDialogModal extends Modal {
plugin: ObsidianLiveSyncPlugin;
component: PluginPane | undefined;
isOpened() {
return this.component != undefined;
}
constructor(app: App, plugin: ObsidianLiveSyncPlugin) {
super(app);
this.plugin = plugin;
}
onOpen() {
const { contentEl } = this;
this.contentEl.style.overflow = "auto";
this.contentEl.style.display = "flex";
this.contentEl.style.flexDirection = "column";
this.titleEl.setText("Customization Sync (Beta3)")
if (!this.component) {
this.component = new PluginPane({
target: contentEl,
props: { plugin: this.plugin },
});
}
}
onClose() {
if (this.component) {
this.component.$destroy();
this.component = undefined;
}
}
}
export class InputStringDialog extends Modal {
result: string | false = false;
onSubmit: (result: string | false) => void;
title: string;
key: string;
placeholder: string;
isManuallyClosed = false;
isPassword = false;
constructor(app: App, title: string, key: string, placeholder: string, isPassword: boolean, onSubmit: (result: string | false) => void) {
super(app);
this.onSubmit = onSubmit;
this.title = title;
this.placeholder = placeholder;
this.key = key;
this.isPassword = isPassword;
}
onOpen() {
const { contentEl } = this;
this.titleEl.setText(this.title);
const formEl = contentEl.createDiv();
new Setting(formEl).setName(this.key).setClass(this.isPassword ? "password-input" : "normal-input").addText((text) =>
text.onChange((value) => {
this.result = value;
})
);
new Setting(formEl).addButton((btn) =>
btn
.setButtonText("Ok")
.setCta()
.onClick(() => {
this.isManuallyClosed = true;
this.close();
})
).addButton((btn) =>
btn
.setButtonText("Cancel")
.setCta()
.onClick(() => {
this.close();
})
);
}
onClose() {
const { contentEl } = this;
contentEl.empty();
if (this.isManuallyClosed) {
this.onSubmit(this.result);
} else {
this.onSubmit(false);
}
}
}
export class PopoverSelectString extends FuzzySuggestModal<string> {
app: App;
callback: ((e: string) => void) | undefined = () => { };
getItemsFun: () => string[] = () => {
return ["yes", "no"];
}
constructor(app: App, note: string, placeholder: string | undefined, getItemsFun: (() => string[]) | undefined, callback: (e: string) => void) {
super(app);
this.app = app;
this.setPlaceholder((placeholder ?? "y/n) ") + note);
if (getItemsFun) this.getItemsFun = getItemsFun;
this.callback = callback;
}
getItems(): string[] {
return this.getItemsFun();
}
getItemText(item: string): string {
return item;
}
onChooseItem(item: string, evt: MouseEvent | KeyboardEvent): void {
// debugger;
this.callback?.(item);
this.callback = undefined;
}
onClose(): void {
setTimeout(() => {
if (this.callback) {
this.callback("");
this.callback = undefined;
}
}, 100);
}
}
export class MessageBox extends Modal {
plugin: Plugin;
title: string;
contentMd: string;
buttons: string[];
result: string | false = false;
isManuallyClosed = false;
defaultAction: string | undefined;
timeout: number | undefined;
timer: ReturnType<typeof setInterval> | undefined = undefined;
defaultButtonComponent: ButtonComponent | undefined;
onSubmit: (result: string | false) => void;
constructor(plugin: Plugin, title: string, contentMd: string, buttons: string[], defaultAction: (typeof buttons)[number], timeout: number | undefined, onSubmit: (result: (typeof buttons)[number] | false) => void) {
super(plugin.app);
this.plugin = plugin;
this.title = title;
this.contentMd = contentMd;
this.buttons = buttons;
this.onSubmit = onSubmit;
this.defaultAction = defaultAction;
this.timeout = timeout;
if (this.timeout) {
this.timer = setInterval(() => {
if (this.timeout === undefined) return;
this.timeout--;
if (this.timeout < 0) {
if (this.timer) {
clearInterval(this.timer);
this.timer = undefined;
}
this.result = defaultAction;
this.isManuallyClosed = true;
this.close();
} else {
this.defaultButtonComponent?.setButtonText(`( ${this.timeout} ) ${defaultAction}`);
}
}, 1000);
}
}
onOpen() {
const { contentEl } = this;
this.titleEl.setText(this.title);
contentEl.addEventListener("click", () => {
if (this.timer) {
clearInterval(this.timer);
this.timer = undefined;
}
})
const div = contentEl.createDiv();
MarkdownRenderer.render(this.plugin.app, this.contentMd, div, "/", this.plugin);
const buttonSetting = new Setting(contentEl);
buttonSetting.controlEl.style.flexWrap = "wrap";
for (const button of this.buttons) {
buttonSetting.addButton((btn) => {
btn
.setButtonText(button)
.onClick(() => {
this.isManuallyClosed = true;
this.result = button;
if (this.timer) {
clearInterval(this.timer);
this.timer = undefined;
}
this.close();
})
if (button == this.defaultAction) {
this.defaultButtonComponent = btn;
}
return btn;
}
)
}
}
onClose() {
const { contentEl } = this;
contentEl.empty();
if (this.timer) {
clearInterval(this.timer);
this.timer = undefined;
}
if (this.isManuallyClosed) {
this.onSubmit(this.result);
} else {
this.onSubmit(false);
}
}
}
export function confirmWithMessage(plugin: Plugin, title: string, contentMd: string, buttons: string[], defaultAction: (typeof buttons)[number], timeout?: number): Promise<(typeof buttons)[number] | false> {
return new Promise((res) => {
const dialog = new MessageBox(plugin, title, contentMd, buttons, defaultAction, timeout, (result) => res(result));
dialog.open();
});
}

View File

@@ -1,55 +0,0 @@
import { eventHub } from "../lib/src/hub/hub";
import type ObsidianLiveSyncPlugin from "../main";
export const EVENT_PLUGIN_LOADED = "plugin-loaded";
export const EVENT_PLUGIN_UNLOADED = "plugin-unloaded";
export const EVENT_FILE_SAVED = "file-saved";
export const EVENT_LEAF_ACTIVE_CHANGED = "leaf-active-changed";
export const EVENT_REQUEST_OPEN_SETTINGS = "request-open-settings";
export const EVENT_REQUEST_OPEN_SETTING_WIZARD = "request-open-setting-wizard";
export const EVENT_REQUEST_OPEN_SETUP_URI = "request-open-setup-uri";
export const EVENT_REQUEST_COPY_SETUP_URI = "request-copy-setup-uri";
export const EVENT_REQUEST_SHOW_SETUP_QR = "request-show-setup-qr";
export const EVENT_REQUEST_RELOAD_SETTING_TAB = "reload-setting-tab";
export const EVENT_REQUEST_OPEN_PLUGIN_SYNC_DIALOG = "request-open-plugin-sync-dialog";
export const EVENT_REQUEST_OPEN_P2P = "request-open-p2p";
export const EVENT_REQUEST_CLOSE_P2P = "request-close-p2p";
export const EVENT_REQUEST_RUN_DOCTOR = "request-run-doctor";
export const EVENT_REQUEST_RUN_FIX_INCOMPLETE = "request-run-fix-incomplete";
export const EVENT_ON_UNRESOLVED_ERROR = "on-unresolved-error";
export const EVENT_ANALYSE_DB_USAGE = "analyse-db-usage";
export const EVENT_REQUEST_PERFORM_GC_V3 = "request-perform-gc-v3";
export const EVENT_REQUEST_CHECK_REMOTE_SIZE = "request-check-remote-size";
// export const EVENT_FILE_CHANGED = "file-changed";
declare global {
interface LSEvents {
[EVENT_PLUGIN_LOADED]: ObsidianLiveSyncPlugin;
[EVENT_PLUGIN_UNLOADED]: undefined;
[EVENT_REQUEST_OPEN_PLUGIN_SYNC_DIALOG]: undefined;
[EVENT_REQUEST_OPEN_SETTINGS]: undefined;
[EVENT_REQUEST_OPEN_SETTING_WIZARD]: undefined;
[EVENT_REQUEST_RELOAD_SETTING_TAB]: undefined;
[EVENT_LEAF_ACTIVE_CHANGED]: undefined;
[EVENT_REQUEST_CLOSE_P2P]: undefined;
[EVENT_REQUEST_OPEN_P2P]: undefined;
[EVENT_REQUEST_OPEN_SETUP_URI]: undefined;
[EVENT_REQUEST_COPY_SETUP_URI]: undefined;
[EVENT_REQUEST_SHOW_SETUP_QR]: undefined;
[EVENT_REQUEST_RUN_DOCTOR]: string;
[EVENT_REQUEST_RUN_FIX_INCOMPLETE]: undefined;
[EVENT_ON_UNRESOLVED_ERROR]: undefined;
[EVENT_ANALYSE_DB_USAGE]: undefined;
[EVENT_REQUEST_CHECK_REMOTE_SIZE]: undefined;
[EVENT_REQUEST_PERFORM_GC_V3]: undefined;
}
}
export * from "../lib/src/events/coreEvents.ts";
export { eventHub };

View File

@@ -1,12 +0,0 @@
import type { TFile } from "../deps";
import type { FilePathWithPrefix, LoadedEntry } from "../lib/src/common/types";
export const EVENT_REQUEST_SHOW_HISTORY = "show-history";
declare global {
interface LSEvents {
[EVENT_REQUEST_SHOW_HISTORY]:
| { file: TFile; fileOnDB: LoadedEntry }
| { file: FilePathWithPrefix; fileOnDB: LoadedEntry };
}
}

View File

@@ -1,4 +1,4 @@
import { PersistentMap } from "octagonal-wheels/dataobject/PersistentMap";
import { PersistentMap } from "../lib/src/dataobject/PersistentMap.ts";
export let sameChangePairs: PersistentMap<number[]>;

View File

@@ -1,6 +1,5 @@
import { type PluginManifest, TFile } from "../deps.ts";
import { type DatabaseEntry, type EntryBody, type FilePath } from "../lib/src/common/types.ts";
export type { CacheData, FileEventItem } from "../lib/src/common/types.ts";
export interface PluginDataEntry extends DatabaseEntry {
deviceVaultName: string;
@@ -49,6 +48,23 @@ export type queueItem = {
warned?: boolean;
};
export type CacheData = string | ArrayBuffer;
export type FileEventType = "CREATE" | "DELETE" | "CHANGED" | "RENAME" | "INTERNAL";
export type FileEventArgs = {
file: FileInfo | InternalFileInfo;
cache?: CacheData;
oldPath?: string;
ctx?: any;
}
export type FileEventItem = {
type: FileEventType,
args: FileEventArgs,
key: string,
skipBatchWait?: boolean,
cancelled?: boolean,
batched?: boolean
}
// Hidden items (Now means `chunk`)
export const CHeader = "h:";
@@ -65,5 +81,5 @@ export const ICHeaderLength = ICHeader.length;
export const ICXHeader = "ix:";
export const FileWatchEventQueueMax = 10;
export const configURIBase = "obsidian://setuplivesync?settings=";
export { configURIBase, configURIBaseQR } from "../lib/src/common/types.ts";

View File

@@ -1,57 +1,28 @@
import { normalizePath, Platform, TAbstractFile, type RequestUrlParam, requestUrl } from "../deps.ts";
import {
path2id_base,
id2path_base,
isValidFilenameInLinux,
isValidFilenameInDarwin,
isValidFilenameInWidows,
isValidFilenameInAndroid,
stripAllPrefixes,
} from "../lib/src/string_and_binary/path.ts";
import { normalizePath, Platform, TAbstractFile, App, type RequestUrlParam, requestUrl, TFile } from "../deps.ts";
import { path2id_base, id2path_base, isValidFilenameInLinux, isValidFilenameInDarwin, isValidFilenameInWidows, isValidFilenameInAndroid, stripAllPrefixes } from "../lib/src/string_and_binary/path.ts";
import { Logger } from "../lib/src/common/logger.ts";
import {
LOG_LEVEL_INFO,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
type AnyEntry,
type CouchDBCredentials,
type DocumentID,
type EntryHasPath,
type FilePath,
type FilePathWithPrefix,
type UXFileInfo,
type UXFileInfoStub,
} from "../lib/src/common/types.ts";
import { LOG_LEVEL_VERBOSE, type AnyEntry, type DocumentID, type EntryHasPath, type FilePath, type FilePathWithPrefix } from "../lib/src/common/types.ts";
import { CHeader, ICHeader, ICHeaderLength, ICXHeader, PSCHeader } from "./types.ts";
import { InputStringDialog, PopoverSelectString } from "./dialogs.ts";
import type ObsidianLiveSyncPlugin from "../main.ts";
import { writeString } from "../lib/src/string_and_binary/convert.ts";
import { fireAndForget } from "../lib/src/common/utils.ts";
import { sameChangePairs } from "./stores.ts";
import { scheduleTask } from "octagonal-wheels/concurrency/task";
import { EVENT_PLUGIN_UNLOADED, eventHub } from "./events.ts";
import { promiseWithResolver, type PromiseWithResolvers } from "octagonal-wheels/promises";
import { AuthorizationHeaderGenerator } from "../lib/src/replication/httplib.ts";
import type { KeyValueDatabase } from "../lib/src/interfaces/KeyValueDatabase.ts";
export { scheduleTask, cancelTask, cancelAllTasks } from "octagonal-wheels/concurrency/task";
export { scheduleTask, setPeriodicTask, cancelTask, cancelAllTasks, cancelPeriodicTask, cancelAllPeriodicTask, } from "../lib/src/concurrency/task.ts";
// For backward compatibility, using the path for determining id.
// Only CouchDB unacceptable ID (that starts with an underscore) has been prefixed with "/".
// The first slash will be deleted when the path is normalized.
export async function path2id(
filename: FilePathWithPrefix | FilePath,
obfuscatePassphrase: string | false,
caseInsensitive: boolean
): Promise<DocumentID> {
export async function path2id(filename: FilePathWithPrefix | FilePath, obfuscatePassphrase: string | false): Promise<DocumentID> {
const temp = filename.split(":");
const path = temp.pop();
const normalizedPath = normalizePath(path as FilePath);
temp.push(normalizedPath);
const fixedPath = temp.join(":") as FilePathWithPrefix;
const out = await path2id_base(fixedPath, obfuscatePassphrase, caseInsensitive);
const out = await path2id_base(fixedPath, obfuscatePassphrase);
return out;
}
export function id2path(id: DocumentID, entry?: EntryHasPath): FilePathWithPrefix {
@@ -65,6 +36,7 @@ export function id2path(id: DocumentID, entry?: EntryHasPath): FilePathWithPrefi
}
export function getPath(entry: AnyEntry) {
return id2path(entry._id, entry);
}
export function getPathWithoutPrefix(entry: AnyEntry) {
const f = getPath(entry);
@@ -75,25 +47,6 @@ export function getPathFromTFile(file: TAbstractFile) {
return file.path as FilePath;
}
export function isInternalFile(file: UXFileInfoStub | string | FilePathWithPrefix) {
if (typeof file == "string") return file.startsWith(ICHeader);
if (file.isInternal) return true;
return false;
}
export function getPathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
if (typeof file == "string") return file as FilePathWithPrefix;
return file.path;
}
export function getStoragePathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
if (typeof file == "string") return stripAllPrefixes(file as FilePathWithPrefix);
return stripAllPrefixes(file.path);
}
export function getDatabasePathFromUXFileInfo(file: UXFileInfoStub | string | FilePathWithPrefix) {
if (typeof file == "string" && file.startsWith(ICXHeader)) return file as FilePathWithPrefix;
const prefix = isInternalFile(file) ? ICHeader : "";
if (typeof file == "string") return (prefix + stripAllPrefixes(file as FilePathWithPrefix)) as FilePathWithPrefix;
return (prefix + stripAllPrefixes(file.path)) as FilePathWithPrefix;
}
const memos: { [key: string]: any } = {};
export function memoObject<T>(key: string, obj: T): T {
@@ -103,7 +56,7 @@ export function memoObject<T>(key: string, obj: T): T {
export async function memoIfNotExist<T>(key: string, func: () => T | Promise<T>): Promise<T> {
if (!(key in memos)) {
const w = func();
const v = w instanceof Promise ? await w : w;
const v = w instanceof Promise ? (await w) : w;
memos[key] = v;
}
return memos[key] as T;
@@ -119,6 +72,196 @@ export function disposeMemoObject(key: string) {
delete memos[key];
}
export function isSensibleMargeApplicable(path: string) {
if (path.endsWith(".md")) return true;
return false;
}
export function isObjectMargeApplicable(path: string) {
if (path.endsWith(".canvas")) return true;
if (path.endsWith(".json")) return true;
return false;
}
export function tryParseJSON(str: string, fallbackValue?: any) {
try {
return JSON.parse(str);
} catch (ex) {
return fallbackValue;
}
}
const MARK_OPERATOR = `\u{0001}`;
const MARK_DELETED = `${MARK_OPERATOR}__DELETED`;
const MARK_ISARRAY = `${MARK_OPERATOR}__ARRAY`;
const MARK_SWAPPED = `${MARK_OPERATOR}__SWAP`;
function unorderedArrayToObject(obj: Array<any>) {
return obj.map(e => ({ [e.id as string]: e })).reduce((p, c) => ({ ...p, ...c }), {})
}
function objectToUnorderedArray(obj: object) {
const entries = Object.entries(obj);
if (entries.some(e => e[0] != e[1]?.id)) throw new Error("Item does not look like an unordered array");
return entries.map(e => e[1]);
}
function generatePatchUnorderedArray(from: Array<any>, to: Array<any>) {
if (from.every(e => typeof (e) == "object" && ("id" in e)) && to.every(e => typeof (e) == "object" && ("id" in e))) {
const fObj = unorderedArrayToObject(from);
const tObj = unorderedArrayToObject(to);
const diff = generatePatchObj(fObj, tObj);
if (Object.keys(diff).length > 0) {
return { [MARK_ISARRAY]: diff };
} else {
return {};
}
}
return { [MARK_SWAPPED]: to };
}
export function generatePatchObj(from: Record<string | number | symbol, any>, to: Record<string | number | symbol, any>) {
const entries = Object.entries(from);
const tempMap = new Map<string | number | symbol, any>(entries);
const ret = {} as Record<string | number | symbol, any>;
const newEntries = Object.entries(to);
for (const [key, value] of newEntries) {
if (!tempMap.has(key)) {
//New
ret[key] = value;
tempMap.delete(key);
} else {
//Exists
const v = tempMap.get(key);
if (typeof (v) !== typeof (value) || (Array.isArray(v) !== Array.isArray(value))) {
// If the types do not match, replace the value completely.
ret[key] = { [MARK_SWAPPED]: value };
} else {
if (typeof (v) == "object" && typeof (value) == "object" && !Array.isArray(v) && !Array.isArray(value)) {
const wk = generatePatchObj(v, value);
if (Object.keys(wk).length > 0) ret[key] = wk;
} else if (typeof (v) == "object" && typeof (value) == "object" && Array.isArray(v) && Array.isArray(value)) {
const wk = generatePatchUnorderedArray(v, value);
if (Object.keys(wk).length > 0) ret[key] = wk;
} else if (typeof (v) != "object" && typeof (value) != "object") {
if (JSON.stringify(tempMap.get(key)) !== JSON.stringify(value)) {
ret[key] = value;
}
} else {
if (JSON.stringify(tempMap.get(key)) !== JSON.stringify(value)) {
ret[key] = { [MARK_SWAPPED]: value };
}
}
}
tempMap.delete(key);
}
}
// Keys remaining in tempMap were absent from `to`, which means they were deleted.
for (const [key,] of tempMap) {
ret[key] = MARK_DELETED;
}
return ret;
}
export function applyPatch(from: Record<string | number | symbol, any>, patch: Record<string | number | symbol, any>) {
const ret = from;
const patches = Object.entries(patch);
for (const [key, value] of patches) {
if (value == MARK_DELETED) {
delete ret[key];
continue;
}
if (typeof (value) == "object") {
if (MARK_SWAPPED in value) {
ret[key] = value[MARK_SWAPPED];
continue;
}
if (MARK_ISARRAY in value) {
if (!(key in ret)) ret[key] = [];
if (!Array.isArray(ret[key])) {
throw new Error("Patch target type is mismatched (array to something)");
}
const orgArrayObject = unorderedArrayToObject(ret[key]);
const appliedObject = applyPatch(orgArrayObject, value[MARK_ISARRAY]);
const appliedArray = objectToUnorderedArray(appliedObject);
ret[key] = [...appliedArray];
} else {
if (!(key in ret)) {
ret[key] = value;
continue;
}
ret[key] = applyPatch(ret[key], value);
}
} else {
ret[key] = value;
}
}
return ret;
}
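// Hedged round-trip sketch (illustrative only): a patch produced by generatePatchObj
// can be replayed onto a clone of the base object with applyPatch.
function examplePatchRoundTrip() {
const base = { title: "note", tags: ["a"], meta: { pinned: false } };
const edited = { title: "note", tags: ["a", "b"], meta: { pinned: true } };
const patch = generatePatchObj(base, edited);
// `tags` holds plain strings (no `id` fields), so the whole array is swapped;
// `meta.pinned` is patched in place.
return applyPatch(structuredClone(base), patch); // deep-equals `edited`
}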
export function mergeObject(
objA: Record<string | number | symbol, any> | [any],
objB: Record<string | number | symbol, any> | [any]
) {
const newEntries = Object.entries(objB);
const ret: any = { ...objA };
if (
typeof objA !== typeof objB ||
Array.isArray(objA) !== Array.isArray(objB)
) {
return objB;
}
for (const [key, v] of newEntries) {
if (key in ret) {
const value = ret[key];
if (
typeof v !== typeof value ||
Array.isArray(v) !== Array.isArray(value)
) {
// If the types do not match, replace the value completely.
ret[key] = v;
} else {
if (
typeof v == "object" &&
typeof value == "object" &&
!Array.isArray(v) &&
!Array.isArray(value)
) {
ret[key] = mergeObject(v, value);
} else if (
typeof v == "object" &&
typeof value == "object" &&
Array.isArray(v) &&
Array.isArray(value)
) {
ret[key] = [...new Set([...v, ...value])];
} else {
ret[key] = v;
}
}
} else {
ret[key] = v;
}
}
const retSorted = Object.fromEntries(Object.entries(ret).sort((a, b) => a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0));
if (Array.isArray(objA) && Array.isArray(objB)) {
return Object.values(retSorted);
}
return retSorted;
}
export function flattenObject(obj: Record<string | number | symbol, any>, path: string[] = []): [string, any][] {
if (typeof (obj) != "object") return [[path.join("."), obj]];
if (Array.isArray(obj)) return [[path.join("."), JSON.stringify(obj)]];
const e = Object.entries(obj);
const ret = []
for (const [key, value] of e) {
const p = flattenObject(value, [...path, key]);
ret.push(...p);
}
return ret;
}
export function isValidPath(filename: string) {
if (Platform.isDesktop) {
// if(Platform.isMacOS) return isValidFilenameInDarwin(filename);
@@ -137,10 +280,11 @@ export function trimPrefix(target: string, prefix: string) {
return target.startsWith(prefix) ? target.substring(prefix.length) : target;
}
/**
* Returns whether the given ID refers to internal (hidden) file metadata.
* @param id ID to check
* @returns true when the ID starts with the internal-metadata prefix
*/
export function isInternalMetadata(id: FilePath | FilePathWithPrefix | DocumentID): boolean {
return id.startsWith(ICHeader);
@@ -149,7 +293,7 @@ export function stripInternalMetadataPrefix<T extends FilePath | FilePathWithPre
return id.substring(ICHeaderLength) as T;
}
export function id2InternalMetadataId(id: DocumentID): DocumentID {
return (ICHeader + id) as DocumentID;
return ICHeader + id as DocumentID;
}
// const CHeaderLength = CHeader.length;
@@ -164,16 +308,37 @@ export function isCustomisationSyncMetadata(str: string): boolean {
return str.startsWith(ICXHeader);
}
export const askYesNo = (app: App, message: string): Promise<"yes" | "no"> => {
return new Promise((res) => {
const popover = new PopoverSelectString(app, message, undefined, undefined, (result) => res(result as "yes" | "no"));
popover.open();
});
};
export const askSelectString = (app: App, message: string, items: string[]): Promise<string> => {
const getItemsFun = () => items;
return new Promise((res) => {
const popover = new PopoverSelectString(app, message, "", getItemsFun, (result) => res(result));
popover.open();
});
};
export const askString = (app: App, title: string, key: string, placeholder: string, isPassword: boolean = false): Promise<string | false> => {
return new Promise((res) => {
const dialog = new InputStringDialog(app, title, key, placeholder, isPassword, (result) => res(result));
dialog.open();
});
};
export class PeriodicProcessor {
_process: () => Promise<any>;
_timer?: number = undefined;
_timer?: number;
_plugin: ObsidianLiveSyncPlugin;
constructor(plugin: ObsidianLiveSyncPlugin, process: () => Promise<any>) {
this._plugin = plugin;
this._process = process;
eventHub.onceEvent(EVENT_PLUGIN_UNLOADED, () => {
this.disable();
});
}
async process() {
try {
@@ -185,16 +350,12 @@ export class PeriodicProcessor {
enable(interval: number) {
this.disable();
if (interval == 0) return;
this._timer = window.setInterval(
() =>
fireAndForget(async () => {
await this.process();
if (this._plugin.services?.appLifecycle?.hasUnloaded()) {
this.disable();
}
}),
interval
);
this._timer = window.setInterval(() => fireAndForget(async () => {
await this.process();
if (this._plugin._unloaded) {
this.disable();
}
}), interval);
this._plugin.registerInterval(this._timer);
}
disable() {
@@ -205,21 +366,11 @@ export class PeriodicProcessor {
}
}
export const _requestToCouchDBFetch = async (
baseUri: string,
username: string,
password: string,
path?: string,
body?: string | any,
method?: string
) => {
export const _requestToCouchDBFetch = async (baseUri: string, username: string, password: string, path?: string, body?: string | any, method?: string) => {
const utf8str = String.fromCharCode.apply(null, [...writeString(`${username}:${password}`)]);
const encoded = window.btoa(utf8str);
const authHeader = "Basic " + encoded;
const transformedHeaders: Record<string, string> = {
authorization: authHeader,
"content-type": "application/json",
};
const transformedHeaders: Record<string, string> = { authorization: authHeader, "content-type": "application/json" };
const uri = `${baseUri}/${path}`;
const requestParam = {
url: uri,
@@ -229,21 +380,13 @@ export const _requestToCouchDBFetch = async (
body: JSON.stringify(body),
};
return await fetch(uri, requestParam);
};
}
export const _requestToCouchDB = async (
baseUri: string,
credentials: CouchDBCredentials,
origin: string,
path?: string,
body?: any,
method?: string,
customHeaders?: Record<string, string>
) => {
// Create each time to avoid caching.
const authHeaderGen = new AuthorizationHeaderGenerator();
const authHeader = await authHeaderGen.getAuthorizationHeader(credentials);
const transformedHeaders: Record<string, string> = { authorization: authHeader, origin: origin, ...customHeaders };
export const _requestToCouchDB = async (baseUri: string, username: string, password: string, origin: string, path?: string, body?: any, method?: string) => {
const utf8str = String.fromCharCode.apply(null, [...writeString(`${username}:${password}`)]);
const encoded = window.btoa(utf8str);
const authHeader = "Basic " + encoded;
const transformedHeaders: Record<string, string> = { authorization: authHeader, origin: origin };
const uri = `${baseUri}/${path}`;
const requestParam: RequestUrlParam = {
url: uri,
@@ -253,57 +396,37 @@ export const _requestToCouchDB = async (
body: body ? JSON.stringify(body) : undefined,
};
return await requestUrl(requestParam);
};
/**
* @deprecated Use requestToCouchDBWithCredentials instead.
*/
export const requestToCouchDB = async (
baseUri: string,
username: string,
password: string,
origin: string = "",
key?: string,
body?: string,
method?: string,
customHeaders?: Record<string, string>
) => {
}
export const requestToCouchDB = async (baseUri: string, username: string, password: string, origin: string = "", key?: string, body?: string, method?: string) => {
const uri = `_node/_local/_config${key ? "/" + key : ""}`;
return await _requestToCouchDB(
baseUri,
{ username, password, type: "basic" },
origin,
uri,
body,
method,
customHeaders
);
return await _requestToCouchDB(baseUri, username, password, origin, uri, body, method);
};
export function requestToCouchDBWithCredentials(
baseUri: string,
credentials: CouchDBCredentials,
origin: string = "",
key?: string,
body?: string,
method?: string,
customHeaders?: Record<string, string>
) {
const uri = `_node/_local/_config${key ? "/" + key : ""}`;
return _requestToCouchDB(baseUri, credentials, origin, uri, body, method, customHeaders);
export async function performRebuildDB(plugin: ObsidianLiveSyncPlugin, method: "localOnly" | "remoteOnly" | "rebuildBothByThisDevice" | "localOnlyWithChunks") {
if (method == "localOnly") {
await plugin.addOnSetup.fetchLocal();
}
if (method == "localOnlyWithChunks") {
await plugin.addOnSetup.fetchLocal(true);
}
if (method == "remoteOnly") {
await plugin.addOnSetup.rebuildRemote();
}
if (method == "rebuildBothByThisDevice") {
await plugin.addOnSetup.rebuildEverything();
}
}
export const BASE_IS_NEW = Symbol("base");
export const TARGET_IS_NEW = Symbol("target");
export const EVEN = Symbol("even");
// Why 2000? ZIP files store timestamps with only two-second resolution.
const resolution = 2000;
export function compareMTime(
baseMTime: number,
targetMTime: number
): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
const truncatedBaseMTime = ~~(baseMTime / resolution) * resolution;
const truncatedTargetMTime = ~~(targetMTime / resolution) * resolution;
export function compareMTime(baseMTime: number, targetMTime: number): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
const truncatedBaseMTime = (~~(baseMTime / resolution)) * resolution;
const truncatedTargetMTime = (~~(targetMTime / resolution)) * resolution;
// Logger(`Resolution MTime ${truncatedBaseMTime} and ${truncatedTargetMTime} `, LOG_LEVEL_VERBOSE);
if (truncatedBaseMTime == truncatedTargetMTime) return EVEN;
if (truncatedBaseMTime > truncatedTargetMTime) return BASE_IS_NEW;
@@ -311,43 +434,30 @@ export function compareMTime(
throw new Error("Unexpected error");
}
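// Quick illustration (values assumed): timestamps within the same 2000 ms bucket
// compare as EVEN, so archive-level precision loss cannot fake a "newer" file.
// compareMTime(1_700_000_001_500, 1_700_000_000_900); // => EVEN
// compareMTime(1_700_000_002_100, 1_700_000_000_900); // => BASE_IS_NEW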
function getKey(file: AnyEntry | string | UXFileInfoStub) {
const key = typeof file == "string" ? file : stripAllPrefixes(file.path);
return key;
}
export function markChangesAreSame(file: AnyEntry | string | UXFileInfoStub, mtime1: number, mtime2: number) {
export function markChangesAreSame(file: TFile | AnyEntry | string, mtime1: number, mtime2: number) {
if (mtime1 === mtime2) return true;
const key = getKey(file);
const key = typeof file == "string" ? file : file instanceof TFile ? file.path : file.path ?? file._id;
const pairs = sameChangePairs.get(key, []) || [];
if (pairs.some((e) => e == mtime1 || e == mtime2)) {
if (pairs.some(e => e == mtime1 || e == mtime2)) {
sameChangePairs.set(key, [...new Set([...pairs, mtime1, mtime2])]);
} else {
sameChangePairs.set(key, [mtime1, mtime2]);
}
}
export function unmarkChanges(file: AnyEntry | string | UXFileInfoStub) {
const key = getKey(file);
sameChangePairs.delete(key);
}
export function isMarkedAsSameChanges(file: UXFileInfoStub | AnyEntry | string, mtimes: number[]) {
const key = getKey(file);
export function isMarkedAsSameChanges(file: TFile | AnyEntry | string, mtimes: number[]) {
const key = typeof file == "string" ? file : file instanceof TFile ? file.path : file.path ?? file._id;
const pairs = sameChangePairs.get(key, []) || [];
if (mtimes.every((e) => pairs.indexOf(e) !== -1)) {
if (mtimes.every(e => pairs.indexOf(e) !== -1)) {
return EVEN;
}
}
export function compareFileFreshness(
baseFile: UXFileInfoStub | AnyEntry | undefined,
checkTarget: UXFileInfo | AnyEntry | undefined
): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
export function compareFileFreshness(baseFile: TFile | AnyEntry | undefined, checkTarget: TFile | AnyEntry | undefined): typeof BASE_IS_NEW | typeof TARGET_IS_NEW | typeof EVEN {
if (baseFile === undefined && checkTarget == undefined) return EVEN;
if (baseFile == undefined) return TARGET_IS_NEW;
if (checkTarget == undefined) return BASE_IS_NEW;
const modifiedBase = "stat" in baseFile ? (baseFile?.stat?.mtime ?? 0) : (baseFile?.mtime ?? 0);
const modifiedTarget = "stat" in checkTarget ? (checkTarget?.stat?.mtime ?? 0) : (checkTarget?.mtime ?? 0);
const modifiedBase = baseFile instanceof TFile ? baseFile?.stat?.mtime ?? 0 : baseFile?.mtime ?? 0;
const modifiedTarget = checkTarget instanceof TFile ? checkTarget?.stat?.mtime ?? 0 : checkTarget?.mtime ?? 0;
if (modifiedBase && modifiedTarget && isMarkedAsSameChanges(baseFile, [modifiedBase, modifiedTarget])) {
return EVEN;
@@ -355,214 +465,3 @@ export function compareFileFreshness(
return compareMTime(modifiedBase, modifiedTarget);
}
const _cached = new Map<
string,
{
value: any;
context: Map<string, any>;
}
>();
export type MemoOption = {
key: string;
forceUpdate?: boolean;
validator?: (context: Map<string, any>) => boolean;
};
export function useMemo<T>(
{ key, forceUpdate, validator }: MemoOption,
updateFunc: (context: Map<string, any>, prev: T) => T
): T {
const cached = _cached.get(key);
const context = cached?.context || new Map<string, any>();
if (cached && !forceUpdate && (!validator || (validator && !validator(context)))) {
return cached.value;
}
const value = updateFunc(context, cached?.value);
if (value !== cached?.value) {
_cached.set(key, { value, context });
}
return value;
}
// const _static = new Map<string, any>();
const _staticObj = new Map<
string,
{
value: any;
}
>();
export function useStatic<T>(key: string): { value: T | undefined };
export function useStatic<T>(key: string, initial: T): { value: T };
export function useStatic<T>(key: string, initial?: T) {
// if (!_static.has(key) && initial) {
// _static.set(key, initial);
// }
const obj = _staticObj.get(key);
if (obj !== undefined) {
return obj;
} else {
// let buf = initial;
const obj = {
_buf: initial,
get value() {
return this._buf as T;
},
set value(value: T) {
this._buf = value;
},
};
_staticObj.set(key, obj);
return obj;
}
}
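// Hedged sketch: useStatic returns the same keyed holder object on every call,
// so the value survives re-evaluation of the calling code.
function exampleUseStatic() {
const reloadCount = useStatic("reload-count", 0); // key assumed
reloadCount.value += 1; // later callers with the same key observe this value
return reloadCount.value;
}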
export function disposeMemo(key: string) {
_cached.delete(key);
}
export function disposeAllMemo() {
_cached.clear();
}
export function displayRev(rev: string) {
const [number, hash] = rev.split("-");
return `${number}-${hash.substring(0, 6)}`;
}
type DocumentProps = {
id: DocumentID;
rev?: string;
prefixedPath: FilePathWithPrefix;
path: FilePath;
isDeleted: boolean;
revDisplay: string;
shortenedId: string;
shortenedPath: string;
};
export function getDocProps(doc: AnyEntry): DocumentProps {
const id = doc._id;
const shortenedId = id.substring(0, 10);
const prefixedPath = getPath(doc);
const path = stripAllPrefixes(prefixedPath);
const rev = doc._rev;
const revDisplay = rev ? displayRev(rev) : "0-NOREVS";
// const prefix = prefixedPath.substring(0, prefixedPath.length - path.length);
const shortenedPath = path.substring(0, 10);
const isDeleted = doc._deleted || doc.deleted || false;
return { id, rev, revDisplay, prefixedPath, path, isDeleted, shortenedId, shortenedPath };
}
export function getLogLevel(showNotice: boolean) {
return showNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
}
export type MapLike<K, V> = {
set(key: K, value: V): Map<K, V>;
clear(): void;
delete(key: K): boolean;
get(key: K): V | undefined;
has(key: K): boolean;
keys: () => IterableIterator<K>;
get size(): number;
};
export async function autosaveCache<K, V>(db: KeyValueDatabase, mapKey: string): Promise<MapLike<K, V>> {
const savedData = (await db.get<Map<K, V>>(mapKey)) ?? new Map<K, V>();
const _commit = () => {
try {
scheduleTask("commit-map-save-" + mapKey, 250, async () => {
await db.set(mapKey, savedData);
});
} catch {
// NO OP.
}
};
return {
set(key: K, value: V) {
const modified = savedData.get(key) !== value;
const result = savedData.set(key, value);
if (modified) {
_commit();
}
return result;
},
clear(): void {
savedData.clear();
_commit();
},
delete(key: K): boolean {
const result = savedData.delete(key);
if (result) {
_commit();
}
return result;
},
get(key: K): V | undefined {
return savedData.get(key);
},
has(key) {
return savedData.has(key);
},
keys() {
return savedData.keys();
},
get size() {
return savedData.size;
},
};
}
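// Hedged usage sketch: a Map-like wrapper whose writes are batched into the
// KeyValueDatabase roughly every 250 ms instead of on every mutation.
async function exampleAutosaveCache(db: KeyValueDatabase) {
const seenRevs = await autosaveCache<string, string>(db, "seen-revs"); // map key assumed
seenRevs.set("docA", "3-abc123"); // schedules one deferred commit for this burst
seenRevs.set("docB", "7-def456");
}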
export function onlyInNTimes(n: number, proc: (progress: number) => any) {
let counter = 0;
return function () {
if (counter++ % n == 0) {
proc(counter);
}
};
}
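// Hedged sketch: report progress for every 100th invocation only; `docs` is assumed.
// const tick = onlyInNTimes(100, (count) => Logger(`Processed ${count} items`));
// for (const doc of docs) tick();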
const waitingTasks = {} as Record<string, { task?: PromiseWithResolvers<any>; previous: number; leastNext: number }>;
export function rateLimitedSharedExecution<T>(key: string, interval: number, proc: () => Promise<T>): Promise<T> {
if (!(key in waitingTasks)) {
waitingTasks[key] = { task: undefined, previous: 0, leastNext: 0 };
}
if (waitingTasks[key].task) {
// Push back the earliest allowed start of the next execution.
waitingTasks[key].leastNext = Date.now() + interval;
return waitingTasks[key].task.promise;
}
const previous = waitingTasks[key].previous;
const delay = previous == 0 ? 0 : Math.max(interval - (Date.now() - previous), 0);
const task = promiseWithResolver<T>();
void task.promise.finally(() => {
if (waitingTasks[key].task === task) {
waitingTasks[key].task = undefined;
waitingTasks[key].previous = Math.max(Date.now(), waitingTasks[key].leastNext);
}
});
waitingTasks[key] = {
task,
previous: Date.now(),
leastNext: Date.now() + interval,
};
void scheduleTask("thin-out-" + key, delay, async () => {
try {
task.resolve(await proc());
} catch (ex) {
task.reject(ex);
}
});
return task.promise;
}
export function updatePreviousExecutionTime(key: string, timeDelta: number = 0) {
if (!(key in waitingTasks)) {
waitingTasks[key] = { task: undefined, previous: 0, leastNext: 0 };
}
waitingTasks[key].leastNext = Math.max(Date.now() + timeDelta, waitingTasks[key].leastNext);
}
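// Hedged usage sketch: callers arriving while a run is in flight share the same
// promise, and each arrival pushes the next allowed run further out.
async function exampleRateLimited() {
return await rateLimitedSharedExecution("remote-status", 5000, async () => {
// stand-in for any expensive probe; the real callback is caller-supplied
return Date.now();
});
}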

View File

@@ -1,47 +1,13 @@
import { type FilePath } from "./lib/src/common/types.ts";
export {
addIcon,
App,
debounce,
Editor,
FuzzySuggestModal,
MarkdownRenderer,
MarkdownView,
Modal,
Notice,
Platform,
Plugin,
PluginSettingTab,
requestUrl,
sanitizeHTMLToDom,
Setting,
stringifyYaml,
TAbstractFile,
TextAreaComponent,
TFile,
TFolder,
parseYaml,
ItemView,
WorkspaceLeaf,
Menu,
request,
getLanguage,
ButtonComponent,
TextComponent,
ToggleComponent,
DropdownComponent,
addIcon, App, debounce, Editor, FuzzySuggestModal, MarkdownRenderer, MarkdownView, Modal, Notice, Platform, Plugin, PluginSettingTab, requestUrl, sanitizeHTMLToDom, Setting, stringifyYaml, TAbstractFile, TextAreaComponent, TFile, TFolder,
parseYaml, ItemView, WorkspaceLeaf
} from "obsidian";
export type {
DataWriteOptions,
PluginManifest,
RequestUrlParam,
RequestUrlResponse,
MarkdownFileInfo,
ListedFiles,
ValueComponent,
export type { DataWriteOptions, PluginManifest, RequestUrlParam, RequestUrlResponse, MarkdownFileInfo, ListedFiles } from "obsidian";
import {
normalizePath as normalizePath_
} from "obsidian";
import { normalizePath as normalizePath_ } from "obsidian";
const normalizePath = normalizePath_ as <T extends string | FilePath>(from: T) => T;
export { normalizePath };
export { type Diff, DIFF_DELETE, DIFF_EQUAL, DIFF_INSERT, diff_match_patch } from "diff-match-patch";
export { normalizePath }
export { type Diff, DIFF_DELETE, DIFF_EQUAL, DIFF_INSERT, diff_match_patch } from "diff-match-patch";

View File

@@ -0,0 +1,776 @@
import { normalizePath, type PluginManifest, type ListedFiles } from "../deps.ts";
import { type EntryDoc, type LoadedEntry, type InternalFileEntry, type FilePathWithPrefix, type FilePath, LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE, MODE_SELECTIVE, MODE_PAUSED, type SavingEntry, type DocumentID } from "../lib/src/common/types.ts";
import { type InternalFileInfo, ICHeader, ICHeaderEnd } from "../common/types.ts";
import { readAsBlob, isDocContentSame, sendSignal, readContent, createBlob } from "../lib/src/common/utils.ts";
import { Logger } from "../lib/src/common/logger.ts";
import { PouchDB } from "../lib/src/pouchdb/pouchdb-browser.js";
import { isInternalMetadata, PeriodicProcessor } from "../common/utils.ts";
import { serialized } from "../lib/src/concurrency/lock.ts";
import { JsonResolveModal } from "../ui/JsonResolveModal.ts";
import { LiveSyncCommands } from "./LiveSyncCommands.ts";
import { addPrefix, stripAllPrefixes } from "../lib/src/string_and_binary/path.ts";
import { QueueProcessor } from "../lib/src/concurrency/processor.ts";
import { hiddenFilesEventCount, hiddenFilesProcessingCount } from "../lib/src/mock_and_interop/stores.ts";
export class HiddenFileSync extends LiveSyncCommands {
periodicInternalFileScanProcessor: PeriodicProcessor = new PeriodicProcessor(this.plugin, async () => this.settings.syncInternalFiles && this.localDatabase.isReady && await this.syncInternalFilesAndDatabase("push", false));
get kvDB() {
return this.plugin.kvDB;
}
getConflictedDoc(path: FilePathWithPrefix, rev: string) {
return this.plugin.getConflictedDoc(path, rev);
}
onunload() {
this.periodicInternalFileScanProcessor?.disable();
}
onload() {
this.plugin.addCommand({
id: "livesync-scaninternal",
name: "Sync hidden files",
callback: () => {
this.syncInternalFilesAndDatabase("safe", true);
},
});
}
async onInitializeDatabase(showNotice: boolean) {
if (this.settings.syncInternalFiles) {
try {
Logger("Synchronizing hidden files...");
await this.syncInternalFilesAndDatabase("push", showNotice);
Logger("Synchronizing hidden files done");
} catch (ex) {
Logger("Synchronizing hidden files failed");
Logger(ex, LOG_LEVEL_VERBOSE);
}
}
}
async beforeReplicate(showNotice: boolean) {
if (this.localDatabase.isReady && this.settings.syncInternalFiles && this.settings.syncInternalFilesBeforeReplication && !this.settings.watchInternalFileChanges) {
await this.syncInternalFilesAndDatabase("push", showNotice);
}
}
async onResume() {
this.periodicInternalFileScanProcessor?.disable();
if (this.plugin.suspended)
return;
if (this.settings.syncInternalFiles) {
await this.syncInternalFilesAndDatabase("safe", false);
}
this.periodicInternalFileScanProcessor.enable(this.settings.syncInternalFiles && this.settings.syncInternalFilesInterval ? (this.settings.syncInternalFilesInterval * 1000) : 0);
}
parseReplicationResultItem(docs: PouchDB.Core.ExistingDocument<EntryDoc>) {
return false;
}
realizeSettingSyncMode(): Promise<void> {
this.periodicInternalFileScanProcessor?.disable();
if (this.plugin.suspended)
return Promise.resolve();
if (!this.plugin.isReady)
return Promise.resolve();
this.periodicInternalFileScanProcessor.enable(this.settings.syncInternalFiles && this.settings.syncInternalFilesInterval ? (this.settings.syncInternalFilesInterval * 1000) : 0);
return Promise.resolve();
}
procInternalFile(filename: string) {
this.internalFileProcessor.enqueue(filename);
}
internalFileProcessor = new QueueProcessor<string, any>(
async (filenames) => {
Logger(`START: Applying ${filenames.length} hidden file changes`, LOG_LEVEL_VERBOSE);
await this.syncInternalFilesAndDatabase("pull", false, false, filenames);
Logger(`DONE: Applying ${filenames.length} hidden file changes`, LOG_LEVEL_VERBOSE);
return;
}, { batchSize: 100, concurrentLimit: 1, delay: 10, yieldThreshold: 100, suspended: false, totalRemainingReactiveSource: hiddenFilesEventCount }
);
recentProcessedInternalFiles = [] as string[];
async watchVaultRawEventsAsync(path: FilePath) {
if (!this.settings.syncInternalFiles) return;
// Exclude files handled by customization sync
const configDir = normalizePath(this.app.vault.configDir);
const synchronisedInConfigSync = !this.settings.usePluginSync ? [] : Object.values(this.settings.pluginSyncExtendedSetting).filter(e => e.mode == MODE_SELECTIVE || e.mode == MODE_PAUSED).map(e => e.files).flat().map(e => `${configDir}/${e}`.toLowerCase());
if (synchronisedInConfigSync.some(e => e.startsWith(path.toLowerCase()))) {
Logger(`Hidden file skipped: ${path} is synchronized in customization sync.`, LOG_LEVEL_VERBOSE);
return;
}
const stat = await this.vaultAccess.adapterStat(path);
// Sometimes a folder event arrives here; ignore anything that is not a file.
if (stat != null && stat.type != "file") {
return;
}
const mtime = stat == null ? 0 : stat?.mtime ?? 0;
const storageMTime = ~~((mtime) / 1000);
const key = `${path}-${storageMTime}`;
if (mtime != 0 && this.recentProcessedInternalFiles.contains(key)) {
// If processed recently, the event was probably caused by our own write; skip it.
return;
}
this.recentProcessedInternalFiles = [key, ...this.recentProcessedInternalFiles].slice(0, 100);
// const id = await this.path2id(path, ICHeader);
const prefixedFileName = addPrefix(path, ICHeader);
const filesOnDB = await this.localDatabase.getDBEntryMeta(prefixedFileName);
const dbMTime = ~~((filesOnDB && filesOnDB.mtime || 0) / 1000);
// Skip unchanged file.
if (dbMTime == storageMTime) {
// Logger(`STORAGE --> DB:${path}: (hidden) Nothing changed`);
return;
}
// Do not compare timestamps. Local data should always be preferred, unless this plugin itself wrote the entry.
if (storageMTime == 0) {
await this.deleteInternalFileOnDatabase(path);
} else {
await this.storeInternalFileToDatabase({ path: path, mtime, ctime: stat?.ctime ?? mtime, size: stat?.size ?? 0 });
}
}
async resolveConflictOnInternalFiles() {
// Scan all conflicted internal files
const conflicted = this.localDatabase.findEntries(ICHeader, ICHeaderEnd, { conflicts: true });
this.conflictResolutionProcessor.suspend();
try {
for await (const doc of conflicted) {
if (!("_conflicts" in doc))
continue;
if (isInternalMetadata(doc._id)) {
this.conflictResolutionProcessor.enqueue(doc.path);
}
}
} catch (ex) {
Logger("something went wrong on resolving all conflicted internal files");
Logger(ex, LOG_LEVEL_VERBOSE);
}
await this.conflictResolutionProcessor.startPipeline().waitForAllProcessed();
}
async resolveByNewerEntry(id: DocumentID, path: FilePathWithPrefix, currentDoc: EntryDoc, currentRev: string, conflictedRev: string) {
const conflictedDoc = await this.localDatabase.getRaw(id, { rev: conflictedRev });
// Determine which revision should be deleted by simply comparing modification times.
const mtimeCurrent = ("mtime" in currentDoc && currentDoc.mtime) || 0;
const mtimeConflicted = ("mtime" in conflictedDoc && conflictedDoc.mtime) || 0;
// Logger(`Revisions:${new Date(mtimeA).toLocaleString} and ${new Date(mtimeB).toLocaleString}`);
// console.log(`mtime:${mtimeA} - ${mtimeB}`);
const delRev = mtimeCurrent < mtimeConflicted ? currentRev : conflictedRev;
// delete older one.
await this.localDatabase.removeRevision(id, delRev);
Logger(`Older one has been deleted:${path}`);
const cc = await this.localDatabase.getRaw(id, { conflicts: true });
if (cc._conflicts?.length === 0) {
await this.extractInternalFileFromDatabase(stripAllPrefixes(path));
} else {
// Other conflicts remain; check the file again.
this.conflictResolutionProcessor.enqueue(path);
}
}
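// Two-stage pipeline: this first stage resolves conflicts automatically (object
// merge for JSON files, newer-wins otherwise); anything it cannot settle is
// passed downstream, where the interactive JSON merge dialog is opened.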
conflictResolutionProcessor = new QueueProcessor(async (paths: FilePathWithPrefix[]) => {
const path = paths[0];
sendSignal(`cancel-internal-conflict:${path}`);
try {
// Retrieve data
const id = await this.path2id(path, ICHeader);
const doc = await this.localDatabase.getRaw(id, { conflicts: true });
// if (!("_conflicts" in doc)){
// return [];
// }
if (doc._conflicts === undefined) return [];
if (doc._conflicts.length == 0)
return [];
Logger(`Hidden file conflicted:${path}`);
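// PouchDB revisions look like "N-hash"; sort ascending by generation number N
// so the oldest conflicted revision is handled first.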
const conflicts = doc._conflicts.sort((a, b) => Number(a.split("-")[0]) - Number(b.split("-")[0]));
const revA = doc._rev;
const revB = conflicts[0];
if (path.endsWith(".json")) {
const conflictedRev = conflicts[0];
const conflictedRevNo = Number(conflictedRev.split("-")[0]);
//Search
const revFrom = (await this.localDatabase.getRaw<EntryDoc>(id, { revs_info: true }));
const commonBase = revFrom._revs_info?.filter(e => e.status == "available" && Number(e.rev.split("-")[0]) < conflictedRevNo).first()?.rev ?? "";
const result = await this.plugin.mergeObject(path, commonBase, doc._rev, conflictedRev);
if (result) {
Logger(`Object merge:${path}`, LOG_LEVEL_INFO);
const filename = stripAllPrefixes(path);
const isExists = await this.plugin.vaultAccess.adapterExists(filename);
if (!isExists) {
await this.vaultAccess.ensureDirectory(filename);
}
await this.plugin.vaultAccess.adapterWrite(filename, result);
const stat = await this.vaultAccess.adapterStat(filename);
if (!stat) {
throw new Error(`conflictResolutionProcessor: Failed to stat file ${filename}`);
}
await this.storeInternalFileToDatabase({ path: filename, ...stat });
await this.extractInternalFileFromDatabase(filename);
await this.localDatabase.removeRevision(id, revB);
this.conflictResolutionProcessor.enqueue(path);
return [];
} else {
Logger(`Object merge is not applicable.`, LOG_LEVEL_VERBOSE);
}
return [{ path, revA, revB, id, doc }];
}
// For non-JSON files, resolve the conflict by keeping the newer revision.
await this.resolveByNewerEntry(id, path, doc, revA, revB);
return [];
} catch (ex) {
Logger(`Failed to resolve conflict (Hidden): ${path}`);
Logger(ex, LOG_LEVEL_VERBOSE);
return [];
}
}, {
suspended: false, batchSize: 1, concurrentLimit: 5, delay: 10, keepResultUntilDownstreamConnected: true, yieldThreshold: 10,
pipeTo: new QueueProcessor(async (results) => {
const { id, doc, path, revA, revB } = results[0];
const docAMerge = await this.localDatabase.getDBEntry(path, { rev: revA });
const docBMerge = await this.localDatabase.getDBEntry(path, { rev: revB });
if (docAMerge != false && docBMerge != false) {
if (await this.showJSONMergeDialogAndMerge(docAMerge, docBMerge)) {
// Again for other conflicted revisions.
this.conflictResolutionProcessor.enqueue(path);
}
return;
} else {
// If either revision could not be read, force resolution by the newer one.
await this.resolveByNewerEntry(id, path, doc, revA, revB);
}
}, { suspended: false, batchSize: 1, concurrentLimit: 1, delay: 10, keepResultUntilDownstreamConnected: false, yieldThreshold: 10 })
})
queueConflictCheck(path: FilePathWithPrefix) {
this.conflictResolutionProcessor.enqueue(path);
}
//TODO: Tidy up. Even though it is an experimental feature, this is still quite messy...
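// Direction semantics, as implemented below: when a file exists on both sides,
// the newer one wins ("pushForce"/"pullForce" override this unconditionally).
// When it exists only in the database, "push" marks it deleted there, while
// "pull" and "safe" extract it to storage. When it exists only on storage,
// "push" and "safe" store it into the database.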
async syncInternalFilesAndDatabase(direction: "push" | "pull" | "safe" | "pullForce" | "pushForce", showMessage: boolean, filesAll: InternalFileInfo[] | false = false, targetFiles: string[] | false = false) {
await this.resolveConflictOnInternalFiles();
const logLevel = showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
Logger("Scanning hidden files.", logLevel, "sync_internal");
const ignorePatterns = this.settings.syncInternalFilesIgnorePatterns
.replace(/\n| /g, "")
.split(",").filter(e => e).map(e => new RegExp(e, "i"));
const configDir = normalizePath(this.app.vault.configDir);
let files: InternalFileInfo[] =
filesAll ? filesAll : (await this.scanInternalFiles())
const synchronisedInConfigSync = !this.settings.usePluginSync ? [] : Object.values(this.settings.pluginSyncExtendedSetting).filter(e => e.mode == MODE_SELECTIVE || e.mode == MODE_PAUSED).map(e => e.files).flat().map(e => `${configDir}/${e}`.toLowerCase());
files = files.filter(file => synchronisedInConfigSync.every(filterFile => !file.path.toLowerCase().startsWith(filterFile)))
const filesOnDB = ((await this.localDatabase.allDocsRaw({ startkey: ICHeader, endkey: ICHeaderEnd, include_docs: true })).rows.map(e => e.doc) as InternalFileEntry[]).filter(e => !e.deleted);
const allFileNamesSrc = [...new Set([...files.map(e => normalizePath(e.path)), ...filesOnDB.map(e => stripAllPrefixes(this.getPath(e)))])];
const allFileNames = allFileNamesSrc.filter(filename => !targetFiles || (targetFiles && targetFiles.indexOf(filename) !== -1)).filter(path => synchronisedInConfigSync.every(filterFile => !path.toLowerCase().startsWith(filterFile)))
function compareMTime(a: number, b: number) {
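// Compare at one-second granularity; sub-second mtime precision is not reliable
// across platforms and storage backends.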
const wa = ~~(a / 1000);
const wb = ~~(b / 1000);
const diff = wa - wb;
return diff;
}
const fileCount = allFileNames.length;
let processed = 0;
let filesChanged = 0;
// Count updated files per folder, accumulating along every ancestor path, e.g.:
// .obsidian: 2
// .obsidian/workspace: 1
// .obsidian/plugins: 1
// .obsidian/plugins/recent-files-obsidian: 1
// .obsidian/plugins/recent-files-obsidian/data.json: 1
const updatedFolders: { [key: string]: number; } = {};
const countUpdatedFolder = (path: string) => {
const pieces = path.split("/");
let c = pieces.shift();
let pathPieces = "";
filesChanged++;
while (c) {
pathPieces += (pathPieces != "" ? "/" : "") + c;
pathPieces = normalizePath(pathPieces);
if (!(pathPieces in updatedFolders)) {
updatedFolders[pathPieces] = 0;
}
updatedFolders[pathPieces]++;
c = pieces.shift();
}
};
// Cache timestamps for files that have already been processed (mainly those skipped because the content was identical).
let caches: { [key: string]: { storageMtime: number; docMtime: number; }; } = {};
caches = await this.kvDB.get<{ [key: string]: { storageMtime: number; docMtime: number; }; }>("diff-caches-internal") || {};
const filesMap = files.reduce((acc, cur) => {
acc[cur.path] = cur;
return acc;
}, {} as { [key: string]: InternalFileInfo; });
const filesOnDBMap = filesOnDB.reduce((acc, cur) => {
acc[stripAllPrefixes(this.getPath(cur))] = cur;
return acc;
}, {} as { [key: string]: InternalFileEntry; });
await new QueueProcessor(async (filenames: FilePath[]) => {
const filename = filenames[0];
processed++;
if (processed % 100 == 0) {
Logger(`Hidden file: ${processed}/${fileCount}`, logLevel, "sync_internal");
}
if (!filename) return [];
if (ignorePatterns.some(e => filename.match(e)))
return [];
if (await this.plugin.isIgnoredByIgnoreFiles(filename)) {
return [];
}
const fileOnStorage = filename in filesMap ? filesMap[filename] : undefined;
const fileOnDatabase = filename in filesOnDBMap ? filesOnDBMap[filename] : undefined;
return [{
filename,
fileOnStorage,
fileOnDatabase,
}]
}, { suspended: true, batchSize: 1, concurrentLimit: 10, delay: 0, totalRemainingReactiveSource: hiddenFilesProcessingCount })
.pipeTo(new QueueProcessor(async (params) => {
const
{
filename,
fileOnStorage: xFileOnStorage,
fileOnDatabase: xFileOnDatabase
} = params[0];
if (xFileOnStorage && xFileOnDatabase) {
const cache = filename in caches ? caches[filename] : { storageMtime: 0, docMtime: 0 };
// Both => Synchronize
if ((direction != "pullForce" && direction != "pushForce") && xFileOnDatabase.mtime == cache.docMtime && xFileOnStorage.mtime == cache.storageMtime) {
return;
}
const nw = compareMTime(xFileOnStorage.mtime, xFileOnDatabase.mtime);
if (nw > 0 || direction == "pushForce") {
await this.storeInternalFileToDatabase(xFileOnStorage);
}
if (nw < 0 || direction == "pullForce") {
// Skip if no extraction was performed.
if (!await this.extractInternalFileFromDatabase(filename))
return;
}
// If the update succeeded or the file contents are identical, refresh the cache.
cache.docMtime = xFileOnDatabase.mtime;
cache.storageMtime = xFileOnStorage.mtime;
caches[filename] = cache;
countUpdatedFolder(filename);
} else if (!xFileOnStorage && xFileOnDatabase) {
if (direction == "push" || direction == "pushForce") {
if (xFileOnDatabase.deleted)
return;
await this.deleteInternalFileOnDatabase(filename, false);
} else if (direction == "pull" || direction == "pullForce") {
if (await this.extractInternalFileFromDatabase(filename)) {
countUpdatedFolder(filename);
}
} else if (direction == "safe") {
if (xFileOnDatabase.deleted)
return;
if (await this.extractInternalFileFromDatabase(filename)) {
countUpdatedFolder(filename);
}
}
} else if (xFileOnStorage && !xFileOnDatabase) {
if (direction == "push" || direction == "pushForce" || direction == "safe") {
await this.storeInternalFileToDatabase(xFileOnStorage);
} else {
await this.extractInternalFileFromDatabase(xFileOnStorage.path);
}
} else {
throw new Error("Invalid state on hidden file sync");
// Something corrupted?
}
return;
}, { suspended: true, batchSize: 1, concurrentLimit: 5, delay: 0 }))
.root
.enqueueAll(allFileNames)
.startPipeline().waitForAllDoneAndTerminate();
await this.kvDB.set("diff-caches-internal", caches);
// Files that have been retrieved from the database must be reloaded.
if ((direction == "pull" || direction == "pullForce") && filesChanged != 0) {
// Show a notification to restart Obsidian when something inside configDir has changed.
if (configDir in updatedFolders) {
// Number of updated files under configDir.
let updatedCount = updatedFolders[configDir];
try {
//@ts-ignore
const manifests = Object.values(this.app.plugins.manifests) as any as PluginManifest[];
//@ts-ignore
const enabledPlugins = this.app.plugins.enabledPlugins as Set<string>;
const enabledPluginManifests = manifests.filter(e => enabledPlugins.has(e.id));
for (const manifest of enabledPluginManifests) {
if (manifest.dir && manifest.dir in updatedFolders) {
// If notified about plug-ins, reloading Obsidian may not be necessary.
updatedCount -= updatedFolders[manifest.dir];
const updatePluginId = manifest.id;
const updatePluginName = manifest.name;
this.plugin.askInPopup(`updated-${updatePluginId}`, `Files in ${updatePluginName} have been updated. Press {HERE} to reload ${updatePluginName}, or press elsewhere to dismiss this message.`, (anchor) => {
anchor.text = "HERE";
anchor.addEventListener("click", async () => {
Logger(`Unloading plugin: ${updatePluginName}`, LOG_LEVEL_NOTICE, "plugin-reload-" + updatePluginId);
// @ts-ignore
await this.app.plugins.unloadPlugin(updatePluginId);
// @ts-ignore
await this.app.plugins.loadPlugin(updatePluginId);
Logger(`Plugin reloaded: ${updatePluginName}`, LOG_LEVEL_NOTICE, "plugin-reload-" + updatePluginId);
});
}
);
}
}
} catch (ex) {
Logger("Error on checking plugin status.");
Logger(ex, LOG_LEVEL_VERBOSE);
}
// If any changes remain, ask the user to reload Obsidian.
if (updatedCount != 0) {
if (!this.plugin.isReloadingScheduled) {
this.plugin.askInPopup(`updated-any-hidden`, `Hidden files have been synchronised. Press {HERE} to schedule a reload of Obsidian, or press elsewhere to dismiss this message.`, (anchor) => {
anchor.text = "HERE";
anchor.addEventListener("click", () => {
this.plugin.scheduleAppReload();
});
});
}
}
}
}
Logger(`Hidden files scanned: ${filesChanged} files have been modified`, logLevel, "sync_internal");
}
async storeInternalFileToDatabase(file: InternalFileInfo, forceWrite = false) {
if (await this.plugin.isIgnoredByIgnoreFiles(file.path)) {
return
}
const id = await this.path2id(file.path, ICHeader);
const prefixedFileName = addPrefix(file.path, ICHeader);
const content = createBlob(await this.plugin.vaultAccess.adapterReadAuto(file.path));
const mtime = file.mtime;
return await serialized("file-" + prefixedFileName, async () => {
try {
const old = await this.localDatabase.getDBEntry(prefixedFileName, undefined, false, false);
let saveData: SavingEntry;
if (old === false) {
saveData = {
_id: id,
path: prefixedFileName,
data: content,
mtime,
ctime: mtime,
datatype: "newnote",
size: file.size,
children: [],
deleted: false,
type: "newnote",
eden: {},
};
} else {
if (await isDocContentSame(readAsBlob(old), content) && !forceWrite) {
// Logger(`STORAGE --> DB:${file.path}: (hidden) Not changed`, LOG_LEVEL_VERBOSE);
return;
}
saveData =
{
...old,
data: content,
mtime,
size: file.size,
datatype: old.datatype,
children: [],
deleted: false,
type: old.datatype,
};
}
const ret = await this.localDatabase.putDBEntry(saveData);
Logger(`STORAGE --> DB:${file.path}: (hidden) Done`);
return ret;
} catch (ex) {
Logger(`STORAGE --> DB:${file.path}: (hidden) Failed`);
Logger(ex, LOG_LEVEL_VERBOSE);
return false;
}
});
}
async deleteInternalFileOnDatabase(filename: FilePath, forceWrite = false) {
const id = await this.path2id(filename, ICHeader);
const prefixedFileName = addPrefix(filename, ICHeader);
const mtime = new Date().getTime();
if (await this.plugin.isIgnoredByIgnoreFiles(filename)) {
return
}
await serialized("file-" + prefixedFileName, async () => {
try {
const old = await this.localDatabase.getDBEntryMeta(prefixedFileName, undefined, true) as InternalFileEntry | false;
let saveData: InternalFileEntry;
if (old === false) {
saveData = {
_id: id,
path: prefixedFileName,
mtime,
ctime: mtime,
size: 0,
children: [],
deleted: true,
type: "newnote",
eden: {}
};
} else {
// Remove all conflicted revisions before deleting.
const conflicts = await this.localDatabase.getRaw(old._id, { conflicts: true });
if (conflicts._conflicts !== undefined) {
for (const conflictRev of conflicts._conflicts) {
await this.localDatabase.removeRevision(old._id, conflictRev);
Logger(`STORAGE -x> DB:${filename}: (hidden) conflict removed ${old._rev} => ${conflictRev}`, LOG_LEVEL_VERBOSE);
}
}
if (old.deleted) {
Logger(`STORAGE -x> DB:${filename}: (hidden) already deleted`);
return;
}
saveData =
{
...old,
mtime,
size: 0,
children: [],
deleted: true,
type: "newnote",
};
}
await this.localDatabase.putRaw(saveData);
Logger(`STORAGE -x> DB:${filename}: (hidden) Done`);
} catch (ex) {
Logger(`STORAGE -x> DB:${filename}: (hidden) Failed`);
Logger(ex, LOG_LEVEL_VERBOSE);
return false;
}
});
}
async extractInternalFileFromDatabase(filename: FilePath, force = false) {
const isExists = await this.plugin.vaultAccess.adapterExists(filename);
const prefixedFileName = addPrefix(filename, ICHeader);
if (await this.plugin.isIgnoredByIgnoreFiles(filename)) {
return;
}
return await serialized("file-" + prefixedFileName, async () => {
try {
// Check conflicted status
const fileOnDB = await this.localDatabase.getDBEntry(prefixedFileName, { conflicts: true }, false, true, true);
if (fileOnDB === false)
throw new Error(`File not found on database: ${filename}`);
// Prevent overwriting while any conflicted revision exists.
if (fileOnDB?._conflicts?.length) {
Logger(`Hidden file ${filename} has conflicted revisions; to keep it safe, writing to storage has been prevented`, LOG_LEVEL_INFO);
return;
}
const deleted = fileOnDB.deleted || fileOnDB._deleted || false;
if (deleted) {
if (!isExists) {
Logger(`STORAGE <x- DB:${filename}: deleted (hidden). Deleted on DB, but the file was already missing from storage.`);
} else {
Logger(`STORAGE <x- DB:${filename}: deleted (hidden).`);
await this.plugin.vaultAccess.adapterRemove(filename);
try {
//@ts-ignore internalAPI
await this.app.vault.adapter.reconcileInternalFile(filename);
} catch (ex) {
Logger("Failed to call internal API(reconcileInternalFile)", LOG_LEVEL_VERBOSE);
Logger(ex, LOG_LEVEL_VERBOSE);
}
}
return true;
}
if (!isExists) {
await this.vaultAccess.ensureDirectory(filename);
await this.plugin.vaultAccess.adapterWrite(filename, readContent(fileOnDB), { mtime: fileOnDB.mtime, ctime: fileOnDB.ctime });
try {
//@ts-ignore internalAPI
await this.app.vault.adapter.reconcileInternalFile(filename);
} catch (ex) {
Logger("Failed to call internal API(reconcileInternalFile)", LOG_LEVEL_VERBOSE);
Logger(ex, LOG_LEVEL_VERBOSE);
}
Logger(`STORAGE <-- DB:${filename}: written (hidden,new${force ? ", force" : ""})`);
return true;
} else {
const content = await this.plugin.vaultAccess.adapterReadAuto(filename);
const docContent = readContent(fileOnDB);
if (await isDocContentSame(content, docContent) && !force) {
// Logger(`STORAGE <-- DB:${filename}: skipped (hidden) Not changed`, LOG_LEVEL_VERBOSE);
return true;
}
await this.plugin.vaultAccess.adapterWrite(filename, docContent, { mtime: fileOnDB.mtime, ctime: fileOnDB.ctime });
try {
//@ts-ignore internalAPI
await this.app.vault.adapter.reconcileInternalFile(filename);
} catch (ex) {
Logger("Failed to call internal API(reconcileInternalFile)", LOG_LEVEL_VERBOSE);
Logger(ex, LOG_LEVEL_VERBOSE);
}
Logger(`STORAGE <-- DB:${filename}: written (hidden, overwrite${force ? ", force" : ""})`);
return true;
}
} catch (ex) {
Logger(`STORAGE <-- DB:${filename}: write failed (hidden${force ? ", force" : ""})`);
Logger(ex, LOG_LEVEL_VERBOSE);
return false;
}
});
}
showJSONMergeDialogAndMerge(docA: LoadedEntry, docB: LoadedEntry): Promise<boolean> {
return new Promise((res) => {
Logger("Opening data-merging dialog", LOG_LEVEL_VERBOSE);
const docs = [docA, docB];
const path = stripAllPrefixes(docA.path);
const modal = new JsonResolveModal(this.app, path, [docA, docB], async (keep, result) => {
// modal.close();
try {
const filename = path;
let needFlush = false;
if (!result && !keep) {
Logger(`Skipped merging: ${filename}`);
res(false);
return;
}
//Delete old revisions
if (result || keep) {
for (const doc of docs) {
if (doc._rev != keep) {
if (await this.localDatabase.deleteDBEntry(this.getPath(doc), { rev: doc._rev })) {
Logger(`Conflicted revision has been deleted: ${filename}`);
needFlush = true;
}
}
}
}
if (!keep && result) {
const isExists = await this.plugin.vaultAccess.adapterExists(filename);
if (!isExists) {
await this.vaultAccess.ensureDirectory(filename);
}
await this.plugin.vaultAccess.adapterWrite(filename, result);
const stat = await this.plugin.vaultAccess.adapterStat(filename);
if (!stat) {
throw new Error("Stat failed");
}
const mtime = stat?.mtime ?? 0;
await this.storeInternalFileToDatabase({ path: filename, mtime, ctime: stat?.ctime ?? mtime, size: stat?.size ?? 0 }, true);
try {
//@ts-ignore internalAPI
await this.app.vault.adapter.reconcileInternalFile(filename);
} catch (ex) {
Logger("Failed to call internal API(reconcileInternalFile)", LOG_LEVEL_VERBOSE);
Logger(ex, LOG_LEVEL_VERBOSE);
}
Logger(`STORAGE <-- DB:${filename}: written (hidden,merged)`);
}
if (needFlush) {
await this.extractInternalFileFromDatabase(filename, false);
Logger(`STORAGE <-- DB:${filename}: extracted (hidden,merged)`);
}
res(true);
} catch (ex) {
Logger("Could not merge conflicted json");
Logger(ex, LOG_LEVEL_VERBOSE);
res(false);
}
});
modal.open();
});
}
async scanInternalFiles(): Promise<InternalFileInfo[]> {
const configDir = normalizePath(this.app.vault.configDir);
const ignoreFilter = this.settings.syncInternalFilesIgnorePatterns
.replace(/\n| /g, "")
.split(",").filter(e => e).map(e => new RegExp(e, "i"));
const synchronisedInConfigSync = !this.settings.usePluginSync ? [] : Object.values(this.settings.pluginSyncExtendedSetting).filter(e => e.mode == MODE_SELECTIVE || e.mode == MODE_PAUSED).map(e => e.files).flat().map(e => `${configDir}/${e}`.toLowerCase());
const root = this.app.vault.getRoot();
const findRoot = root.path;
const filenames = (await this.getFiles(findRoot, [], undefined, ignoreFilter)).filter(e => e.startsWith(".")).filter(e => !e.startsWith(".trash"));
const files = filenames.filter(path => synchronisedInConfigSync.every(filterFile => !path.toLowerCase().startsWith(filterFile))).map(async (e) => {
return {
path: e as FilePath,
stat: await this.plugin.vaultAccess.adapterStat(e)
};
});
const result: InternalFileInfo[] = [];
for (const f of files) {
const w = await f;
if (await this.plugin.isIgnoredByIgnoreFiles(w.path)) {
continue
}
const mtime = w.stat?.mtime ?? 0
const ctime = w.stat?.ctime ?? mtime;
const size = w.stat?.size ?? 0;
result.push({
...w,
mtime, ctime, size
});
}
return result;
}
async getFiles(
path: string,
ignoreList: string[],
filter?: RegExp[],
ignoreFilter?: RegExp[]
) {
let w: ListedFiles;
try {
w = await this.app.vault.adapter.list(path);
} catch (ex) {
Logger(`Could not traverse(HiddenSync):${path}`, LOG_LEVEL_INFO);
Logger(ex, LOG_LEVEL_VERBOSE);
return [];
}
const filesSrc = [
...w.files
.filter((e) => !ignoreList.some((ee) => e.endsWith(ee)))
.filter((e) => !filter || filter.some((ee) => e.match(ee)))
.filter((e) => !ignoreFilter || ignoreFilter.every((ee) => !e.match(ee)))
];
let files = [] as string[];
for (const file of filesSrc) {
if (!await this.plugin.isIgnoredByIgnoreFiles(file)) {
files.push(file);
}
}
L1: for (const v of w.folders) {
for (const ignore of ignoreList) {
if (v.endsWith(ignore)) {
continue L1;
}
}
if (ignoreFilter && ignoreFilter.some(e => v.match(e))) {
continue L1;
}
if (await this.plugin.isIgnoredByIgnoreFiles(v)) {
continue L1;
}
files = files.concat(await this.getFiles(v, ignoreList, filter, ignoreFilter));
}
return files;
}
}

View File

@@ -0,0 +1,423 @@
import { type EntryDoc, type ObsidianLiveSyncSettings, DEFAULT_SETTINGS, LOG_LEVEL_NOTICE, REMOTE_COUCHDB, REMOTE_MINIO } from "../lib/src/common/types.ts";
import { configURIBase } from "../common/types.ts";
import { Logger } from "../lib/src/common/logger.ts";
import { PouchDB } from "../lib/src/pouchdb/pouchdb-browser.js";
import { askSelectString, askYesNo, askString } from "../common/utils.ts";
import { decrypt, encrypt } from "../lib/src/encryption/e2ee_v2.ts";
import { LiveSyncCommands } from "./LiveSyncCommands.ts";
import { delay, fireAndForget } from "../lib/src/common/utils.ts";
import { confirmWithMessage } from "../common/dialogs.ts";
import { Platform } from "../deps.ts";
import { fetchAllUsedChunks } from "../lib/src/pouchdb/utils_couchdb.ts";
import type { LiveSyncCouchDBReplicator } from "../lib/src/replication/couchdb/LiveSyncReplicator.js";
export class SetupLiveSync extends LiveSyncCommands {
onunload() { }
onload(): void | Promise<void> {
this.plugin.registerObsidianProtocolHandler("setuplivesync", async (conf: any) => await this.setupWizard(conf.settings));
this.plugin.addCommand({
id: "livesync-copysetupuri",
name: "Copy settings as a new setup URI",
callback: () => fireAndForget(this.command_copySetupURI()),
});
this.plugin.addCommand({
id: "livesync-copysetupuri-short",
name: "Copy settings as a new setup URI (With customization sync)",
callback: () => fireAndForget(this.command_copySetupURIWithSync()),
});
this.plugin.addCommand({
id: "livesync-copysetupurifull",
name: "Copy settings as a new setup URI (Full)",
callback: () => fireAndForget(this.command_copySetupURIFull()),
});
this.plugin.addCommand({
id: "livesync-opensetupuri",
name: "Use the copied setup URI (Formerly Open setup URI)",
callback: () => fireAndForget(this.command_openSetupURI()),
});
}
onInitializeDatabase(showNotice: boolean) { }
beforeReplicate(showNotice: boolean) { }
onResume() { }
parseReplicationResultItem(docs: PouchDB.Core.ExistingDocument<EntryDoc>): boolean | Promise<boolean> {
return false;
}
async realizeSettingSyncMode() { }
async command_copySetupURI(stripExtra = true) {
const encryptingPassphrase = await askString(this.app, "Encrypt your settings", "The passphrase to encrypt the setup URI", "", true);
if (encryptingPassphrase === false)
return;
const setting = { ...this.settings, configPassphraseStore: "", encryptedCouchDBConnection: "", encryptedPassphrase: "" } as Partial<ObsidianLiveSyncSettings>;
if (stripExtra) {
delete setting.pluginSyncExtendedSetting;
}
const keys = Object.keys(setting) as (keyof ObsidianLiveSyncSettings)[];
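// Drop every setting whose value equals its default so the setup URI stays
// short; JSON.stringify works as a cheap deep-equality check here.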
for (const k of keys) {
if (JSON.stringify(k in setting ? setting[k] : "") == JSON.stringify(k in DEFAULT_SETTINGS ? DEFAULT_SETTINGS[k] : "*")) {
delete setting[k];
}
}
const encryptedSetting = encodeURIComponent(await encrypt(JSON.stringify(setting), encryptingPassphrase, false));
const uri = `${configURIBase}${encryptedSetting}`;
await navigator.clipboard.writeText(uri);
Logger("Setup URI copied to clipboard", LOG_LEVEL_NOTICE);
}
async command_copySetupURIFull() {
const encryptingPassphrase = await askString(this.app, "Encrypt your settings", "The passphrase to encrypt the setup URI", "", true);
if (encryptingPassphrase === false)
return;
const setting = { ...this.settings, configPassphraseStore: "", encryptedCouchDBConnection: "", encryptedPassphrase: "" };
const encryptedSetting = encodeURIComponent(await encrypt(JSON.stringify(setting), encryptingPassphrase, false));
const uri = `${configURIBase}${encryptedSetting}`;
await navigator.clipboard.writeText(uri);
Logger("Setup URI copied to clipboard", LOG_LEVEL_NOTICE);
}
async command_copySetupURIWithSync() {
await this.command_copySetupURI(false);
}
async command_openSetupURI() {
const setupURI = await askString(this.app, "Easy setup", "Set up URI", `${configURIBase}aaaaa`);
if (setupURI === false)
return;
if (!setupURI.startsWith(`${configURIBase}`)) {
Logger("Set up URI looks wrong.", LOG_LEVEL_NOTICE);
return;
}
const config = decodeURIComponent(setupURI.substring(configURIBase.length));
console.dir(config);
await this.setupWizard(config);
}
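// The wizard below decrypts the pasted configuration and then offers five
// strategies: plain import, set up as a secondary device (fetch), secondary
// device keeping local changes, full rebuild, or a manual path that asks about
// the local and remote databases separately.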
async setupWizard(confString: string) {
try {
const oldConf = JSON.parse(JSON.stringify(this.settings));
const encryptingPassphrase = await askString(this.app, "Passphrase", "The passphrase to decrypt your setup URI", "", true);
if (encryptingPassphrase === false)
return;
const newConf = JSON.parse(await decrypt(confString, encryptingPassphrase, false));
if (newConf) {
const result = await askYesNo(this.app, "Importing LiveSync's conf, OK?");
if (result == "yes") {
const newSettingW = Object.assign({}, DEFAULT_SETTINGS, newConf) as ObsidianLiveSyncSettings;
this.plugin.replicator.closeReplication();
this.settings.suspendFileWatching = true;
console.dir(newSettingW);
// Reset to the default passphrase-storing method once.
newSettingW.configPassphraseStore = "";
newSettingW.encryptedPassphrase = "";
newSettingW.encryptedCouchDBConnection = "";
newSettingW.additionalSuffixOfDatabaseName = `${("appId" in this.app ? this.app.appId : "")}`
const setupJustImport = "Just import setting";
const setupAsNew = "Set it up as secondary or subsequent device";
const setupAsMerge = "Secondary device but try keeping local changes";
const setupAgain = "Reconfigure and reconstitute the data";
const setupManually = "Leave everything to me";
newSettingW.syncInternalFiles = false;
newSettingW.usePluginSync = false;
newSettingW.isConfigured = true;
// Migrate completely obsolete configuration values.
if (!newSettingW.useIndexedDBAdapter) {
newSettingW.useIndexedDBAdapter = true;
}
const setupType = await askSelectString(this.app, "How would you like to set it up?", [setupAsNew, setupAgain, setupAsMerge, setupJustImport, setupManually]);
if (setupType == setupJustImport) {
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
await this.plugin.saveSettings();
} else if (setupType == setupAsNew) {
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
await this.fetchLocal();
} else if (setupType == setupAsMerge) {
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
await this.fetchLocalWithRebuild();
} else if (setupType == setupAgain) {
const confirm = "I know this operation will rebuild all my databases with files on this device, and files that are on the remote database and I didn't synchronize to any other devices will be lost and want to proceed indeed.";
if (await askSelectString(this.app, "Do you really want to do this?", ["Cancel", confirm]) != confirm) {
return;
}
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
await this.rebuildEverything();
} else if (setupType == setupManually) {
const keepLocalDB = await askYesNo(this.app, "Keep local DB?");
const keepRemoteDB = await askYesNo(this.app, "Keep remote DB?");
if (keepLocalDB == "yes" && keepRemoteDB == "yes") {
// nothing to do. so peaceful.
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
this.suspendAllSync();
this.suspendExtraSync();
await this.plugin.saveSettings();
const replicate = await askYesNo(this.app, "Unlock and replicate?");
if (replicate == "yes") {
await this.plugin.replicate(true);
await this.plugin.markRemoteUnlocked();
}
Logger("Configuration loaded.", LOG_LEVEL_NOTICE);
return;
}
if (keepLocalDB == "no" && keepRemoteDB == "no") {
const reset = await askYesNo(this.app, "Drop everything?");
if (reset != "yes") {
Logger("Cancelled", LOG_LEVEL_NOTICE);
this.plugin.settings = oldConf;
return;
}
}
let initDB;
this.plugin.settings = newSettingW;
this.plugin.usedPassphrase = "";
await this.plugin.saveSettings();
if (keepLocalDB == "no") {
await this.plugin.resetLocalDatabase();
await this.plugin.localDatabase.initializeDatabase();
const rebuild = await askYesNo(this.app, "Rebuild the database?");
if (rebuild == "yes") {
initDB = this.plugin.initializeDatabase(true);
} else {
await this.plugin.markRemoteResolved();
}
}
if (keepRemoteDB == "no") {
await this.plugin.tryResetRemoteDatabase();
await this.plugin.markRemoteLocked();
}
if (keepLocalDB == "no" || keepRemoteDB == "no") {
const replicate = await askYesNo(this.app, "Replicate once?");
if (replicate == "yes") {
if (initDB != null) {
await initDB;
}
await this.plugin.replicate(true);
}
}
}
}
Logger("Configuration loaded.", LOG_LEVEL_NOTICE);
} else {
Logger("Cancelled.", LOG_LEVEL_NOTICE);
}
} catch (ex) {
Logger("Couldn't parse or decrypt configuration uri.", LOG_LEVEL_NOTICE);
}
}
suspendExtraSync() {
Logger("Hidden files and plugin synchronization have been temporarily disabled. Please enable them after the fetching, if you need them.", LOG_LEVEL_NOTICE)
this.plugin.settings.syncInternalFiles = false;
this.plugin.settings.usePluginSync = false;
this.plugin.settings.autoSweepPlugins = false;
}
async askHiddenFileConfiguration(opt: { enableFetch?: boolean, enableOverwrite?: boolean }) {
this.plugin.addOnSetup.suspendExtraSync();
const message = `Would you like to enable \`Hidden File Synchronization\` or \`Customization sync\`?
${opt.enableFetch ? "- Fetch: Use files stored by other devices. \n" : ""}${opt.enableOverwrite ? "- Overwrite: Use files from this device. \n" : ""}- Custom: Synchronize only customization files with a dedicated interface.
- Keep them disabled: Do not use hidden file synchronization.
Of course, you can still disable these features later.`
const CHOICE_FETCH = "Fetch";
const CHOICE_OVERWRITE = "Overwrite";
const CHOICE_CUSTOMIZE = "Custom";
const CHOICE_DISMISS = "Keep them disabled";
const choices = [];
if (opt?.enableFetch) {
choices.push(CHOICE_FETCH);
}
if (opt?.enableOverwrite) {
choices.push(CHOICE_OVERWRITE);
}
choices.push(CHOICE_CUSTOMIZE);
choices.push(CHOICE_DISMISS);
const ret = await confirmWithMessage(this.plugin, "Hidden file sync", message, choices, CHOICE_DISMISS, 40);
if (ret == CHOICE_FETCH) {
await this.configureHiddenFileSync("FETCH");
} else if (ret == CHOICE_OVERWRITE) {
await this.configureHiddenFileSync("OVERWRITE");
} else if (ret == CHOICE_DISMISS) {
await this.configureHiddenFileSync("DISABLE");
} else if (ret == CHOICE_CUSTOMIZE) {
await this.configureHiddenFileSync("CUSTOMIZE");
}
}
async configureHiddenFileSync(mode: "FETCH" | "OVERWRITE" | "MERGE" | "DISABLE" | "CUSTOMIZE") {
this.plugin.addOnSetup.suspendExtraSync();
if (mode == "DISABLE") {
this.plugin.settings.syncInternalFiles = false;
this.plugin.settings.usePluginSync = false;
await this.plugin.saveSettings();
return;
}
if (mode != "CUSTOMIZE") {
Logger("Gathering files for enabling Hidden File Sync", LOG_LEVEL_NOTICE);
if (mode == "FETCH") {
await this.plugin.addOnHiddenFileSync.syncInternalFilesAndDatabase("pullForce", true);
} else if (mode == "OVERWRITE") {
await this.plugin.addOnHiddenFileSync.syncInternalFilesAndDatabase("pushForce", true);
} else if (mode == "MERGE") {
await this.plugin.addOnHiddenFileSync.syncInternalFilesAndDatabase("safe", true);
}
this.plugin.settings.syncInternalFiles = true;
await this.plugin.saveSettings();
Logger(`Done! Restarting the app is strongly recommended!`, LOG_LEVEL_NOTICE);
} else if (mode == "CUSTOMIZE") {
if (!this.plugin.deviceAndVaultName) {
let name = await askString(this.app, "Device name", "Please set this device name", `desktop`);
if (!name) {
if (Platform.isAndroidApp) {
name = "android-app"
} else if (Platform.isIosApp) {
name = "ios"
} else if (Platform.isMacOS) {
name = "macos"
} else if (Platform.isMobileApp) {
name = "mobile-app"
} else if (Platform.isMobile) {
name = "mobile"
} else if (Platform.isSafari) {
name = "safari"
} else if (Platform.isDesktop) {
name = "desktop"
} else if (Platform.isDesktopApp) {
name = "desktop-app"
} else {
name = "unknown"
}
name = name + Math.random().toString(36).slice(-4);
}
this.plugin.deviceAndVaultName = name;
}
this.plugin.settings.usePluginSync = true;
await this.plugin.saveSettings();
await this.plugin.addOnConfigSync.scanAllConfigFiles(true);
}
}
suspendAllSync() {
this.plugin.settings.liveSync = false;
this.plugin.settings.periodicReplication = false;
this.plugin.settings.syncOnSave = false;
this.plugin.settings.syncOnEditorSave = false;
this.plugin.settings.syncOnStart = false;
this.plugin.settings.syncOnFileOpen = false;
this.plugin.settings.syncAfterMerge = false;
//this.suspendExtraSync();
}
async suspendReflectingDatabase() {
if (this.plugin.settings.doNotSuspendOnFetching) return;
if (this.plugin.settings.remoteType == REMOTE_MINIO) return;
Logger(`Suspending reflection: database and storage changes will not be reflected in each other until fetching has completely finished.`, LOG_LEVEL_NOTICE);
this.plugin.settings.suspendParseReplicationResult = true;
this.plugin.settings.suspendFileWatching = true;
await this.plugin.saveSettings();
}
async resumeReflectingDatabase() {
if (this.plugin.settings.doNotSuspendOnFetching) return;
if (this.plugin.settings.remoteType == REMOTE_MINIO) return;
Logger(`Database and storage reflection has been resumed!`, LOG_LEVEL_NOTICE);
this.plugin.settings.suspendParseReplicationResult = false;
this.plugin.settings.suspendFileWatching = false;
await this.plugin.syncAllFiles(true);
await this.plugin.loadQueuedFiles();
await this.plugin.saveSettings();
}
async askUseNewAdapter() {
if (!this.plugin.settings.useIndexedDBAdapter) {
const message = `This plugin is currently configured to use the old database adapter to keep compatibility. Do you want to deactivate it?`;
const CHOICE_YES = "Yes, disable and use latest";
const CHOICE_NO = "No, keep compatibility";
const choices = [CHOICE_YES, CHOICE_NO];
const ret = await confirmWithMessage(this.plugin, "Database adapter", message, choices, CHOICE_YES, 10);
if (ret == CHOICE_YES) {
this.plugin.settings.useIndexedDBAdapter = true;
}
}
}
async resetLocalDatabase() {
if (this.plugin.settings.isConfigured && this.plugin.settings.additionalSuffixOfDatabaseName == "") {
// Discard the non-suffixed database
await this.plugin.resetLocalDatabase();
}
this.plugin.settings.additionalSuffixOfDatabaseName = `${("appId" in this.app ? this.app.appId : "")}`
await this.plugin.resetLocalDatabase();
}
async fetchRemoteChunks() {
if (!this.plugin.settings.doNotSuspendOnFetching && this.plugin.settings.readChunksOnline && this.plugin.settings.remoteType == REMOTE_COUCHDB) {
Logger(`Fetching chunks`, LOG_LEVEL_NOTICE);
const replicator = this.plugin.getReplicator() as LiveSyncCouchDBReplicator;
const remoteDB = await replicator.connectRemoteCouchDBWithSetting(this.settings, this.plugin.getIsMobile(), true);
if (typeof remoteDB == "string") {
Logger(remoteDB, LOG_LEVEL_NOTICE);
} else {
await fetchAllUsedChunks(this.localDatabase.localDatabase, remoteDB.db);
}
Logger(`Fetching chunks done`, LOG_LEVEL_NOTICE);
}
}
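// Fetch flow: suspend reflection, wipe and reopen the local database, optionally
// chunk local files first, then replicate from the server twice (presumably so
// the second pass picks up anything missed during the first) before resuming
// reflection and offering hidden-file configuration.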
async fetchLocal(makeLocalChunkBeforeSync?: boolean) {
this.suspendExtraSync();
await this.askUseNewAdapter();
this.plugin.settings.isConfigured = true;
await this.suspendReflectingDatabase();
await this.plugin.realizeSettingSyncMode();
await this.resetLocalDatabase();
await delay(1000);
await this.plugin.openDatabase();
this.plugin.isReady = true;
if (makeLocalChunkBeforeSync) {
await this.plugin.initializeDatabase(true);
}
await this.plugin.markRemoteResolved();
await delay(500);
await this.plugin.replicateAllFromServer(true);
await delay(1000);
await this.plugin.replicateAllFromServer(true);
await this.resumeReflectingDatabase();
await this.askHiddenFileConfiguration({ enableFetch: true });
}
async fetchLocalWithRebuild() {
return await this.fetchLocal(true);
}
async rebuildRemote() {
this.suspendExtraSync();
this.plugin.settings.isConfigured = true;
await this.plugin.realizeSettingSyncMode();
await this.plugin.markRemoteLocked();
await this.plugin.tryResetRemoteDatabase();
await this.plugin.markRemoteLocked();
await delay(500);
await this.askHiddenFileConfiguration({ enableOverwrite: true });
await delay(1000);
await this.plugin.replicateAllToServer(true);
await delay(1000);
await this.plugin.replicateAllToServer(true);
}
async rebuildEverything() {
this.suspendExtraSync();
await this.askUseNewAdapter();
this.plugin.settings.isConfigured = true;
await this.plugin.realizeSettingSyncMode();
await this.resetLocalDatabase();
await delay(1000);
await this.plugin.initializeDatabase(true);
await this.plugin.markRemoteLocked();
await this.plugin.tryResetRemoteDatabase();
await this.plugin.markRemoteLocked();
await delay(500);
await this.askHiddenFileConfiguration({ enableOverwrite: true });
await delay(1000);
await this.plugin.replicateAllToServer(true);
await delay(1000);
await this.plugin.replicateAllToServer(true);
}
}

View File

@@ -1,37 +0,0 @@
import { mount, unmount } from "svelte";
import { App, Modal } from "../../deps.ts";
import ObsidianLiveSyncPlugin from "../../main.ts";
import PluginPane from "./PluginPane.svelte";
export class PluginDialogModal extends Modal {
plugin: ObsidianLiveSyncPlugin;
component: ReturnType<typeof mount> | undefined;
isOpened() {
return this.component != undefined;
}
constructor(app: App, plugin: ObsidianLiveSyncPlugin) {
super(app);
this.plugin = plugin;
}
onOpen() {
const { contentEl } = this;
this.contentEl.style.overflow = "auto";
this.contentEl.style.display = "flex";
this.contentEl.style.flexDirection = "column";
this.titleEl.setText("Customization Sync (Beta3)");
if (!this.component) {
this.component = mount(PluginPane, {
target: contentEl,
props: { plugin: this.plugin },
});
}
}
onClose() {
if (this.component) {
void unmount(this.component);
this.component = undefined;
}
}
}

View File

@@ -1,228 +0,0 @@
<script lang="ts">
import { type Diff, DIFF_DELETE, DIFF_INSERT, diff_match_patch } from "../../deps.ts";
import type { FilePath, LoadedEntry } from "../../lib/src/common/types.ts";
import { decodeBinary, readString } from "../../lib/src/string_and_binary/convert.ts";
import { getDocData, isObjectDifferent, mergeObject } from "../../lib/src/common/utils.ts";
interface Props {
docs?: LoadedEntry[];
callback?: (keepRev?: string, mergedStr?: string) => Promise<void>;
filename?: FilePath;
nameA?: string;
nameB?: string;
defaultSelect?: string;
keepOrder?: boolean;
hideLocal?: boolean;
}
let {
docs = $bindable([]),
callback = $bindable((async (_, __) => {
Promise.resolve();
}) as (keepRev?: string, mergedStr?: string) => Promise<void>),
filename = $bindable("" as FilePath),
nameA = $bindable("A"),
nameB = $bindable("B"),
defaultSelect = $bindable("" as string),
keepOrder = $bindable(false),
hideLocal = $bindable(false),
}: Props = $props();
type JSONData = Record<string | number | symbol, any> | [any];
const docsArray = $derived.by(() => {
if (docs && docs.length >= 1) {
if (keepOrder || docs[0].mtime < docs[1].mtime) {
return { a: docs[0], b: docs[1] } as const;
} else {
return { a: docs[1], b: docs[0] } as const;
}
}
return { a: false, b: false } as const;
});
const docA = $derived(docsArray.a);
const docB = $derived(docsArray.b);
const docAContent = $derived(docA && docToString(docA));
const docBContent = $derived(docB && docToString(docB));
function parseJson(json: string | false) {
if (json === false) return false;
try {
return JSON.parse(json) as JSONData;
} catch (ex) {
return false;
}
}
const objA = $derived(parseJson(docAContent) || {});
const objB = $derived(parseJson(docBContent) || {});
const objAB = $derived(mergeObject(objA, objB));
const objBAw = $derived(mergeObject(objB, objA));
const objBA = $derived(isObjectDifferent(objBAw, objAB) ? objBAw : false);
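// objBA is surfaced only when merging in the opposite order (B over A) actually
// yields a different result from A over B; otherwise it is hidden as a choice.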
let diffs: Diff[] = $derived.by(() => (objA && selectedObj ? getJsonDiff(objA, selectedObj) : []));
type SelectModes = "" | "A" | "B" | "AB" | "BA";
let mode: SelectModes = $state(defaultSelect as SelectModes);
function docToString(doc: LoadedEntry) {
return doc.datatype == "plain" ? getDocData(doc.data) : readString(new Uint8Array(decodeBinary(doc.data)));
}
function revStringToRevNumber(rev?: string) {
if (!rev) return "";
return rev.split("-")[0];
}
function getDiff(left: string, right: string) {
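// Line-mode diff: encode each unique line as a single character, diff the
// encoded strings, then expand back; this is diff-match-patch's standard trick
// for fast line-level diffs.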
const dmp = new diff_match_patch();
const mapLeft = dmp.diff_linesToChars_(left, right);
const diffLeftSrc = dmp.diff_main(mapLeft.chars1, mapLeft.chars2, false);
dmp.diff_charsToLines_(diffLeftSrc, mapLeft.lineArray);
return diffLeftSrc;
}
function getJsonDiff(a: object, b: object) {
return getDiff(JSON.stringify(a, null, 2), JSON.stringify(b, null, 2));
}
function apply() {
if (!docA || !docB) return;
if (docA._id == docB._id) {
if (mode == "A") return callback(docA._rev!, undefined);
if (mode == "B") return callback(docB._rev!, undefined);
} else {
if (mode == "A") return callback(undefined, docToString(docA));
if (mode == "B") return callback(undefined, docToString(docB));
}
if (mode == "BA") return callback(undefined, JSON.stringify(objBA, null, 2));
if (mode == "AB") return callback(undefined, JSON.stringify(objAB, null, 2));
callback(undefined, undefined);
}
function cancel() {
callback(undefined, undefined);
}
const mergedObjs = $derived.by(
() =>
({
"": false,
A: objA,
B: objB,
AB: objAB,
BA: objBA,
}) as Record<SelectModes, JSONData | false>
);
let selectedObj = $derived(mode in mergedObjs ? mergedObjs[mode] : {});
let modesSrc = $state([] as ["" | "A" | "B" | "AB" | "BA", string][]);
const modes = $derived.by(() => {
let newModes = [] as typeof modesSrc;
if (!hideLocal) {
newModes.push(["", "Not now"]);
newModes.push(["A", nameA || "A"]);
}
newModes.push(["B", nameB || "B"]);
newModes.push(["AB", `${nameA || "A"} + ${nameB || "B"}`]);
newModes.push(["BA", `${nameB || "B"} + ${nameA || "A"}`]);
return newModes;
});
</script>
<h2>{filename}</h2>
{#if !docA || !docB}
<div class="message">Just for a minute, please!</div>
<div class="buttons">
<button onclick={apply}>Dismiss</button>
</div>
{:else}
<div class="options">
{#each modes as m}
{#if m[0] == "" || mergedObjs[m[0]] != false}
<label class={`sls-setting-label ${m[0] == mode ? "selected" : ""}`}
><input type="radio" name="disp" bind:group={mode} value={m[0]} class="sls-setting-tab" />
<div class="sls-setting-menu-btn">{m[1]}</div></label
>
{/if}
{/each}
</div>
{#if selectedObj != false}
<div class="op-scrollable json-source">
{#each diffs as diff}
<span class={diff[0] == DIFF_DELETE ? "deleted" : diff[0] == DIFF_INSERT ? "added" : "normal"}
>{diff[1]}</span
>
{/each}
</div>
{:else}
NO PREVIEW
{/if}
<div class="infos">
<table>
<tbody>
<tr>
<th>{nameA}</th>
<td
>{#if docA._id == docB._id}
Rev:{revStringToRevNumber(docA._rev)}
{/if}
{new Date(docA.mtime).toLocaleString()}</td
>
<td>
{docAContent && docAContent.length} letters
</td>
</tr>
<tr>
<th>{nameB}</th>
<td
>{#if docA._id == docB._id}
Rev:{revStringToRevNumber(docB._rev)}
{/if}
{new Date(docB.mtime).toLocaleString()}</td
>
<td>
{docBContent && docBContent.length} letters
</td>
</tr>
</tbody>
</table>
</div>
<div class="buttons">
{#if hideLocal}
<button onclick={cancel}>Cancel</button>
{/if}
<button onclick={apply}>Apply</button>
</div>
{/if}
<style>
.spacer {
flex-grow: 1;
}
.infos {
display: flex;
justify-content: space-between;
margin: 4px 0.5em;
}
.deleted {
text-decoration: line-through;
}
* {
box-sizing: border-box;
}
.scroller {
display: flex;
flex-direction: column;
overflow-y: scroll;
max-height: 60vh;
user-select: text;
-webkit-user-select: text;
}
.json-source {
white-space: pre;
height: auto;
overflow: auto;
min-height: var(--font-ui-medium);
flex-grow: 1;
}
</style>

File diff suppressed because it is too large

View File

@@ -1,20 +1,8 @@
import { LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import { getPath } from "../common/utils.ts";
import {
LOG_LEVEL_INFO,
LOG_LEVEL_NOTICE,
type AnyEntry,
type DocumentID,
type FilePath,
type FilePathWithPrefix,
type LOG_LEVEL,
} from "../lib/src/common/types.ts";
import { type AnyEntry, type DocumentID, type EntryDoc, type EntryHasPath, type FilePath, type FilePathWithPrefix } from "../lib/src/common/types.ts";
import { PouchDB } from "../lib/src/pouchdb/pouchdb-browser.js";
import type ObsidianLiveSyncPlugin from "../main.ts";
import { MARK_DONE } from "../modules/features/ModuleLog.ts";
import type { LiveSyncCore } from "../main.ts";
import { __$checkInstanceBinding } from "../lib/src/dev/checks.ts";
let noticeIndex = 0;
export abstract class LiveSyncCommands {
plugin: ObsidianLiveSyncPlugin;
get app() {
@@ -26,77 +14,27 @@ export abstract class LiveSyncCommands {
get localDatabase() {
return this.plugin.localDatabase;
}
get services() {
return this.plugin.services;
get vaultAccess() {
return this.plugin.vaultAccess;
}
id2path(id: DocumentID, entry?: EntryHasPath, stripPrefix?: boolean): FilePathWithPrefix {
return this.plugin.id2path(id, entry, stripPrefix);
}
// id2path(id: DocumentID, entry?: EntryHasPath, stripPrefix?: boolean): FilePathWithPrefix {
// return this.plugin.$$id2path(id, entry, stripPrefix);
// }
async path2id(filename: FilePathWithPrefix | FilePath, prefix?: string): Promise<DocumentID> {
return await this.services.path.path2id(filename, prefix);
return await this.plugin.path2id(filename, prefix);
}
getPath(entry: AnyEntry): FilePathWithPrefix {
return getPath(entry);
return this.plugin.getPath(entry);
}
constructor(plugin: ObsidianLiveSyncPlugin) {
this.plugin = plugin;
this.onBindFunction(plugin, plugin.services);
__$checkInstanceBinding(this);
}
abstract onunload(): void;
abstract onload(): void | Promise<void>;
_isMainReady() {
return this.plugin.services.appLifecycle.isReady();
}
_isMainSuspended() {
return this.services.appLifecycle.isSuspended();
}
_isDatabaseReady() {
return this.services.database.isDatabaseReady();
}
_log = (msg: any, level: LOG_LEVEL = LOG_LEVEL_INFO, key?: string) => {
if (typeof msg === "string" && level !== LOG_LEVEL_NOTICE) {
msg = `[${this.constructor.name}]\u{200A} ${msg}`;
}
// console.log(msg);
Logger(msg, level, key);
};
_verbose = (msg: any, key?: string) => {
this._log(msg, LOG_LEVEL_VERBOSE, key);
};
_info = (msg: any, key?: string) => {
this._log(msg, LOG_LEVEL_INFO, key);
};
_notice = (msg: any, key?: string) => {
this._log(msg, LOG_LEVEL_NOTICE, key);
};
_progress = (prefix: string = "", level: LOG_LEVEL = LOG_LEVEL_NOTICE) => {
const key = `keepalive-progress-${noticeIndex++}`;
return {
log: (msg: any) => {
this._log(prefix + msg, level, key);
},
once: (msg: any) => {
this._log(prefix + msg, level);
},
done: (msg: string = "Done") => {
this._log(prefix + msg + MARK_DONE, level, key);
},
};
};
_debug = (msg: any, key?: string) => {
this._log(msg, LOG_LEVEL_VERBOSE, key);
};
onBindFunction(core: LiveSyncCore, services: typeof core.services) {
// Override if needed.
}
abstract onInitializeDatabase(showNotice: boolean): void | Promise<void>;
abstract beforeReplicate(showNotice: boolean): void | Promise<void>;
abstract onResume(): void | Promise<void>;
abstract parseReplicationResultItem(docs: PouchDB.Core.ExistingDocument<EntryDoc>): Promise<boolean> | boolean;
abstract realizeSettingSyncMode(): Promise<void>;
}

View File

@@ -1,966 +0,0 @@
import { sizeToHumanReadable } from "octagonal-wheels/number";
import {
EntryTypes,
LOG_LEVEL_INFO,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
type DocumentID,
type EntryDoc,
type EntryLeaf,
type FilePathWithPrefix,
type MetaEntry,
} from "../../lib/src/common/types";
import { getNoFromRev } from "../../lib/src/pouchdb/LiveSyncLocalDB";
import { LiveSyncCommands } from "../LiveSyncCommands";
import { serialized } from "octagonal-wheels/concurrency/lock_v2";
import { arrayToChunkedArray } from "octagonal-wheels/collection";
import { EVENT_ANALYSE_DB_USAGE, EVENT_REQUEST_PERFORM_GC_V3, eventHub } from "@/common/events";
import type { LiveSyncCouchDBReplicator } from "@/lib/src/replication/couchdb/LiveSyncReplicator";
import { delay, parseHeaderValues } from "@/lib/src/common/utils";
import { generateCredentialObject } from "@/lib/src/replication/httplib";
import { _requestToCouchDB } from "@/common/utils";
const DB_KEY_SEQ = "gc-seq";
const DB_KEY_CHUNK_SET = "chunk-set";
const DB_KEY_DOC_USAGE_MAP = "doc-usage-map";
type ChunkID = DocumentID;
type NoteDocumentID = DocumentID;
type Rev = string;
type ChunkUsageMap = Map<NoteDocumentID, Map<Rev, Set<ChunkID>>>;
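// Rough data model (as used below, not an authoritative spec): file entries
// ("newnote"/"plain" docs) hold a `children` array of chunk IDs, and chunks are
// immutable leaf docs. Garbage collection is therefore a mark-and-sweep over this
// graph: mark unused chunks, commit file deletions, commit chunk deletions, compact.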
export class LocalDatabaseMaintenance extends LiveSyncCommands {
onunload(): void {
// NO OP.
}
onload(): void | Promise<void> {
// NO OP.
this.plugin.addCommand({
id: "analyse-database",
name: "Analyse Database Usage (advanced)",
icon: "database-search",
callback: async () => {
await this.analyseDatabase();
},
});
this.plugin.addCommand({
id: "gc-v3",
name: "Garbage Collection V3 (advanced, beta)",
icon: "trash-2",
callback: async () => {
await this.gcv3();
},
});
eventHub.onEvent(EVENT_ANALYSE_DB_USAGE, () => this.analyseDatabase());
eventHub.onEvent(EVENT_REQUEST_PERFORM_GC_V3, () => this.gcv3());
}
async allChunks(includeDeleted: boolean = false) {
const p = this._progress("", LOG_LEVEL_NOTICE);
p.log("Retrieving chunks informations..");
try {
const ret = await this.localDatabase.allChunks(includeDeleted);
return ret;
} finally {
p.done();
}
}
get database() {
return this.localDatabase.localDatabase;
}
clearHash() {
this.localDatabase.clearCaches();
}
async confirm(title: string, message: string, affirmative = "Yes", negative = "No") {
return (
(await this.plugin.confirm.askSelectStringDialogue(message, [affirmative, negative], {
title,
defaultAction: affirmative,
})) === affirmative
);
}
isAvailable() {
if (!this.settings.doNotUseFixedRevisionForChunks) {
this._notice("Please enable 'Compute revisions for chunks' in settings to use Garbage Collection.");
return false;
}
if (this.settings.readChunksOnline) {
this._notice("Please disable 'Read chunks online' in settings to use Garbage Collection.");
return false;
}
return true;
}
/**
* Resurrect deleted chunks that are still used in the database.
*/
async resurrectChunks() {
if (!this.isAvailable()) return;
const { used, existing } = await this.allChunks(true);
const excessiveDeletions = [...existing]
.filter(([key, e]) => e._deleted)
.filter(([key, e]) => used.has(e._id))
.map(([key, e]) => e);
const completelyLostChunks = [] as string[];
// Data-lost chunks: chunks that are deleted and whose data has been purged.
const dataLostChunks = [...existing]
.filter(([key, e]) => e._deleted && e.data === "")
.map(([key, e]) => e)
.filter((e) => used.has(e._id));
for (const e of dataLostChunks) {
// Retrieve the data from the previous revision.
const doc = await this.database.get(e._id, { rev: e._rev, revs: true, revs_info: true, conflicts: true });
const history = doc._revs_info || [];
// Chunks are immutable, so we can resurrect a chunk by copying the data from any of its previous revisions.
let resurrected = null as null | string;
const availableRevs = history
.filter((e) => e.status == "available")
.map((e) => e.rev)
.sort((a, b) => getNoFromRev(a) - getNoFromRev(b));
for (const rev of availableRevs) {
const revDoc = await this.database.get(e._id, { rev: rev });
if (revDoc.type == "leaf" && revDoc.data !== "") {
// Found the data.
resurrected = revDoc.data;
break;
}
}
// If the data has been found, queue the chunk for resurrection; otherwise record it as completely lost.
if (resurrected !== null) {
excessiveDeletions.push({ ...e, data: resurrected, _deleted: false });
} else {
completelyLostChunks.push(e._id);
}
}
// Chunks to be resurrected.
const resurrectChunks = excessiveDeletions.filter((e) => e.data !== "").map((e) => ({ ...e, _deleted: false }));
if (resurrectChunks.length == 0) {
this._notice("No chunks are found to be resurrected.");
return;
}
const message = `We have the following chunks that are deleted but still used in the database.
- Completely lost chunks: ${completelyLostChunks.length}
- Resurrectable chunks: ${resurrectChunks.length}
Do you want to resurrect these chunks?`;
if (await this.confirm("Resurrect Chunks", message, "Resurrect", "Cancel")) {
const result = await this.database.bulkDocs(resurrectChunks);
this.clearHash();
const resurrectedChunks = result.filter((e) => "ok" in e).map((e) => e.id);
this._notice(`Resurrected chunks: ${resurrectedChunks.length} / ${resurrectChunks.length}`);
} else {
this._notice("Resurrect operation is cancelled.");
}
}
/**
* Commit deletion of files that are marked as deleted.
* This method makes the deletion permanent, and the files will not be recovered.
* After this, chunks that are used in the deleted files become ready for compaction.
*/
async commitFileDeletion() {
if (!this.isAvailable()) return;
const p = this._progress("", LOG_LEVEL_NOTICE);
p.log("Searching for deleted files..");
const docs = await this.database.allDocs<MetaEntry>({ include_docs: true });
const deletedDocs = docs.rows.filter(
(e) => (e.doc?.type == "newnote" || e.doc?.type == "plain") && e.doc?.deleted
);
if (deletedDocs.length == 0) {
p.done("No deleted files found.");
return;
}
p.log(`Found ${deletedDocs.length} deleted files.`);
const message = `We have the following files that are marked as deleted.
- Deleted files: ${deletedDocs.length}
Are you sure you want to delete these files permanently?
Note: **Make sure to synchronise all devices before deletion.**
> [!Note]
> This operation affects the database permanently. Deleted files will not be recovered after this operation.
> And, the chunks that are used in the deleted files will be ready for compaction.`;
const deletingDocs = deletedDocs.map((e) => ({ ...e.doc, _deleted: true }) as MetaEntry);
if (await this.confirm("Delete Files", message, "Delete", "Cancel")) {
const result = await this.database.bulkDocs(deletingDocs);
this.clearHash();
p.done(`Deleted ${result.filter((e) => "ok" in e).length} / ${deletedDocs.length} files.`);
} else {
p.done("Deletion operation is cancelled.");
}
}
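// Note the two deletion layers used above: `deleted: true` is the plug-in's
// soft-delete flag (still replicated with the entry), while `_deleted: true`
// is the PouchDB tombstone. commitFileDeletion promotes the former to the latter.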
/**
* Commit deletion of chunks that are not used in the database.
* This method makes the deletion permanent, and the chunks will not be recovered if the database runs compaction.
* After this, the database can shrink the database size by compaction.
* It is recommended to compact the database after this operation (History should be kept once before compaction).
*/
async commitChunkDeletion() {
if (!this.isAvailable()) return;
const { existing } = await this.allChunks(true);
const deletedChunks = [...existing].filter(([key, e]) => e._deleted && e.data !== "").map(([key, e]) => e);
const deletedNotVacantChunks = deletedChunks.map((e) => ({ ...e, data: "", _deleted: true }));
const size = deletedChunks.reduce((acc, e) => acc + e.data.length, 0);
const humanSize = sizeToHumanReadable(size);
const message = `We have the following chunks that are marked as deleted.
- Deleted chunks: ${deletedNotVacantChunks.length} (${humanSize})
Are you sure you want to delete these chunks permanently?
Note: **Make sure to synchronise all devices before deletion.**
> [!Note]
> This operation permanently reduces the space used on the remote.`;
if (deletedNotVacantChunks.length == 0) {
this._notice("No deleted chunks found.");
return;
}
if (await this.confirm("Delete Chunks", message, "Delete", "Cancel")) {
const result = await this.database.bulkDocs(deletedNotVacantChunks);
this.clearHash();
this._notice(
`Deleted chunks: ${result.filter((e) => "ok" in e).length} / ${deletedNotVacantChunks.length}`
);
} else {
this._notice("Deletion operation is cancelled.");
}
}
/**
 * Mark chunks that are not referenced by any file as deleted.
 * This only marks them; the data is removed later by permanent deletion and compaction.
 * Make sure all devices are synchronized before running this method.
 */
async markUnusedChunks() {
if (!this.isAvailable()) return;
const { used, existing } = await this.allChunks();
const existChunks = [...existing];
const unusedChunks = existChunks.filter(([key, e]) => !used.has(e._id)).map(([key, e]) => e);
const deleteChunks = unusedChunks.map((e) => ({
...e,
_deleted: true,
}));
const size = deleteChunks.reduce((acc, e) => acc + e.data.length, 0);
const humanSize = sizeToHumanReadable(size);
if (deleteChunks.length == 0) {
this._notice("No unused chunks found.");
return;
}
const message = `We have the following chunks that are not used by any files.
- Chunks: ${deleteChunks.length} (${humanSize})
Are you sure you want to mark these chunks as deleted?
Note: **Make sure to synchronise all devices before deletion.**
> [!Note]
> This operation will not reduce the capacity of the remote until permanent deletion.`;
if (await this.confirm("Mark unused chunks", message, "Mark", "Cancel")) {
const result = await this.database.bulkDocs(deleteChunks);
this.clearHash();
this._notice(`Marked chunks: ${result.filter((e) => "ok" in e).length} / ${deleteChunks.length}`);
}
}
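/**
 * Delete unused chunks in a single pass: marks them deleted and empties their
 * data immediately, without the separate mark/commit phases above.
 */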
async removeUnusedChunks() {
const { used, existing } = await this.allChunks();
const existChunks = [...existing];
const unusedChunks = existChunks.filter(([key, e]) => !used.has(e._id)).map(([key, e]) => e);
const deleteChunks = unusedChunks.map((e) => ({
...e,
data: "",
_deleted: true,
}));
const size = unusedChunks.reduce((acc, e) => acc + e.data.length, 0);
const humanSize = sizeToHumanReadable(size);
if (deleteChunks.length == 0) {
this._notice("No unused chunks found.");
return;
}
const message = `We have the following chunks that are not used by any files.
- Chunks: ${deleteChunks.length} (${humanSize})
Are you sure you want to delete these chunks?
Note: **Make sure to synchronise all devices before deletion.**
> [!Note]
> Chunks referenced from deleted files are not deleted. Please run "Commit File Deletion" before this operation.`;
if (await this.confirm("Mark unused chunks", message, "Mark", "Cancel")) {
const result = await this.database.bulkDocs(deleteChunks);
this._notice(`Deleted chunks: ${result.filter((e) => "ok" in e).length} / ${deleteChunks.length}`);
this.clearHash();
}
}
async scanUnusedChunks() {
const kvDB = this.plugin.kvDB;
const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();
const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();
const KEEP_MAX_REVS = 10;
const unusedSet = new Set<DocumentID>([...chunkSet]);
for (const [, revIdMap] of chunkUsageMap) {
const sortedRevId = [...revIdMap.entries()].sort((a, b) => getNoFromRev(b[0]) - getNoFromRev(a[0]));
// Keep only the newest KEEP_MAX_REVS revisions; chunks referenced solely by older
// revisions stay in unusedSet and become deletion candidates.
const keepRevID = sortedRevId.slice(0, KEEP_MAX_REVS);
keepRevID.forEach((e) => e[1].forEach((ee) => unusedSet.delete(ee)));
}
return {
chunkSet,
chunkUsageMap,
unusedSet,
};
}
/**
* Track changes in the database and update the chunk usage map for garbage collection.
* Note that this can only be performed while "Fetch chunks on demand" is disabled.
*/
async trackChanges(fromStart: boolean = false, showNotice: boolean = false) {
if (!this.isAvailable()) return;
const logLevel = showNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
const kvDB = this.plugin.kvDB;
const previousSeq = fromStart ? "" : await kvDB.get<string>(DB_KEY_SEQ);
const chunkSet = (await kvDB.get<Set<DocumentID>>(DB_KEY_CHUNK_SET)) || new Set();
const chunkUsageMap = (await kvDB.get<ChunkUsageMap>(DB_KEY_DOC_USAGE_MAP)) || new Map();
const db = this.localDatabase.localDatabase;
const verbose = (msg: string) => this._verbose(msg);
const processDoc = async (doc: EntryDoc, isDeleted: boolean) => {
if (!("children" in doc)) {
return;
}
const id = doc._id;
const rev = doc._rev!;
const deleted = doc._deleted || isDeleted;
const softDeleted = doc.deleted;
const children = (doc.children || []) as DocumentID[];
if (!chunkUsageMap.has(id)) {
chunkUsageMap.set(id, new Map<Rev, Set<ChunkID>>());
}
for (const chunkId of children) {
if (deleted) {
chunkUsageMap.get(id)!.delete(rev);
// chunkSet.add(chunkId as DocumentID);
} else {
// TODO: Soft-deleted revisions are currently tracked exactly like live ones;
// separate handling may be added later.
chunkUsageMap.get(id)!.set(rev, (chunkUsageMap.get(id)!.get(rev) || new Set()).add(chunkId));
}
}
verbose(
`Tracking chunk: ${id}/${rev} (${doc?.path}), deleted: ${deleted ? "yes" : "no"} Soft-Deleted:${softDeleted ? "yes" : "no"}`
);
return await Promise.resolve();
};
// let saveQueue = 0;
const saveState = async (seq: string | number) => {
await kvDB.set(DB_KEY_SEQ, seq);
await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
await kvDB.set(DB_KEY_DOC_USAGE_MAP, chunkUsageMap);
};
const processDocRevisions = async (doc: EntryDoc) => {
try {
const oldRevisions = await db.get(doc._id, { revs: true, revs_info: true, conflicts: true });
const allRevs = oldRevisions._revs_info?.length || 0;
const info = (oldRevisions._revs_info || [])
.filter((e) => e.status == "available" && e.rev != doc._rev)
.filter((info) => !chunkUsageMap.get(doc._id)?.has(info.rev));
const infoLength = info.length;
this._log(`Found ${allRevs} old revisions for ${doc._id}. ${infoLength} items to check.`);
if (info.length > 0) {
const oldDocs = await Promise.all(
info
.filter((revInfo) => revInfo.status == "available")
.map((revInfo) => db.get(doc._id, { rev: revInfo.rev }))
).then((docs) => docs.filter((doc) => doc));
for (const oldDoc of oldDocs) {
await processDoc(oldDoc as EntryDoc, false);
}
}
} catch (ex) {
if ((ex as any)?.status == 404) {
this._log(`No revisions found for ${doc._id}`, LOG_LEVEL_VERBOSE);
} else {
this._log(`Error finding revisions for ${doc._id}`);
this._verbose(ex);
}
}
};
const processChange = async (doc: EntryDoc, isDeleted: boolean, seq: string | number) => {
if (doc.type === EntryTypes.CHUNK) {
if (isDeleted) return;
chunkSet.add(doc._id);
} else if ("children" in doc) {
await processDoc(doc, isDeleted);
await serialized("x-process-doc", async () => await processDocRevisions(doc));
}
};
// Track changes
let i = 0;
await db
.changes({
since: previousSeq || "",
live: false,
conflicts: true,
include_docs: true,
style: "all_docs",
return_docs: false,
})
.on("change", async (change) => {
// handle change
await processChange(change.doc!, change.deleted ?? false, change.seq);
if (i++ % 100 == 0) {
await saveState(change.seq);
}
})
.on("complete", async (info) => {
await saveState(info.last_seq);
});
// All changed docs and newly added leaf chunks have been tracked at this point.
const result = await this.scanUnusedChunks();
const message = `Total chunks: ${result.chunkSet.size}\nUnused chunks: ${result.unusedSet.size}`;
this._log(message, logLevel);
}
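// Illustrative call: `await this.trackChanges(true, true)` rebuilds the usage
// map from the beginning of the changes feed and reports progress via notices.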
async performGC(showingNotice = false) {
if (!this.isAvailable()) return;
await this.trackChanges(false, showingNotice);
const title = "Are all devices synchronised?";
const confirmMessage = `This function deletes unused chunks from the device. If there are differences between devices, some chunks may be missing when resolving conflicts.
Be sure to synchronise before executing.
However, even if some chunks are accidentally deleted, you may be able to recover them by performing Hatch -> Recreate missing chunks for all files.
Are you ready to delete unused chunks?`;
const logLevel = showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO;
const BUTTON_OK = `Yes, delete chunks`;
const BUTTON_CANCEL = "Cancel";
const result = await this.plugin.confirm.askSelectStringDialogue(
confirmMessage,
[BUTTON_OK, BUTTON_CANCEL] as const,
{
title,
defaultAction: BUTTON_CANCEL,
}
);
if (result !== BUTTON_OK) {
this._log("User cancelled chunk deletion", logLevel);
return;
}
const { unusedSet, chunkSet } = await this.scanUnusedChunks();
const deleteChunks = await this.database.allDocs({
keys: [...unusedSet],
include_docs: true,
});
for (const chunk of deleteChunks.rows) {
if ((chunk as any)?.value?.deleted) {
chunkSet.delete(chunk.key as DocumentID);
}
}
const deleteDocs = deleteChunks.rows
.filter((e) => "doc" in e)
.map((e) => ({
...(e as any).doc!,
_deleted: true,
}));
this._log(`Deleting chunks: ${deleteDocs.length}`, logLevel);
const deleteChunkBatch = arrayToChunkedArray(deleteDocs, 100);
let successCount = 0;
let errored = 0;
for (const batch of deleteChunkBatch) {
const results = await this.database.bulkDocs(batch as EntryLeaf[]);
for (const result of results) {
if ("ok" in result) {
chunkSet.delete(result.id as DocumentID);
successCount++;
} else {
this._log(`Failed to delete doc: ${result.id}`, LOG_LEVEL_VERBOSE);
errored++;
}
}
this._log(`Deleting chunks: ${successCount}`, logLevel, "gc-performing");
}
const message = `Garbage Collection completed.
Success: ${successCount}, Errored: ${errored}`;
this._log(message, logLevel);
const kvDB = this.plugin.kvDB;
await kvDB.set(DB_KEY_CHUNK_SET, chunkSet);
}
// Analyse the database and report chunk usage.
async analyseDatabase() {
if (!this.isAvailable()) return;
const db = this.localDatabase.localDatabase;
// Map of chunk ID to its info
type ChunkInfo = {
id: DocumentID;
refCount: number;
length: number;
};
const chunkMap = new Map<DocumentID, Set<ChunkInfo>>();
// Map of document ID to its info
type DocumentInfo = {
id: DocumentID;
rev: Rev;
chunks: Set<ChunkID>;
uniqueChunks: Set<ChunkID>;
sharedChunks: Set<ChunkID>;
path: FilePathWithPrefix;
};
const docMap = new Map<DocumentID, Set<DocumentInfo>>();
const info = await db.info();
// Total number of revisions to process (approximate)
const maxSeq = Number(info.update_seq);
let processed = 0;
let read = 0;
let errored = 0;
// Fetch Tasks
const ft = [] as ReturnType<typeof fetchRevision>[];
// Fetch a specific revision of a document and make note of its chunks, or add chunk info.
const fetchRevision = async (id: DocumentID, rev: Rev, seq: string | number) => {
try {
processed++;
const doc = await db.get(id, { rev: rev });
if (doc) {
if ("children" in doc) {
const id = doc._id;
const rev = doc._rev;
const children = (doc.children || []) as DocumentID[];
const set = docMap.get(id) || new Set();
set.add({
id,
rev,
chunks: new Set(children),
uniqueChunks: new Set(),
sharedChunks: new Set(),
path: doc.path,
});
docMap.set(id, set);
} else if (doc.type === EntryTypes.CHUNK) {
const id = doc._id as DocumentID;
if (chunkMap.has(id)) {
return;
}
if (doc._deleted) {
// Deleted chunk, skip (possibly resurrected later)
return;
}
const length = doc.data.length;
const set = chunkMap.get(id) || new Set();
set.add({ id, length, refCount: 0 });
chunkMap.set(id, set);
}
read++;
} else {
this._log(`Analysing Database: not found: ${id} / ${rev}`);
errored++;
}
} catch (error) {
this._log(`Error fetching document ${id} / ${rev}`, LOG_LEVEL_NOTICE);
this._log(error, LOG_LEVEL_VERBOSE);
errored++;
}
if (processed % 100 == 0) {
this._log(`Analysing database: ${read} (${errored}) / ${maxSeq} `, LOG_LEVEL_NOTICE, "db-analyse");
}
};
// Enumerate all documents and their revisions.
const IDs = this.localDatabase.findEntryNames("", "", {});
for await (const id of IDs) {
const revList = await this.localDatabase.getRaw(id as DocumentID, {
revs: true,
revs_info: true,
conflicts: true,
});
const revInfos = revList._revs_info || [];
for (const revInfo of revInfos) {
// All available revisions should be processed.
// If the revision is not available, it means the revision is already tombstoned.
if (revInfo.status == "available") {
// Schedule fetch task
ft.push(fetchRevision(id as DocumentID, revInfo.rev, 0));
}
}
}
// Wait for all fetch tasks to complete.
await Promise.all(ft);
// Reference count marking and unique/shared chunk classification.
for (const [, docRevs] of docMap) {
for (const docRev of docRevs) {
for (const chunkId of docRev.chunks) {
const chunkInfos = chunkMap.get(chunkId);
if (chunkInfos) {
for (const chunkInfo of chunkInfos) {
if (chunkInfo.refCount === 0) {
docRev.uniqueChunks.add(chunkId);
} else {
docRev.sharedChunks.add(chunkId);
}
chunkInfo.refCount++;
}
}
}
}
}
// Prepare results
const result = [];
// Calculate total size of chunks in the given set.
const getTotalSize = (ids: Set<DocumentID>) => {
return [...ids].reduce((acc, chunkId) => {
const chunkInfos = chunkMap.get(chunkId);
if (chunkInfos) {
for (const chunkInfo of chunkInfos) {
acc += chunkInfo.length;
}
}
return acc;
}, 0);
};
// Compile results for each document revision
for (const doc of docMap.values()) {
for (const rev of doc) {
const title = `${rev.path} (${rev.rev})`;
const id = rev.id;
const revStr = `${getNoFromRev(rev.rev)}`;
const revHash = rev.rev.split("-")[1].substring(0, 6);
const path = rev.path;
const uniqueChunkCount = rev.uniqueChunks.size;
const sharedChunkCount = rev.sharedChunks.size;
const uniqueChunkSize = getTotalSize(rev.uniqueChunks);
const sharedChunkSize = getTotalSize(rev.sharedChunks);
result.push({
title,
path,
rev: revStr,
revHash,
id,
uniqueChunkCount: uniqueChunkCount,
sharedChunkCount,
uniqueChunkSize: uniqueChunkSize,
sharedChunkSize: sharedChunkSize,
});
}
}
const titleMap = {
title: "Title",
id: "Document ID",
path: "Path",
rev: "Revision No",
revHash: "Revision Hash",
uniqueChunkCount: "Unique Chunk Count",
sharedChunkCount: "Shared Chunk Count",
uniqueChunkSize: "Unique Chunk Size",
sharedChunkSize: "Shared Chunk Size",
} as const;
// Enumerate orphan chunks (not referenced by any document)
const orphanChunks = [...chunkMap.entries()].filter(([chunkId, infos]) => {
const totalRefCount = [...infos].reduce((acc, info) => acc + info.refCount, 0);
return totalRefCount === 0;
});
const orphanChunkSize = orphanChunks.reduce((acc, [chunkId, infos]) => {
for (const info of infos) {
acc += info.length;
}
return acc;
}, 0);
result.push({
title: "__orphan",
id: "__orphan",
path: "__orphan",
rev: "1",
revHash: "xxxxx",
uniqueChunkCount: orphanChunks.length,
sharedChunkCount: 0,
uniqueChunkSize: orphanChunkSize,
sharedChunkSize: 0,
} as any);
const csvSrc = result.map((e) => {
return [
`"${e.title.replace(/"/g, '""')}"`,
`${e.id}`,
`${e.path}`,
`${e.rev}`,
`${e.revHash}`,
`${e.uniqueChunkCount}`,
`${e.sharedChunkCount}`,
`${e.uniqueChunkSize}`,
`${e.sharedChunkSize}`,
].join("\t");
});
// Add title row
csvSrc.unshift(Object.values(titleMap).join("\t"));
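// Illustrative line of the resulting TSV (all values are made up):
// "Notes/daily.md (3-a1b2c3)"	doc-id	Notes/daily.md	3	a1b2c3	12	34	4096	20480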
const csv = csvSrc.join("\n");
// Prompt to copy to clipboard
await this.services.UI.promptCopyToClipboard("Database Analysis data (TSV):", csv);
}
async compactDatabase() {
const replicator = this.plugin.replicator as LiveSyncCouchDBReplicator;
const remote = await replicator.connectRemoteCouchDBWithSetting(this.settings, false, false, true);
if (!remote) {
this._notice("Failed to connect to remote for compaction.", "gc-compact");
return;
}
if (typeof remote == "string") {
this._notice(`Failed to connect to remote for compaction. ${remote}`, "gc-compact");
return;
}
const compactResult = await remote.db.compact({
interval: 1000,
});
// Probably no need to wait, but just in case.
let timeout = 2 * 60 * 1000; // 2 minutes
do {
const status = await remote.db.info();
if ("compact_running" in status && status?.compact_running) {
this._notice("Compaction in progress on remote database...", "gc-compact");
await delay(2000);
timeout -= 2000;
if (timeout <= 0) {
this._notice("Compaction on remote database timed out.", "gc-compact");
break;
}
} else {
break;
}
} while (true);
if (compactResult && "ok" in compactResult) {
this._notice("Compaction on remote database completed successfully.", "gc-compact");
} else {
this._notice("Compaction on remote database failed.", "gc-compact");
}
}
/**
* Compact the database by temporarily setting the revision limit to 1.
* @returns
*/
async compactDatabaseWithRevLimit() {
// Temporarily set revs_limit to 1, perform compaction, and restore the original revs_limit.
// This is a very dangerous operation, so it is currently suppressed.
return false;
const replicator = this.plugin.replicator as LiveSyncCouchDBReplicator;
const remote = await replicator.connectRemoteCouchDBWithSetting(this.settings, false, false, true);
if (!remote) {
this._notice("Failed to connect to remote for compaction.");
return;
}
if (typeof remote == "string") {
this._notice(`Failed to connect to remote for compaction. ${remote}`);
return;
}
const customHeaders = parseHeaderValues(this.settings.couchDB_CustomHeaders);
const credential = generateCredentialObject(this.settings);
const request = async (path: string, method: string = "GET", body: any = undefined) => {
const req = await _requestToCouchDB(
this.settings.couchDB_URI + (this.settings.couchDB_DBNAME ? `/${this.settings.couchDB_DBNAME}` : ""),
credential,
window.origin,
path,
body,
method,
customHeaders
);
return req;
};
let revsLimit = "";
const req = await request(`_revs_limit`, "GET");
if (req.status == 200) {
revsLimit = req.text.trim();
this._info(`Remote database _revs_limit: ${revsLimit}`);
} else {
this._notice(`Failed to get remote database _revs_limit. Status: ${req.status}`);
return;
}
const req2 = await request(`_revs_limit`, "PUT", 1);
if (req2.status == 200) {
this._info(`Set remote database _revs_limit to 1 for compaction.`);
}
try {
await this.compactDatabase();
} finally {
// Restore revs_limit
if (revsLimit) {
const req3 = await request(`_revs_limit`, "PUT", parseInt(revsLimit));
if (req3.status == 200) {
this._info(`Restored remote database _revs_limit to ${revsLimit}.`);
} else {
this._notice(
`Failed to restore remote database _revs_limit. Status: ${req3.status} / ${req3.text}`
);
}
}
}
}
async gcv3() {
if (!this.isAvailable()) return;
const replicator = this.plugin.replicator as LiveSyncCouchDBReplicator;
// Start one-shot replication to ensure all changes are synced before GC.
const r0 = await replicator.openOneShotReplication(this.settings, false, false, "sync");
if (!r0) {
this._notice(
"Failed to start one-shot replication before Garbage Collection. Garbage Collection Cancelled."
);
return;
}
// Delete the chunk, but first verify the following:
// Fetch the list of accepted nodes from the replicator.
const OPTION_CANCEL = "Cancel Garbage Collection";
const info = await this.plugin.replicator.getConnectedDeviceList();
if (!info) {
this._notice("No connected device information found. Cancelling Garbage Collection.");
return;
}
const { accepted_nodes, node_info } = info;
//1. Compare accepted_nodes and node_info, and confirm whether it is acceptable to delete nodes not present in node_info.
const infoMissingNodes = [] as string[];
for (const node of accepted_nodes) {
if (!(node in node_info)) {
infoMissingNodes.push(node);
}
}
if (infoMissingNodes.length > 0) {
const message = `The following accepted nodes are missing their node information:\n- ${infoMissingNodes.join("\n- ")}\n\nThis indicates that they have not been connected for some time or have been left on an older version.
It is preferable to update all devices if possible. If you have any devices that are no longer in use, you can clear all accepted nodes by locking the remote once.`;
const OPTION_IGNORE = "Ignore and Proceed";
// const OPTION_DELETE = "Delete them and proceed";
const buttons = [OPTION_CANCEL, OPTION_IGNORE] as const;
const result = await this.plugin.confirm.askSelectStringDialogue(message, buttons, {
title: "Node Information Missing",
defaultAction: OPTION_CANCEL,
});
if (result === OPTION_CANCEL) {
this._notice("Garbage Collection cancelled by user.");
return;
} else if (result === OPTION_IGNORE) {
this._notice("Proceeding with Garbage Collection, ignoring missing nodes.");
}
}
//2. Check whether the progress values in NodeData are roughly the same (only the numerical part is needed).
const progressValues = Object.values(node_info)
.map((e) => e.progress.split("-")[0])
.map((e) => parseInt(e));
const maxProgress = Math.max(...progressValues);
const minProgress = Math.min(...progressValues);
const progressDifference = maxProgress - minProgress;
const OPTION_PROCEED = "Proceed Garbage Collection";
// - If they differ significantly, the node may not have completed synchronisation, potentially causing conflicts. Display a confirmation dialog as a precaution.
// - If they are not significantly different, display the standard confirmation dialogue message.
const detail = `> [!INFO]- The connected devices have been detected as follows:
${Object.entries(node_info)
.map(
([nodeId, nodeData]) =>
`> - Device: ${nodeData.device_name} (Node ID: ${nodeId})
> - Obsidian version: ${nodeData.app_version}
> - Plug-in version: ${nodeData.plugin_version}
> - Progress: ${nodeData.progress.split("-")[0]}`
)
.join("\n")}
`;
const message =
progressDifference != 0
? `Some devices have differing progress values (max: ${maxProgress}, min: ${minProgress}).
This may indicate that some devices have not completed synchronisation, which could lead to conflicts. We strongly recommend confirming that all devices are synchronised before proceeding.`
: `All devices have the same progress value (${maxProgress}). Your devices seem to be synchronised, and you can proceed with Garbage Collection.`;
const buttons = [OPTION_PROCEED, OPTION_CANCEL] as const;
const defaultAction = progressDifference != 0 ? OPTION_CANCEL : OPTION_PROCEED;
const result = await this.plugin.confirm.askSelectStringDialogue(message + "\n\n" + detail, buttons, {
title: "Garbage Collection Confirmation",
defaultAction,
});
if (result !== OPTION_PROCEED) {
this._notice("Garbage Collection cancelled by user.");
return;
}
this._notice("Proceeding with Garbage Collection.");
//- 3. Once OK is confirmed in the dialogue, execute the chunk deletion. This is performed on the local database and immediately reflected on the remote. After reflecting on the remote, perform compaction.
const gcStartTime = Date.now();
// Perform Garbage Collection (new implementation).
const localDatabase = this.localDatabase.localDatabase;
const usedChunks = new Set<DocumentID>();
const allChunks = new Map<DocumentID, string>();
const IDs = this.localDatabase.findEntryNames("", "", {});
let i = 0;
const doc_count = (await localDatabase.info()).doc_count;
for await (const id of IDs) {
const doc = await this.localDatabase.getRaw(id as DocumentID);
i++;
if (i % 100 == 0) {
this._notice(`Garbage Collection: Scanned ${i} / ~${doc_count}`, "gc-scanning");
}
if (!doc) continue;
if ("children" in doc) {
const children = (doc.children || []) as DocumentID[];
for (const chunkId of children) {
usedChunks.add(chunkId);
}
} else if (doc.type === EntryTypes.CHUNK) {
allChunks.set(doc._id as DocumentID, doc._rev);
}
}
this._notice(
`Garbage Collection: Scanning completed. Total chunks: ${allChunks.size}, Used chunks: ${usedChunks.size}`,
"gc-scanning"
);
const unusedChunks = [...allChunks.keys()].filter((e) => !usedChunks.has(e));
this._notice(`Garbage Collection: Found ${unusedChunks.length} unused chunks to delete.`, "gc-scanning");
const deleteChunkDocs = unusedChunks.map(
(chunkId) =>
({
_id: chunkId,
_deleted: true,
_rev: allChunks.get(chunkId),
}) as EntryLeaf
);
const response = await localDatabase.bulkDocs(deleteChunkDocs);
const deletedCount = response.filter((e) => "ok" in e).length;
const gcEndTime = Date.now();
this._notice(
`Garbage Collection completed. Deleted chunks: ${deletedCount} / ${unusedChunks.length}. Time taken: ${(gcEndTime - gcStartTime) / 1000} seconds.`
);
// Send changes to remote
const r = await replicator.openOneShotReplication(this.settings, false, false, "pushOnly");
// Wait for replication to complete
if (!r) {
this._notice("Failed to start replication after Garbage Collection.");
return;
}
// Perform compaction
await this.compactDatabase();
this.clearHash();
}
}

View File

@@ -1,278 +0,0 @@
import { P2PReplicatorPaneView, VIEW_TYPE_P2P } from "./P2PReplicator/P2PReplicatorPaneView.ts";
import {
AutoAccepting,
LOG_LEVEL_NOTICE,
P2P_DEFAULT_SETTINGS,
REMOTE_P2P,
type EntryDoc,
type P2PSyncSetting,
type RemoteDBSettings,
} from "../../lib/src/common/types.ts";
import { LiveSyncCommands } from "../LiveSyncCommands.ts";
import {
LiveSyncTrysteroReplicator,
setReplicatorFunc,
} from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator.ts";
import { EVENT_REQUEST_OPEN_P2P, eventHub } from "../../common/events.ts";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator.ts";
import { LOG_LEVEL_INFO, LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import type { CommandShim } from "../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
import {
addP2PEventHandlers,
closeP2PReplicator,
openP2PReplicator,
P2PLogCollector,
removeP2PReplicatorInstance,
type P2PReplicatorBase,
} from "../../lib/src/replication/trystero/P2PReplicatorCore.ts";
import { reactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
import type { Confirm } from "../../lib/src/interfaces/Confirm.ts";
import type ObsidianLiveSyncPlugin from "../../main.ts";
import type { SimpleStore } from "octagonal-wheels/databases/SimpleStoreBase";
// import { getPlatformName } from "../../lib/src/PlatformAPIs/obsidian/Environment.ts";
import type { LiveSyncCore } from "../../main.ts";
import { TrysteroReplicator } from "../../lib/src/replication/trystero/TrysteroReplicator.ts";
import { SETTING_KEY_P2P_DEVICE_NAME } from "../../lib/src/common/types.ts";
export class P2PReplicator extends LiveSyncCommands implements P2PReplicatorBase, CommandShim {
storeP2PStatusLine = reactiveSource("");
getSettings(): P2PSyncSetting {
return this.plugin.settings;
}
get settings() {
return this.plugin.settings;
}
getDB() {
return this.plugin.localDatabase.localDatabase;
}
get confirm(): Confirm {
return this.plugin.confirm;
}
_simpleStore!: SimpleStore<any>;
simpleStore(): SimpleStore<any> {
return this._simpleStore;
}
constructor(plugin: ObsidianLiveSyncPlugin) {
super(plugin);
setReplicatorFunc(() => this._replicatorInstance);
addP2PEventHandlers(this);
this.afterConstructor();
// onBindFunction is called in super class
// this.onBindFunction(plugin, plugin.services);
}
async handleReplicatedDocuments(docs: EntryDoc[]): Promise<void> {
// console.log("Processing Replicated Docs", docs);
return await this.services.replication.parseSynchroniseResult(
docs as PouchDB.Core.ExistingDocument<EntryDoc>[]
);
}
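// Returning `undefined!` below lets other providers on the getNewReplicator
// handler chain supply a replicator when the configured remote is not P2P
// (assumed contract of the handler chain; only REMOTE_P2P is handled here).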
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator> {
const settings = { ...this.settings, ...settingOverride };
if (settings.remoteType == REMOTE_P2P) {
return Promise.resolve(new LiveSyncTrysteroReplicator(this.plugin));
}
return undefined!;
}
_replicatorInstance?: TrysteroReplicator;
p2pLogCollector = new P2PLogCollector();
afterConstructor() {
return;
}
async open() {
await openP2PReplicator(this);
}
async close() {
await closeP2PReplicator(this);
}
getConfig(key: string) {
return this.services.config.getSmallConfig(key);
}
setConfig(key: string, value: string) {
return this.services.config.setSmallConfig(key, value);
}
enableBroadcastCastings() {
return this?._replicatorInstance?.enableBroadcastChanges();
}
disableBroadcastCastings() {
return this?._replicatorInstance?.disableBroadcastChanges();
}
init() {
this._simpleStore = this.services.database.openSimpleStore("p2p-sync");
return Promise.resolve(this);
}
async initialiseP2PReplicator(): Promise<TrysteroReplicator> {
await this.init();
try {
if (this._replicatorInstance) {
await this._replicatorInstance.close();
this._replicatorInstance = undefined;
}
if (!this.settings.P2P_AppID) {
this.settings.P2P_AppID = P2P_DEFAULT_SETTINGS.P2P_AppID;
}
const getInitialDeviceName = () =>
this.getConfig(SETTING_KEY_P2P_DEVICE_NAME) || this.services.vault.getVaultName();
const getSettings = () => this.settings;
const store = () => this.simpleStore();
const getDB = () => this.getDB();
const getConfirm = () => this.confirm;
const getPlatform = () => this.services.API.getPlatform();
const env = {
get db() {
return getDB();
},
get confirm() {
return getConfirm();
},
get deviceName() {
return getInitialDeviceName();
},
get platform() {
return getPlatform();
},
get settings() {
return getSettings();
},
processReplicatedDocs: async (docs: EntryDoc[]): Promise<void> => {
await this.handleReplicatedDocuments(docs);
// Hand the replicated docs to the standard replication pipeline for parsing and storage.
},
get simpleStore() {
return store();
},
};
this._replicatorInstance = new TrysteroReplicator(env);
return this._replicatorInstance;
} catch (e) {
this._log(
e instanceof Error ? e.message : "Something occurred on Initialising P2P Replicator",
LOG_LEVEL_INFO
);
this._log(e, LOG_LEVEL_VERBOSE);
throw e;
}
}
onunload(): void {
removeP2PReplicatorInstance();
void this.close();
}
onload(): void | Promise<void> {
eventHub.onEvent(EVENT_REQUEST_OPEN_P2P, () => {
void this.openPane();
});
this.p2pLogCollector.p2pReplicationLine.onChanged((line) => {
this.storeP2PStatusLine.value = line.value;
});
}
async _everyOnInitializeDatabase(): Promise<boolean> {
await this.initialiseP2PReplicator();
return Promise.resolve(true);
}
private async _allSuspendExtraSync() {
this.plugin.settings.P2P_Enabled = false;
this.plugin.settings.P2P_AutoAccepting = AutoAccepting.NONE;
this.plugin.settings.P2P_AutoBroadcast = false;
this.plugin.settings.P2P_AutoStart = false;
this.plugin.settings.P2P_AutoSyncPeers = "";
this.plugin.settings.P2P_AutoWatchPeers = "";
return await Promise.resolve(true);
}
// async $everyOnLoadStart() {
// return await Promise.resolve();
// }
async openPane() {
await this.services.API.showWindow(VIEW_TYPE_P2P);
}
async _everyOnloadStart(): Promise<boolean> {
this.plugin.registerView(VIEW_TYPE_P2P, (leaf) => new P2PReplicatorPaneView(leaf, this.plugin));
this.plugin.addCommand({
id: "open-p2p-replicator",
name: "P2P Sync : Open P2P Replicator",
callback: async () => {
await this.openPane();
},
});
this.plugin.addCommand({
id: "p2p-establish-connection",
name: "P2P Sync : Connect to the Signalling Server",
checkCallback: (isChecking) => {
if (isChecking) {
return !(this._replicatorInstance?.server?.isServing ?? false);
}
void this.open();
},
});
this.plugin.addCommand({
id: "p2p-close-connection",
name: "P2P Sync : Disconnect from the Signalling Server",
checkCallback: (isChecking) => {
if (isChecking) {
return this._replicatorInstance?.server?.isServing ?? false;
}
Logger(`Closing P2P Connection`, LOG_LEVEL_NOTICE);
void this.close();
},
});
this.plugin.addCommand({
id: "replicate-now-by-p2p",
name: "Replicate now by P2P",
checkCallback: (isChecking) => {
if (isChecking) {
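// Only shown while connected and when P2P is not already the main remote
// (in that case the standard replicate command covers it; assumed intent).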
if (this.settings.remoteType == REMOTE_P2P) return false;
if (!this._replicatorInstance?.server?.isServing) return false;
return true;
}
void this._replicatorInstance?.replicateFromCommand(false);
},
});
this.plugin
.addRibbonIcon("waypoints", "P2P Replicator", async () => {
await this.openPane();
})
.addClass("livesync-ribbon-replicate-p2p");
return await Promise.resolve(true);
}
_everyAfterResumeProcess(): Promise<boolean> {
if (this.settings.P2P_Enabled && this.settings.P2P_AutoStart) {
setTimeout(() => void this.open(), 100);
}
const rep = this._replicatorInstance;
rep?.allowReconnection();
return Promise.resolve(true);
}
_everyBeforeSuspendProcess(): Promise<boolean> {
const rep = this._replicatorInstance;
rep?.disconnectFromServer();
return Promise.resolve(true);
}
override onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
services.databaseEvents.onDatabaseInitialisation.addHandler(this._everyOnInitializeDatabase.bind(this));
services.appLifecycle.onInitialise.addHandler(this._everyOnloadStart.bind(this));
services.appLifecycle.onSuspending.addHandler(this._everyBeforeSuspendProcess.bind(this));
services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
services.setting.suspendExtraSync.addHandler(this._allSuspendExtraSync.bind(this));
}
}

View File

@@ -1,496 +0,0 @@
<script lang="ts">
import { onMount, setContext } from "svelte";
import { AutoAccepting, DEFAULT_SETTINGS, type P2PSyncSetting } from "../../../lib/src/common/types";
import {
AcceptedStatus,
ConnectionStatus,
type CommandShim,
type PeerStatus,
type PluginShim,
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
import PeerStatusRow from "../P2PReplicator/PeerStatusRow.svelte";
import { EVENT_LAYOUT_READY, eventHub } from "../../../common/events";
import {
type PeerInfo,
type P2PServerInfo,
EVENT_SERVER_STATUS,
EVENT_REQUEST_STATUS,
EVENT_P2P_REPLICATOR_STATUS,
} from "../../../lib/src/replication/trystero/TrysteroReplicatorP2PServer";
import { type P2PReplicatorStatus } from "../../../lib/src/replication/trystero/TrysteroReplicator";
import { $msg as _msg } from "../../../lib/src/common/i18n";
import { SETTING_KEY_P2P_DEVICE_NAME } from "../../../lib/src/common/types";
interface Props {
plugin: PluginShim;
cmdSync: CommandShim;
}
let { plugin, cmdSync }: Props = $props();
// const cmdSync = plugin.getAddOn<P2PReplicator>("P2PReplicator")!;
setContext("getReplicator", () => cmdSync);
const initialSettings = { ...plugin.settings };
let settings = $state<P2PSyncSetting>(initialSettings);
// const vaultName = service.vault.getVaultName();
// const dbKey = `${vaultName}-p2p-device-name`;
const initialDeviceName = cmdSync.getConfig(SETTING_KEY_P2P_DEVICE_NAME) ?? plugin.services.vault.getVaultName();
let deviceName = $state<string>(initialDeviceName);
let eP2PEnabled = $state<boolean>(initialSettings.P2P_Enabled);
let eRelay = $state<string>(initialSettings.P2P_relays);
let eRoomId = $state<string>(initialSettings.P2P_roomID);
let ePassword = $state<string>(initialSettings.P2P_passphrase);
let eAppId = $state<string>(initialSettings.P2P_AppID);
let eDeviceName = $state<string>(initialDeviceName);
let eAutoAccept = $state<boolean>(initialSettings.P2P_AutoAccepting == AutoAccepting.ALL);
let eAutoStart = $state<boolean>(initialSettings.P2P_AutoStart);
let eAutoBroadcast = $state<boolean>(initialSettings.P2P_AutoBroadcast);
const isP2PEnabledModified = $derived.by(() => eP2PEnabled !== settings.P2P_Enabled);
const isRelayModified = $derived.by(() => eRelay !== settings.P2P_relays);
const isRoomIdModified = $derived.by(() => eRoomId !== settings.P2P_roomID);
const isPasswordModified = $derived.by(() => ePassword !== settings.P2P_passphrase);
const isAppIdModified = $derived.by(() => eAppId !== settings.P2P_AppID);
const isDeviceNameModified = $derived.by(() => eDeviceName !== deviceName);
const isAutoAcceptModified = $derived.by(() => eAutoAccept !== (settings.P2P_AutoAccepting == AutoAccepting.ALL));
const isAutoStartModified = $derived.by(() => eAutoStart !== settings.P2P_AutoStart);
const isAutoBroadcastModified = $derived.by(() => eAutoBroadcast !== settings.P2P_AutoBroadcast);
const isAnyModified = $derived.by(
() =>
isP2PEnabledModified ||
isRelayModified ||
isRoomIdModified ||
isPasswordModified ||
isAppIdModified ||
isDeviceNameModified ||
isAutoAcceptModified ||
isAutoStartModified ||
isAutoBroadcastModified
);
async function saveAndApply() {
const newSettings = {
...plugin.settings,
P2P_Enabled: eP2PEnabled,
P2P_relays: eRelay,
P2P_roomID: eRoomId,
P2P_passphrase: ePassword,
P2P_AppID: eAppId,
P2P_AutoAccepting: eAutoAccept ? AutoAccepting.ALL : AutoAccepting.NONE,
P2P_AutoStart: eAutoStart,
P2P_AutoBroadcast: eAutoBroadcast,
};
plugin.settings = newSettings;
cmdSync.setConfig(SETTING_KEY_P2P_DEVICE_NAME, eDeviceName);
deviceName = eDeviceName;
await plugin.saveSettings();
}
async function revert() {
eP2PEnabled = settings.P2P_Enabled;
eRelay = settings.P2P_relays;
eRoomId = settings.P2P_roomID;
ePassword = settings.P2P_passphrase;
eAppId = settings.P2P_AppID;
eDeviceName = deviceName;
eAutoAccept = settings.P2P_AutoAccepting == AutoAccepting.ALL;
eAutoStart = settings.P2P_AutoStart;
eAutoBroadcast = settings.P2P_AutoBroadcast;
}
let serverInfo = $state<P2PServerInfo | undefined>(undefined);
let replicatorInfo = $state<P2PReplicatorStatus | undefined>(undefined);
const applyLoadSettings = (d: P2PSyncSetting, force: boolean) => {
const { P2P_relays, P2P_roomID, P2P_passphrase, P2P_AppID, P2P_AutoAccepting } = d;
if (force || !isP2PEnabledModified) eP2PEnabled = d.P2P_Enabled;
if (force || !isRelayModified) eRelay = P2P_relays;
if (force || !isRoomIdModified) eRoomId = P2P_roomID;
if (force || !isPasswordModified) ePassword = P2P_passphrase;
if (force || !isAppIdModified) eAppId = P2P_AppID;
const newAutoAccept = P2P_AutoAccepting === AutoAccepting.ALL;
if (force || !isAutoAcceptModified) eAutoAccept = newAutoAccept;
if (force || !isAutoStartModified) eAutoStart = d.P2P_AutoStart;
if (force || !isAutoBroadcastModified) eAutoBroadcast = d.P2P_AutoBroadcast;
settings = d;
};
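// Refresh only the fields the user has not edited locally, so an external
// settings save does not clobber in-progress edits; force=true (used on
// layout-ready) overwrites everything.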
onMount(() => {
const r = eventHub.onEvent("setting-saved", async (d) => {
applyLoadSettings(d, false);
closeServer();
});
const rx = eventHub.onEvent(EVENT_LAYOUT_READY, () => {
applyLoadSettings(plugin.settings, true);
});
const r2 = eventHub.onEvent(EVENT_SERVER_STATUS, (status) => {
serverInfo = status;
advertisements = status?.knownAdvertisements ?? [];
});
const r3 = eventHub.onEvent(EVENT_P2P_REPLICATOR_STATUS, (status) => {
replicatorInfo = status;
});
eventHub.emitEvent(EVENT_REQUEST_STATUS);
return () => {
r();
rx();
r2();
r3();
};
});
let isConnected = $derived.by(() => {
return serverInfo?.isConnected ?? false;
});
let serverPeerId = $derived.by(() => {
return serverInfo?.serverPeerId ?? "";
});
let advertisements = $state<PeerInfo[]>([]);
let autoSyncPeers = $derived.by(() =>
settings.P2P_AutoSyncPeers.split(",")
.map((e) => e.trim())
.filter((e) => e)
);
let autoWatchPeers = $derived.by(() =>
settings.P2P_AutoWatchPeers.split(",")
.map((e) => e.trim())
.filter((e) => e)
);
let syncOnCommand = $derived.by(() =>
settings.P2P_SyncOnReplication.split(",")
.map((e) => e.trim())
.filter((e) => e)
);
const peers = $derived.by(() =>
advertisements.map((ad) => {
let accepted: AcceptedStatus;
const isTemporaryAccepted = ad.isTemporaryAccepted;
if (isTemporaryAccepted === undefined) {
if (ad.isAccepted === undefined) {
accepted = AcceptedStatus.UNKNOWN;
} else {
accepted = ad.isAccepted ? AcceptedStatus.ACCEPTED : AcceptedStatus.DENIED;
}
} else if (isTemporaryAccepted === true) {
accepted = AcceptedStatus.ACCEPTED_IN_SESSION;
} else {
accepted = AcceptedStatus.DENIED_IN_SESSION;
}
const isFetching = (replicatorInfo?.replicatingFrom.indexOf(ad.peerId) ?? -1) !== -1;
const isSending = (replicatorInfo?.replicatingTo.indexOf(ad.peerId) ?? -1) !== -1;
const isWatching = (replicatorInfo?.watchingPeers.indexOf(ad.peerId) ?? -1) !== -1;
const syncOnStart = autoSyncPeers.indexOf(ad.name) !== -1;
const watchOnStart = autoWatchPeers.indexOf(ad.name) !== -1;
const syncOnReplicationCommand = syncOnCommand.indexOf(ad.name) !== -1;
const st: PeerStatus = {
name: ad.name,
peerId: ad.peerId,
accepted: accepted,
status: ad.isAccepted ? ConnectionStatus.CONNECTED : ConnectionStatus.DISCONNECTED,
isSending: isSending,
isFetching: isFetching,
isWatching: isWatching,
syncOnConnect: syncOnStart,
watchOnConnect: watchOnStart,
syncOnReplicationCommand: syncOnReplicationCommand,
};
return st;
})
);
function useDefaultRelay() {
eRelay = DEFAULT_SETTINGS.P2P_relays;
}
function _generateRandom() {
return (Math.floor(Math.random() * 1000) + 1000).toString().substring(1);
}
function generateRandom(length: number) {
let buf = "";
while (buf.length < length) {
buf += "-" + _generateRandom();
}
return buf.substring(1, length);
}
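// chooseRandom() below yields Room IDs like "123-456-789-xyz" (illustrative):
// digit groups plus a short base-36 suffix. The Room ID only partitions peers;
// confidentiality comes from the passphrase, which encrypts the connection.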
function chooseRandom() {
eRoomId = generateRandom(12) + "-" + Math.random().toString(36).substring(2, 5);
}
async function openServer() {
await cmdSync.open();
}
async function closeServer() {
await cmdSync.close();
}
function startBroadcasting() {
void cmdSync.enableBroadcastCastings();
}
function stopBroadcasting() {
void cmdSync.disableBroadcastCastings();
}
const initialDialogStatusKey = `p2p-dialog-status`;
const getDialogStatus = () => {
try {
const initialDialogStatus = JSON.parse(cmdSync.getConfig(initialDialogStatusKey) ?? "{}") as {
notice?: boolean;
setting?: boolean;
};
return initialDialogStatus;
} catch (e) {
return {};
}
};
const initialDialogStatus = getDialogStatus();
let isNoticeOpened = $state<boolean>(initialDialogStatus.notice ?? true);
let isSettingOpened = $state<boolean>(initialDialogStatus.setting ?? true);
$effect(() => {
const dialogStatus = {
notice: isNoticeOpened,
setting: isSettingOpened,
};
cmdSync.setConfig(initialDialogStatusKey, JSON.stringify(dialogStatus));
});
let isObsidian = $derived.by(() => {
return plugin.services.API.getPlatform() === "obsidian";
});
</script>
<article>
<h1>Peer to Peer Replicator</h1>
<details bind:open={isNoticeOpened}>
<summary>{_msg("P2P.Note.Summary")}</summary>
<p class="important">{_msg("P2P.Note.important_note")}</p>
<p class="important-sub">
{_msg("P2P.Note.important_note_sub")}
</p>
{#each _msg("P2P.Note.description").split("\n\n") as paragraph}
<p>{paragraph}</p>
{/each}
</details>
<h2>Connection Settings</h2>
{#if isObsidian}
You can configure this in the Obsidian Plugin Settings.
{:else}
<details bind:open={isSettingOpened}>
<summary>{eRelay}</summary>
<table class="settings">
<tbody>
<tr>
<th> Enable P2P Replicator </th>
<td>
<label class={{ "is-dirty": isP2PEnabledModified }}>
<input type="checkbox" bind:checked={eP2PEnabled} />
</label>
</td>
</tr><tr>
<th> Relay settings </th>
<td>
<label class={{ "is-dirty": isRelayModified }}>
<input
type="text"
placeholder="wss://exp-relay.vrtmrz.net, wss://xxxxx"
bind:value={eRelay}
autocomplete="off"
/>
<button onclick={() => useDefaultRelay()}> Use vrtmrz's relay </button>
</label>
</td>
</tr>
<tr>
<th> Room ID </th>
<td>
<label class={{ "is-dirty": isRoomIdModified }}>
<input
type="text"
placeholder="anything-you-like"
bind:value={eRoomId}
autocomplete="off"
spellcheck="false"
autocorrect="off"
/>
<button onclick={() => chooseRandom()}> Use Random Number </button>
</label>
<span>
<small>
This isolates your connections from other groups of devices. Use the same Room ID on
every device you want to connect.</small
>
</span>
</td>
</tr>
<tr>
<th> Password </th>
<td>
<label class={{ "is-dirty": isPasswordModified }}>
<input type="password" placeholder="password" bind:value={ePassword} />
</label>
<span>
<small>
This password is used to encrypt the connection. Use something long enough.
</small>
</span>
</td>
</tr>
<tr>
<th> This device name </th>
<td>
<label class={{ "is-dirty": isDeviceNameModified }}>
<input
type="text"
placeholder="iphone-16"
bind:value={eDeviceName}
autocomplete="off"
/>
</label>
<span>
<small>
Device name to identify this device. Please use a short one for stable peer
detection, e.g., "iphone-16" or "macbook-2021".
</small>
</span>
</td>
</tr>
<tr>
<th> Auto Connect </th>
<td>
<label class={{ "is-dirty": isAutoStartModified }}>
<input type="checkbox" bind:checked={eAutoStart} />
</label>
</td>
</tr>
<tr>
<th> Start change-broadcasting on Connect </th>
<td>
<label class={{ "is-dirty": isAutoBroadcastModified }}>
<input type="checkbox" bind:checked={eAutoBroadcast} />
</label>
</td>
</tr>
<!-- <tr>
<th> Auto Accepting </th>
<td>
<label class={{ "is-dirty": isAutoAcceptModified }}>
<input type="checkbox" bind:checked={eAutoAccept} />
</label>
</td>
</tr> -->
</tbody>
</table>
<button disabled={!isAnyModified} class="button mod-cta" onclick={saveAndApply}>Save and Apply</button>
<button disabled={!isAnyModified} class="button" onclick={revert}>Revert changes</button>
</details>
{/if}
<div>
<h2>Signalling Server Connection</h2>
<div>
{#if !isConnected}
<p>No Connection</p>
{:else}
<p>Connected to Signalling Server (as Peer ID: {serverPeerId})</p>
{/if}
</div>
<div>
{#if !isConnected}
<button onclick={openServer}>Connect</button>
{:else}
<button onclick={closeServer}>Disconnect</button>
{#if replicatorInfo?.isBroadcasting !== undefined}
{#if replicatorInfo?.isBroadcasting}
<button onclick={stopBroadcasting}>Stop Broadcasting</button>
{:else}
<button onclick={startBroadcasting}>Start Broadcasting</button>
{/if}
{/if}
<details>
<summary>Broadcasting?</summary>
<p>
<small>
If you want to use `LiveSync`, you should broadcast changes. All `watching` peers that
detect this will start replication to fetch them. <br />
However, this should not be enabled if you want stronger secrecy.
</small>
</p>
</details>
{/if}
</div>
</div>
<div>
<h2>Peers</h2>
<table class="peers">
<thead>
<tr>
<th>Name</th>
<th>Action</th>
<th>Command</th>
</tr>
</thead>
<tbody>
{#each peers as peer}
<PeerStatusRow peerStatus={peer}></PeerStatusRow>
{/each}
</tbody>
</table>
</div>
</article>
<style>
article {
max-width: 100%;
}
article p {
user-select: text;
-webkit-user-select: text;
}
h2 {
margin-top: var(--size-4-1);
margin-bottom: var(--size-4-1);
padding-bottom: var(--size-4-1);
border-bottom: 1px solid var(--background-modifier-border);
}
label.is-dirty {
background-color: var(--background-modifier-error);
}
input {
background-color: transparent;
}
th {
/* display: flex;
justify-content: center;
align-items: center; */
min-height: var(--input-height);
}
td {
min-height: var(--input-height);
}
td > label {
display: flex;
flex-direction: row;
align-items: center;
justify-content: flex-start;
min-height: var(--input-height);
}
td > label > * {
margin: auto var(--size-4-1);
}
table.peers {
width: 100%;
}
.important {
color: var(--text-error);
font-size: 1.2em;
font-weight: bold;
}
.important-sub {
color: var(--text-warning);
}
.settings label {
display: flex;
flex-direction: row;
align-items: center;
justify-content: flex-start;
flex-wrap: wrap;
}
</style>

View File

@@ -1,198 +0,0 @@
import { Menu, WorkspaceLeaf } from "@/deps.ts";
import ReplicatorPaneComponent from "./P2PReplicatorPane.svelte";
import type ObsidianLiveSyncPlugin from "../../../main.ts";
import { mount } from "svelte";
import { SvelteItemView } from "../../../common/SvelteItemView.ts";
import { eventHub } from "../../../common/events.ts";
import { unique } from "octagonal-wheels/collection";
import { LOG_LEVEL_NOTICE, REMOTE_P2P } from "../../../lib/src/common/types.ts";
import { Logger } from "../../../lib/src/common/logger.ts";
import { P2PReplicator } from "../CmdP2PReplicator.ts";
import {
EVENT_P2P_PEER_SHOW_EXTRA_MENU,
type PeerStatus,
} from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon.ts";
export const VIEW_TYPE_P2P = "p2p-replicator";
function addToList(item: string, list: string) {
return unique(
list
.split(",")
.map((e) => e.trim())
.concat(item)
.filter((p) => p)
).join(",");
}
function removeFromList(item: string, list: string) {
return list
.split(",")
.map((e) => e.trim())
.filter((p) => p !== item)
.filter((p) => p)
.join(",");
}
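// e.g. addToList("b", "a, b ,c") === "a,b,c" (trimmed and deduplicated), and
// removeFromList("b", "a,b,c") === "a,c".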
export class P2PReplicatorPaneView extends SvelteItemView {
plugin: ObsidianLiveSyncPlugin;
icon = "waypoints";
title: string = "";
navigation = false;
getIcon(): string {
return "waypoints";
}
get replicator() {
const r = this.plugin.getAddOn<P2PReplicator>(P2PReplicator.name);
if (!r || !r._replicatorInstance) {
throw new Error("Replicator not found");
}
return r._replicatorInstance;
}
async replicateFrom(peer: PeerStatus) {
await this.replicator.replicateFrom(peer.peerId);
}
async replicateTo(peer: PeerStatus) {
await this.replicator.requestSynchroniseToPeer(peer.peerId);
}
async getRemoteConfig(peer: PeerStatus) {
Logger(
`Requesting remote config for ${peer.name}. Please input the passphrase on the remote device`,
LOG_LEVEL_NOTICE
);
const remoteConfig = await this.replicator.getRemoteConfig(peer.peerId);
if (remoteConfig) {
Logger(`Remote config for ${peer.name} is retrieved successfully`);
const DROP = "Yes, and drop local database";
const KEEP = "Yes, but keep local database";
const CANCEL = "No, cancel";
const yn = await this.plugin.confirm.askSelectStringDialogue(
`Do you really want to apply the remote config? This will overwrite your current config immediately and restart the plug-in.
You can also drop the local database to rebuild from the remote device.`,
[DROP, KEEP, CANCEL] as const,
{
defaultAction: CANCEL,
title: "Apply Remote Config ",
}
);
if (yn === DROP || yn === KEEP) {
if (yn === DROP) {
if (remoteConfig.remoteType !== REMOTE_P2P) {
const yn2 = await this.plugin.confirm.askYesNoDialog(
`Do you want to set the remote type to "P2P Sync" to rebuild by "P2P replication"?`,
{
title: "Rebuild from remote device",
}
);
if (yn2 === "yes") {
remoteConfig.remoteType = REMOTE_P2P;
remoteConfig.P2P_RebuildFrom = peer.name;
}
}
}
this.plugin.settings = remoteConfig;
await this.plugin.saveSettings();
if (yn === DROP) {
await this.plugin.rebuilder.scheduleFetch();
} else {
this.plugin.services.appLifecycle.scheduleRestart();
}
} else {
Logger(`Cancelled\nRemote config for ${peer.name} is not applied`, LOG_LEVEL_NOTICE);
}
} else {
Logger(`Cannot retrieve remote config for ${peer.peerId}`);
}
}
async toggleProp(peer: PeerStatus, prop: "syncOnConnect" | "watchOnConnect" | "syncOnReplicationCommand") {
const settingMap = {
syncOnConnect: "P2P_AutoSyncPeers",
watchOnConnect: "P2P_AutoWatchPeers",
syncOnReplicationCommand: "P2P_SyncOnReplication",
} as const;
const targetSetting = settingMap[prop];
if (peer[prop]) {
this.plugin.settings[targetSetting] = removeFromList(peer.name, this.plugin.settings[targetSetting]);
} else {
this.plugin.settings[targetSetting] = addToList(peer.name, this.plugin.settings[targetSetting]);
}
await this.plugin.saveSettings();
}
m?: Menu;
constructor(leaf: WorkspaceLeaf, plugin: ObsidianLiveSyncPlugin) {
super(leaf);
this.plugin = plugin;
eventHub.onEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, ({ peer, event }) => {
if (this.m) {
this.m.hide();
}
this.m = new Menu()
.addItem((item) => item.setTitle("📥 Only Fetch").onClick(() => this.replicateFrom(peer)))
.addItem((item) => item.setTitle("📤 Only Send").onClick(() => this.replicateTo(peer)))
.addSeparator()
.addItem((item) => {
item.setTitle("🔧 Get Configuration").onClick(async () => {
await this.getRemoteConfig(peer);
});
})
.addSeparator()
.addItem((item) => {
const mark = peer.syncOnConnect ? "checkmark" : null;
item.setTitle("Toggle Sync on connect")
.onClick(async () => {
await this.toggleProp(peer, "syncOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = peer.watchOnConnect ? "checkmark" : null;
item.setTitle("Toggle Watch on connect")
.onClick(async () => {
await this.toggleProp(peer, "watchOnConnect");
})
.setIcon(mark);
})
.addItem((item) => {
const mark = peer.syncOnReplicationCommand ? "checkmark" : null;
item.setTitle("Toggle Sync on `Replicate now` command")
.onClick(async () => {
await this.toggleProp(peer, "syncOnReplicationCommand");
})
.setIcon(mark);
});
this.m.showAtPosition({ x: event.x, y: event.y });
});
}
getViewType() {
return VIEW_TYPE_P2P;
}
getDisplayText() {
return "Peer-to-Peer Replicator";
}
override async onClose(): Promise<void> {
await super.onClose();
if (this.m) {
this.m.hide();
}
}
instantiateComponent(target: HTMLElement) {
const cmdSync = this.plugin.getAddOn<P2PReplicator>(P2PReplicator.name);
if (!cmdSync) {
throw new Error("Replicator not found");
}
return mount(ReplicatorPaneComponent, {
target: target,
props: {
plugin: cmdSync.plugin,
cmdSync: cmdSync,
},
});
}
}

View File

@@ -1,259 +0,0 @@
<script lang="ts">
import { getContext } from "svelte";
import { AcceptedStatus, type PeerStatus } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
import type { P2PReplicator } from "../CmdP2PReplicator";
import { eventHub } from "../../../common/events";
import { EVENT_P2P_PEER_SHOW_EXTRA_MENU } from "../../../lib/src/replication/trystero/P2PReplicatorPaneCommon";
interface Props {
peerStatus: PeerStatus;
}
let { peerStatus }: Props = $props();
let peer = $derived(peerStatus);
function select<T extends string | number | symbol, U>(d: T, cond: Record<T, U>): U;
function select<T extends string | number | symbol, U, V>(d: T, cond: Record<T, U>, def: V): U | V;
function select<T extends string | number | symbol, U>(d: T, cond: Record<T, U>, def?: U): U | undefined {
return d in cond ? cond[d] : def;
}
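// e.g. select("a", { a: 1, b: 2 }) === 1, and select("c", { a: 1 }, 0) === 0
// (the third argument is the fallback when the key is absent).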
let statusChips = $derived.by(() =>
[
peer.isWatching ? ["WATCHING"] : [],
peer.isFetching ? ["FETCHING"] : [],
peer.isSending ? ["SENDING"] : [],
].flat()
);
let acceptedStatusChip = $derived.by(() =>
select(
peer.accepted.toString(),
{
[AcceptedStatus.ACCEPTED]: "ACCEPTED",
[AcceptedStatus.ACCEPTED_IN_SESSION]: "ACCEPTED (in session)",
[AcceptedStatus.DENIED_IN_SESSION]: "DENIED (in session)",
[AcceptedStatus.DENIED]: "DENIED",
[AcceptedStatus.UNKNOWN]: "NEW",
},
""
)
);
const classList = {
["SENDING"]: "connected",
["FETCHING"]: "connected",
["WATCHING"]: "connected-live",
["WAITING"]: "waiting",
["ACCEPTED"]: "accepted",
["DENIED"]: "denied",
["NEW"]: "unknown",
};
let isAccepted = $derived.by(
() => peer.accepted === AcceptedStatus.ACCEPTED || peer.accepted === AcceptedStatus.ACCEPTED_IN_SESSION
);
let isDenied = $derived.by(
() => peer.accepted === AcceptedStatus.DENIED || peer.accepted === AcceptedStatus.DENIED_IN_SESSION
);
let isNew = $derived.by(() => peer.accepted === AcceptedStatus.UNKNOWN);
function makeDecision(isAccepted: boolean, isTemporary: boolean) {
cmdReplicator._replicatorInstance?.server?.makeDecision({
peerId: peer.peerId,
name: peer.name,
decision: isAccepted,
isTemporary: isTemporary,
});
}
function revokeDecision() {
cmdReplicator._replicatorInstance?.server?.revokeDecision({
peerId: peer.peerId,
name: peer.name,
});
}
const cmdReplicator = getContext<() => P2PReplicator>("getReplicator")();
const replicator = cmdReplicator._replicatorInstance!;
const peerAttrLabels = $derived.by(() => {
const attrs = [];
if (peer.syncOnConnect) {
attrs.push("✔ SYNC");
}
if (peer.watchOnConnect) {
attrs.push("✔ WATCH");
}
if (peer.syncOnReplicationCommand) {
attrs.push("✔ SELECT");
}
return attrs;
});
function startWatching() {
replicator.watchPeer(peer.peerId);
}
function stopWatching() {
replicator.unwatchPeer(peer.peerId);
}
function sync() {
replicator.sync(peer.peerId, false);
}
function moreMenu(evt: MouseEvent) {
eventHub.emitEvent(EVENT_P2P_PEER_SHOW_EXTRA_MENU, { peer, event: evt });
}
</script>
<tr>
<td>
<div class="info">
<div class="row name">
<span class="peername">{peer.name}</span>
</div>
<div class="row peer-id">
<span class="peerid">({peer.peerId})</span>
</div>
</div>
<div class="status-chips">
<div class="row">
<span class="chip {select(acceptedStatusChip, classList)}">{acceptedStatusChip}</span>
</div>
{#if isAccepted}
<div class="row">
{#each statusChips as chip}
<span class="chip {select(chip, classList)}">{chip}</span>
{/each}
</div>
{/if}
<div class="row">
{#each peerAttrLabels as attr}
<span class="chip attr">{attr}</span>
{/each}
</div>
</div>
</td>
<td>
<div class="buttons">
<div class="row">
{#if isNew}
{#if !isAccepted}
<button class="button" onclick={() => makeDecision(true, true)}>Accept in session</button>
<button class="button mod-cta" onclick={() => makeDecision(true, false)}>Accept</button>
{/if}
{#if !isDenied}
<button class="button" onclick={() => makeDecision(false, true)}>Deny in session</button>
<button class="button mod-warning" onclick={() => makeDecision(false, false)}>Deny</button>
{/if}
{:else}
<button class="button mod-warning" onclick={() => revokeDecision()}>Revoke</button>
{/if}
</div>
</div>
</td>
<td>
{#if isAccepted}
<div class="buttons">
<div class="row">
<button class="button" onclick={sync} disabled={peer.isSending || peer.isFetching}>🔄</button>
<!-- <button class="button" onclick={replicateFrom} disabled={peer.isFetching}>📥</button>
<button class="button" onclick={replicateTo} disabled={peer.isSending}>📤</button> -->
{#if peer.isWatching}
<button class="button" onclick={stopWatching}>Stop ⚡</button>
{:else}
<button class="button" onclick={startWatching} title="live"></button>
{/if}
<button class="button" onclick={moreMenu}>...</button>
</div>
</div>
{/if}
</td>
</tr>
<style>
tr:nth-child(odd) {
background-color: var(--background-primary-alt);
}
.info {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding: var(--size-4-1) var(--size-4-1);
}
.peer-id {
font-size: 0.8em;
}
.status-chips {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
/* min-width: 10em; */
}
.buttons {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
.buttons .row {
display: flex;
justify-content: center;
align-items: center;
flex-wrap: wrap;
/* padding: var(--size-4-1) var(--size-4-1); */
}
.chip {
display: inline-block;
padding: 4px 8px;
margin: 4px;
border-radius: 4px;
font-size: 0.75em;
font-weight: bold;
background-color: var(--tag-background);
border: var(--tag-border-width) solid var(--tag-border-color);
}
.chip.connected {
background-color: var(--background-modifier-success);
color: var(--text-normal);
}
.chip.connected-live {
background-color: var(--background-modifier-success);
border-color: var(--background-modifier-success);
color: var(--text-normal);
}
.chip.accepted {
background-color: var(--background-modifier-success);
color: var(--text-normal);
}
.chip.waiting {
background-color: var(--background-secondary);
}
.chip.unknown {
background-color: var(--background-primary);
color: var(--text-warning);
}
.chip.denied {
background-color: var(--background-modifier-error);
color: var(--text-error);
}
.chip.attr {
background-color: var(--background-secondary);
}
.button {
margin: var(--size-4-1);
}
.button.affirmative {
background-color: var(--interactive-accent);
color: var(--text-normal);
}
.button.affirmative:hover {
background-color: var(--interactive-accent-hover);
}
.button.negative {
background-color: var(--background-modifier-error);
color: var(--text-error);
}
.button.negative:hover {
background-color: var(--background-modifier-error-hover);
}
</style>

Submodule src/lib updated: 7c275d50ae...f0253a8548

File diff suppressed because it is too large

View File

@@ -1,65 +0,0 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE, Logger } from "octagonal-wheels/common/logger";
import type { LOG_LEVEL } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";
import { __$checkInstanceBinding } from "../lib/src/dev/checks";
export abstract class AbstractModule {
_log = (msg: any, level: LOG_LEVEL = LOG_LEVEL_INFO, key?: string) => {
if (typeof msg === "string" && level !== LOG_LEVEL_NOTICE) {
msg = `[${this.constructor.name}]\u{200A} ${msg}`;
}
// console.log(msg);
Logger(msg, level, key);
};
get localDatabase() {
return this.core.localDatabase;
}
get settings() {
return this.core.settings;
}
set settings(value) {
this.core.settings = value;
}
onBindFunction(core: LiveSyncCore, services: typeof core.services) {
// Override if needed.
}
constructor(public core: LiveSyncCore) {
this.onBindFunction(core, core.services);
Logger(`[${this.constructor.name}] Loaded`, LOG_LEVEL_VERBOSE);
__$checkInstanceBinding(this);
}
saveSettings = this.core.saveSettings.bind(this.core);
addTestResult(key: string, value: boolean, summary?: string, message?: string) {
this.services.test.addTestResult(`${this.constructor.name}`, key, value, summary, message);
}
testDone(result: boolean = true) {
return Promise.resolve(result);
}
testFail(message: string) {
this._log(message, LOG_LEVEL_NOTICE);
return this.testDone(false);
}
async _test(key: string, process: () => Promise<any>) {
this._log(`Testing ${key}`, LOG_LEVEL_VERBOSE);
try {
const ret = await process();
if (ret !== true) {
this.addTestResult(key, false, ret.toString());
return this.testFail(`${key} failed: ${ret}`);
}
this.addTestResult(key, true, "");
} catch (ex: any) {
this.addTestResult(key, false, "Failed by Exception", ex.toString());
return this.testFail(`${key} failed: ${ex}`);
}
return this.testDone();
}
get services() {
return this.core._services;
}
}
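// Hypothetical usage sketch (illustrative only; the module name is assumed):
// a concrete module wires itself up in onBindFunction() and reports self-test
// results through the _test() helper above, e.g.:
//
//     class ModuleExample extends AbstractModule {
//         onBindFunction(core: LiveSyncCore, services: typeof core.services) {
//             services.test.test.addHandler(() => this._test("always-pass", () => Promise.resolve(true)));
//         }
//     }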

View File

@@ -1,54 +0,0 @@
import { type Prettify } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";
import type ObsidianLiveSyncPlugin from "../main";
import { AbstractModule } from "./AbstractModule.ts";
import type { ChainableExecuteFunction, OverridableFunctionsKeys } from "./ModuleTypes";
export type IObsidianModuleBase = OverridableFunctionsKeys<ObsidianLiveSyncPlugin>;
export type IObsidianModule = Prettify<Partial<IObsidianModuleBase>>;
export type ModuleKeys = keyof IObsidianModule;
export type ChainableModuleProps = ChainableExecuteFunction<ObsidianLiveSyncPlugin>;
export abstract class AbstractObsidianModule extends AbstractModule {
addCommand = this.plugin.addCommand.bind(this.plugin);
registerView = this.plugin.registerView.bind(this.plugin);
addRibbonIcon = this.plugin.addRibbonIcon.bind(this.plugin);
registerObsidianProtocolHandler = this.plugin.registerObsidianProtocolHandler.bind(this.plugin);
get localDatabase() {
return this.plugin.localDatabase;
}
get settings() {
return this.plugin.settings;
}
set settings(value) {
this.plugin.settings = value;
}
get app() {
return this.plugin.app;
}
constructor(
public plugin: ObsidianLiveSyncPlugin,
public core: LiveSyncCore
) {
super(core);
}
saveSettings = this.plugin.saveSettings.bind(this.plugin);
isMainReady() {
return this.services.appLifecycle.isReady();
}
isMainSuspended() {
return this.services.appLifecycle.isSuspended();
}
isDatabaseReady() {
return this.services.database.isDatabaseReady();
}
//should be overridden
isThisModuleEnabled() {
return true;
}
}

View File

@@ -1,55 +0,0 @@
import type { Prettify } from "../lib/src/common/types";
import type { LiveSyncCore } from "../main";
export type OverridableFunctionsKeys<T> = {
[K in keyof T as K extends `$${string}` ? K : never]: T[K];
};
export type ChainableExecuteFunction<T> = {
[K in keyof T as K extends `$${string}`
? T[K] extends (...args: any) => ChainableFunctionResult
? K
: never
: never]: T[K];
};
export type ICoreModuleBase = OverridableFunctionsKeys<LiveSyncCore>;
export type ICoreModule = Prettify<Partial<ICoreModuleBase>>;
export type CoreModuleKeys = keyof ICoreModule;
export type ChainableFunctionResult =
| Promise<boolean | undefined | string>
| Promise<boolean | undefined>
| Promise<boolean>
| Promise<void>;
export type ChainableFunctionResultOrAll = Promise<boolean | undefined | string | void>;
type AllExecuteFunction<T> = {
[K in keyof T as K extends `$all${string}`
? T[K] extends (...args: any[]) => ChainableFunctionResultOrAll
? K
: never
: never]: T[K];
};
type EveryExecuteFunction<T> = {
[K in keyof T as K extends `$every${string}`
? T[K] extends (...args: any[]) => ChainableFunctionResult
? K
: never
: never]: T[K];
};
type AnyExecuteFunction<T> = {
[K in keyof T as K extends `$any${string}`
? T[K] extends (...args: any[]) => ChainableFunctionResult
? K
: never
: never]: T[K];
};
type InjectableFunction<T> = {
[K in keyof T as K extends `$$${string}` ? (T[K] extends (...args: any[]) => any ? K : never) : never]: T[K];
};
export type AllExecuteProps = AllExecuteFunction<LiveSyncCore>;
export type EveryExecuteProps = EveryExecuteFunction<LiveSyncCore>;
export type AnyExecuteProps = AnyExecuteFunction<LiveSyncCore>;
export type AllInjectableProps = InjectableFunction<LiveSyncCore>;
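// Illustration (hypothetical core shape, for reading the mappings above): given
//     class Core { $everyOnload(): Promise<boolean> { ... }; helper(): void { ... } }
// OverridableFunctionsKeys<Core> keeps only "$everyOnload" (the "$" prefix marks
// overridable hooks), and EveryExecuteFunction<Core> keeps it as well because it
// matches `$every${string}` and returns a ChainableFunctionResult, while "helper"
// is dropped by both mappings.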

View File

@@ -1,352 +0,0 @@
import { LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { EVENT_FILE_SAVED, eventHub } from "../../common/events";
import {
getDatabasePathFromUXFileInfo,
getStoragePathFromUXFileInfo,
isInternalMetadata,
markChangesAreSame,
} from "../../common/utils";
import type {
UXFileInfoStub,
FilePathWithPrefix,
UXFileInfo,
MetaEntry,
LoadedEntry,
FilePath,
SavingEntry,
DocumentID,
} from "../../lib/src/common/types";
import type { DatabaseFileAccess } from "../interfaces/DatabaseFileAccess";
import { isPlainText, shouldBeIgnored, stripAllPrefixes } from "../../lib/src/string_and_binary/path";
import {
createBlob,
createTextBlob,
delay,
determineTypeFromBlob,
isDocContentSame,
readContent,
} from "../../lib/src/common/utils";
import { serialized } from "octagonal-wheels/concurrency/lock";
import { AbstractModule } from "../AbstractModule.ts";
import { ICHeader } from "../../common/types.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleDatabaseFileAccess extends AbstractModule implements DatabaseFileAccess {
private _everyOnload(): Promise<boolean> {
this.core.databaseFileAccess = this;
return Promise.resolve(true);
}
private async _everyModuleTest(): Promise<boolean> {
if (!this.settings.enableDebugTools) return Promise.resolve(true);
const testString = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam nec purus nec nunc";
// Before testing, we need to delete the file completely.
const conflicts = await this.getConflictedRevs("autoTest.md" as FilePathWithPrefix);
for (const rev of conflicts) {
await this.delete("autoTest.md" as FilePathWithPrefix, rev);
}
await this.delete("autoTest.md" as FilePathWithPrefix);
// OK, begin!
await this._test(
"storeContent",
async () => await this.storeContent("autoTest.md" as FilePathWithPrefix, testString)
);
// For the test, we need to clear the caches.
this.localDatabase.clearCaches();
await this._test("readContent", async () => {
const content = await this.fetch("autoTest.md" as FilePathWithPrefix);
if (!content) return "File not found";
if (content.deleted) return "File is deleted";
return (await content.body.text()) == testString
? true
: `Content is not same ${await content.body.text()}`;
});
await this._test("delete", async () => await this.delete("autoTest.md" as FilePathWithPrefix));
await this._test("read deleted content", async () => {
const content = await this.fetch("autoTest.md" as FilePathWithPrefix);
if (!content) return true;
if (content.deleted) return true;
return `Still exists!: ${await content.body.text()},${JSON.stringify(content, undefined, 2)}`;
});
await delay(100);
return this.testDone();
}
async checkIsTargetFile(file: UXFileInfoStub | FilePathWithPrefix): Promise<boolean> {
const path = getStoragePathFromUXFileInfo(file);
if (!(await this.services.vault.isTargetFile(path))) {
this._log(`File is not target: ${path}`, LOG_LEVEL_VERBOSE);
return false;
}
if (shouldBeIgnored(path)) {
this._log(`File should be ignored: ${path}`, LOG_LEVEL_VERBOSE);
return false;
}
return true;
}
async delete(file: UXFileInfoStub | FilePathWithPrefix, rev?: string): Promise<boolean> {
if (!(await this.checkIsTargetFile(file))) {
return true;
}
const fullPath = getDatabasePathFromUXFileInfo(file);
try {
this._log(`deleteDB By path:${fullPath}`);
return await this.deleteFromDBbyPath(fullPath, rev);
} catch (ex) {
this._log(`Failed to delete ${fullPath}`);
this._log(ex, LOG_LEVEL_VERBOSE);
return false;
}
}
async createChunks(file: UXFileInfo, force: boolean = false, skipCheck?: boolean): Promise<boolean> {
return await this.__store(file, force, skipCheck, true);
}
async store(file: UXFileInfo, force: boolean = false, skipCheck?: boolean): Promise<boolean> {
return await this.__store(file, force, skipCheck, false);
}
async storeContent(path: FilePathWithPrefix, content: string): Promise<boolean> {
const blob = createTextBlob(content);
const bytes = (await blob.arrayBuffer()).byteLength;
const isInternal = path.startsWith(".") ? true : undefined;
const dummyUXFileInfo: UXFileInfo = {
name: path.split("/").pop() as string,
path: path,
stat: {
size: bytes,
ctime: Date.now(),
mtime: Date.now(),
type: "file",
},
body: blob,
isInternal,
};
return await this.__store(dummyUXFileInfo, true, false, false);
}
private async __store(
file: UXFileInfo,
force: boolean = false,
skipCheck?: boolean,
onlyChunks?: boolean
): Promise<boolean> {
if (!skipCheck) {
if (!(await this.checkIsTargetFile(file))) {
return true;
}
}
if (!file) {
this._log("File seems bad", LOG_LEVEL_VERBOSE);
return false;
}
// const path = getPathFromUXFileInfo(file);
const isPlain = isPlainText(file.name);
const possiblyLarge = !isPlain;
const content = file.body;
const datatype = determineTypeFromBlob(content);
const idPrefix = file.isInternal ? ICHeader : "";
const fullPath = getStoragePathFromUXFileInfo(file);
const fullPathOnDB = getDatabasePathFromUXFileInfo(file);
if (possiblyLarge) this._log(`Processing: ${fullPath}`, LOG_LEVEL_VERBOSE);
// if (isInternalMetadata(fullPath)) {
// this._log(`Internal file: ${fullPath}`, LOG_LEVEL_VERBOSE);
// return false;
// }
if (file.isInternal) {
if (file.deleted) {
file.stat = {
size: 0,
ctime: Date.now(),
mtime: Date.now(),
type: "file",
};
} else if (file.stat == undefined) {
const stat = await this.core.storageAccess.statHidden(file.path);
if (!stat) {
// We have already determined whether the file was deleted by this point, so this is an unexpected case; we should report an error.
this._log(`Internal file not found: ${fullPath}`, LOG_LEVEL_VERBOSE);
return false;
}
file.stat = stat;
}
}
const idMain = await this.services.path.path2id(fullPath);
const id = (idPrefix + idMain) as DocumentID;
const d: SavingEntry = {
_id: id,
path: fullPathOnDB,
data: content,
ctime: file.stat.ctime,
mtime: file.stat.mtime,
size: file.stat.size,
children: [],
datatype: datatype,
type: datatype,
eden: {},
};
// upsert should be performed under the lock
const msg = `STORAGE -> DB (${datatype}) `;
const isNotChanged = await serialized("file-" + fullPath, async () => {
if (force) {
this._log(msg + "Force writing " + fullPath, LOG_LEVEL_VERBOSE);
return false;
}
// Commented out temporarily: this checks whether the file was created by ourselves.
// if (this.core.storageAccess.recentlyTouched(file)) {
// return true;
// }
try {
const old = await this.localDatabase.getDBEntry(d.path, undefined, false, true, false);
if (old !== false) {
const oldData = { data: old.data, deleted: old._deleted || old.deleted };
const newData = { data: d.data, deleted: d._deleted || d.deleted };
if (oldData.deleted != newData.deleted) return false;
if (!(await isDocContentSame(old.data, newData.data))) return false;
this._log(
msg + "Skipped (not changed) " + fullPath + (d._deleted || d.deleted ? " (deleted)" : ""),
LOG_LEVEL_VERBOSE
);
markChangesAreSame(old, d.mtime, old.mtime);
return true;
// d._rev = old._rev;
}
} catch (ex) {
this._log(
msg +
"Error, Could not check the diff for the old one." +
(force ? "force writing." : "") +
fullPath +
(d._deleted || d.deleted ? " (deleted)" : ""),
LOG_LEVEL_VERBOSE
);
this._log(ex, LOG_LEVEL_VERBOSE);
return !force;
}
return false;
});
if (isNotChanged) {
this._log(msg + " Skip " + fullPath, LOG_LEVEL_VERBOSE);
return true;
}
const ret = await this.localDatabase.putDBEntry(d, onlyChunks);
if (ret !== false) {
this._log(msg + fullPath);
eventHub.emitEvent(EVENT_FILE_SAVED);
}
return ret != false;
}
async getConflictedRevs(file: UXFileInfoStub | FilePathWithPrefix): Promise<string[]> {
if (!(await this.checkIsTargetFile(file))) {
return [];
}
const filename = getDatabasePathFromUXFileInfo(file);
const doc = await this.localDatabase.getDBEntryMeta(filename, { conflicts: true }, true);
if (doc === false) {
return [];
}
return doc._conflicts || [];
}
async fetch(
file: UXFileInfoStub | FilePathWithPrefix,
rev?: string,
waitForReady?: boolean,
skipCheck = false
): Promise<UXFileInfo | false> {
if (skipCheck && !(await this.checkIsTargetFile(file))) {
return false;
}
const entry = await this.fetchEntry(file, rev, waitForReady, true);
if (entry === false) {
return false;
}
const data = createBlob(readContent(entry));
const path = stripAllPrefixes(entry.path);
const fileInfo: UXFileInfo = {
name: path.split("/").pop() as string,
path: path,
stat: {
size: entry.size,
ctime: entry.ctime,
mtime: entry.mtime,
type: "file",
},
body: data,
deleted: entry.deleted || entry._deleted,
};
if (isInternalMetadata(entry.path)) {
fileInfo.isInternal = true;
}
return fileInfo;
}
async fetchEntryMeta(
file: UXFileInfoStub | FilePathWithPrefix,
rev?: string,
skipCheck = false
): Promise<MetaEntry | false> {
const dbFileName = getDatabasePathFromUXFileInfo(file);
if (skipCheck && !(await this.checkIsTargetFile(file))) {
return false;
}
const doc = await this.localDatabase.getDBEntryMeta(dbFileName, rev ? { rev: rev } : undefined, true);
if (doc === false) {
return false;
}
return doc as MetaEntry;
}
async fetchEntryFromMeta(
meta: MetaEntry,
waitForReady: boolean = true,
skipCheck = false
): Promise<LoadedEntry | false> {
if (skipCheck && !(await this.checkIsTargetFile(meta.path))) {
return false;
}
const doc = await this.localDatabase.getDBEntryFromMeta(meta as LoadedEntry, false, waitForReady);
if (doc === false) {
return false;
}
return doc;
}
async fetchEntry(
file: UXFileInfoStub | FilePathWithPrefix,
rev?: string,
waitForReady: boolean = true,
skipCheck = false
): Promise<LoadedEntry | false> {
if (skipCheck && !(await this.checkIsTargetFile(file))) {
return false;
}
const entry = await this.fetchEntryMeta(file, rev, true);
if (entry === false) {
return false;
}
const doc = await this.fetchEntryFromMeta(entry, waitForReady, true);
return doc;
}
async deleteFromDBbyPath(fullPath: FilePath | FilePathWithPrefix, rev?: string): Promise<boolean> {
if (!(await this.checkIsTargetFile(fullPath))) {
this._log(`storeFromStorage: File is not target: ${fullPath}`);
return true;
}
const opt = rev ? { rev: rev } : undefined;
const ret = await this.localDatabase.deleteDBEntry(fullPath, opt);
eventHub.emitEvent(EVENT_FILE_SAVED);
return ret;
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.appLifecycle.onLoaded.addHandler(this._everyOnload.bind(this));
services.test.test.addHandler(this._everyModuleTest.bind(this));
}
}
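// Hypothetical usage sketch (path illustrative), mirroring the self-test above:
//     await core.databaseFileAccess.storeContent("notes/example.md" as FilePathWithPrefix, "# hello");
//     const f = await core.databaseFileAccess.fetch("notes/example.md" as FilePathWithPrefix);
//     if (f && !f.deleted) console.log(await f.body.text()); // "# hello"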

View File

@@ -1,443 +0,0 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { serialized } from "octagonal-wheels/concurrency/lock";
import type { FileEventItem } from "../../common/types";
import type {
FilePath,
FilePathWithPrefix,
MetaEntry,
UXFileInfo,
UXFileInfoStub,
UXInternalFileInfoStub,
} from "../../lib/src/common/types";
import { AbstractModule } from "../AbstractModule.ts";
import {
compareFileFreshness,
EVEN,
getPath,
getPathWithoutPrefix,
getStoragePathFromUXFileInfo,
markChangesAreSame,
} from "../../common/utils";
import { getDocDataAsArray, isDocContentSame, readAsBlob, readContent } from "../../lib/src/common/utils";
import { shouldBeIgnored } from "../../lib/src/string_and_binary/path";
import { Semaphore } from "octagonal-wheels/concurrency/semaphore";
import { eventHub } from "../../common/events.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleFileHandler extends AbstractModule {
get db() {
return this.core.databaseFileAccess;
}
get storage() {
return this.core.storageAccess;
}
_everyOnloadStart(): Promise<boolean> {
this.core.fileHandler = this;
return Promise.resolve(true);
}
async readFileFromStub(file: UXFileInfoStub | UXFileInfo) {
if ("body" in file && file.body) {
return file;
}
const readFile = await this.storage.readStubContent(file);
if (!readFile) {
throw new Error(`File ${file.path} does not exist on the storage`);
}
return readFile;
}
async storeFileToDB(
info: UXFileInfoStub | UXFileInfo | UXInternalFileInfoStub | FilePathWithPrefix,
force: boolean = false,
onlyChunks: boolean = false
): Promise<boolean> {
const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
if (file == null) {
this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
return false;
}
// const file = item.args.file;
if (file.isInternal) {
this._log(
`Internal file ${file.path} is not allowed to be processed on processFileEvent`,
LOG_LEVEL_VERBOSE
);
return false;
}
// First, check the file on the database
const entry = await this.db.fetchEntry(file, undefined, true, true);
if (!entry || entry.deleted || entry._deleted) {
// If the file does not exist on the database, it should be created.
const readFile = await this.readFileFromStub(file);
if (!onlyChunks) {
return await this.db.store(readFile);
} else {
return await this.db.createChunks(readFile, false, true);
}
}
// The entry exists on the database; check the difference between the file and the entry.
let shouldApplied = false;
if (!force && !onlyChunks) {
// 1. If the timestamps are far apart, the file should be updated.
// Note: This checks only the mtime, with the resolution reduced to 2 seconds.
// The 2-second resolution matches the ZIP file's mtime granularity; without it, we could not back up the vault as a ZIP file.
// This is hardcoded in `compareMtime` of `src/common/utils.ts`.
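// Worked example (assuming compareMtime floors mtimes into 2-second buckets):
// mtimes 1_700_000_000_123 ms and 1_700_000_001_999 ms land in the same bucket,
// so the comparison reports EVEN and the content check below decides instead.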
if (compareFileFreshness(file, entry) !== EVEN) {
shouldApplied = true;
}
// 2. If not, the content should be checked.
let readFile: UXFileInfo | undefined = undefined;
if (!shouldApplied) {
readFile = await this.readFileFromStub(file);
if (!readFile) {
this._log(`File ${file.path} does not exist on the storage`, LOG_LEVEL_NOTICE);
return false;
}
if (await isDocContentSame(getDocDataAsArray(entry.data), readFile.body)) {
// The timestamps differ but the content is the same; therefore, the two timestamps should be treated as equal.
// So, mark the changes as the same.
markChangesAreSame(readFile, readFile.stat.mtime, entry.mtime);
} else {
shouldApplied = true;
}
}
if (!shouldApplied) {
this._log(`File ${file.path} has not changed`, LOG_LEVEL_VERBOSE);
return true;
}
if (!readFile) readFile = await this.readFileFromStub(file);
// If the file is changed, then the file should be stored.
if (onlyChunks) {
return await this.db.createChunks(readFile, false, true);
} else {
return await this.db.store(readFile, false, true);
}
} else {
// If force is true, then it should be updated.
const readFile = await this.readFileFromStub(file);
if (onlyChunks) {
return await this.db.createChunks(readFile, true, true);
} else {
return await this.db.store(readFile, true, true);
}
}
}
async deleteFileFromDB(info: UXFileInfoStub | UXInternalFileInfoStub | FilePath): Promise<boolean> {
const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
if (file == null) {
this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
return false;
}
// const file = item.args.file;
if (file.isInternal) {
this._log(
`Internal file ${file.path} is not allowed to be processed on processFileEvent`,
LOG_LEVEL_VERBOSE
);
return false;
}
// First, check the file on the database
const entry = await this.db.fetchEntry(file, undefined, true, true);
if (!entry || entry.deleted || entry._deleted) {
this._log(`File ${file.path} does not exist or is already deleted on the database`, LOG_LEVEL_VERBOSE);
return false;
}
// Check the file is already conflicted. if so, only the conflicted one should be deleted.
const conflictedRevs = await this.db.getConflictedRevs(file);
if (conflictedRevs.length > 0) {
// If conflicted, only this revision should be deleted; entry._rev should be our own file's rev.
// TODO: I believed so, but I noticed that I am not sure. This should be verified.
// In any case, the file should be deleted; in previous versions we simply deleted the file.
return await this.db.delete(file, entry._rev);
}
// Otherwise, the file should be deleted simply. This is the previous behaviour.
return await this.db.delete(file);
}
async deleteRevisionFromDB(
info: UXFileInfoStub | FilePath | FilePathWithPrefix,
rev: string
): Promise<boolean | undefined> {
//TODO: Possibly check the conflicting.
return await this.db.delete(info, rev);
}
async resolveConflictedByDeletingRevision(
info: UXFileInfoStub | FilePath,
rev: string
): Promise<boolean | undefined> {
const path = getStoragePathFromUXFileInfo(info);
if (!(await this.deleteRevisionFromDB(info, rev))) {
this._log(`Failed to delete the conflicted revision ${rev} of ${path}`, LOG_LEVEL_VERBOSE);
return false;
}
if (!(await this.dbToStorageWithSpecificRev(info, rev, true))) {
this._log(`Failed to apply the resolved revision ${rev} of ${path} to the storage`, LOG_LEVEL_VERBOSE);
return false;
}
}
async dbToStorageWithSpecificRev(
info: UXFileInfoStub | UXFileInfo | FilePath | null,
rev: string,
force?: boolean
): Promise<boolean> {
const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
if (file == null) {
this._log(`File ${info} does not exist on the storage`, LOG_LEVEL_VERBOSE);
return false;
}
const docEntry = await this.db.fetchEntryMeta(file, rev, true);
if (!docEntry) {
this._log(`File ${file.path} does not exist on the database`, LOG_LEVEL_VERBOSE);
return false;
}
return await this.dbToStorage(docEntry, file, force);
}
async dbToStorage(
entryInfo: MetaEntry | FilePathWithPrefix,
info: UXFileInfoStub | UXFileInfo | FilePath | null,
force?: boolean
): Promise<boolean> {
const file = typeof info === "string" ? this.storage.getFileStub(info) : info;
const mode = file == null ? "create" : "modify";
const pathFromEntryInfo = typeof entryInfo === "string" ? entryInfo : getPath(entryInfo);
const docEntry = await this.db.fetchEntryMeta(pathFromEntryInfo, undefined, true);
if (!docEntry) {
this._log(`File ${pathFromEntryInfo} does not exist on the database`, LOG_LEVEL_VERBOSE);
return false;
}
const path = getPath(docEntry);
// 1. Check if it is already conflicted.
const revs = await this.db.getConflictedRevs(path);
if (revs.length > 0) {
// Some conflicts exist.
if (this.settings.writeDocumentsIfConflicted) {
// If configured to write the document even if conflicted, then it should be written.
// NO OP
} else {
// If not, it should be checked and will be processed later (i.e., after the conflict is resolved).
await this.services.conflict.queueCheckForIfOpen(path);
return true;
}
}
// 2. Check if the file already exists on the storage.
const existDoc = this.storage.getStub(path);
if (existDoc && existDoc.isFolder) {
this._log(`Folder ${path} already exists on the storage as a folder`, LOG_LEVEL_VERBOSE);
// We can do nothing here, and other modules also have nothing to do.
return true;
}
// Check existence of both file and docEntry.
const existOnDB = !(docEntry._deleted || docEntry.deleted || false);
const existOnStorage = existDoc != null;
if (!existOnDB && !existOnStorage) {
this._log(`File ${path} seems to be deleted, and is already absent from the storage`, LOG_LEVEL_VERBOSE);
return true;
}
if (!existOnDB && existOnStorage) {
// Deletion has been transferred. Storage files will be deleted.
// Note: If the folder becomes empty, the folder will be deleted unless configured to keep it.
// This behaviour is implemented in `ModuleFileAccessObsidian`,
// and it does not care whether the item has actually been deleted.
await this.storage.deleteVaultItem(path);
return true;
}
// Okay, the file exists on the database. Let's check whether the file exists on the storage.
const docRead = await this.db.fetchEntryFromMeta(docEntry);
if (!docRead) {
this._log(`File ${path} does not exist on the database`, LOG_LEVEL_VERBOSE);
return false;
}
// If we want to process size-mismatched files (e.g., files created by some integrations), enable the toggle.
if (!this.settings.processSizeMismatchedFiles) {
// Check the file is not corrupted
// (Zero is a special case; it may be created by some APIs and might be acceptable.)
if (docRead.size != 0 && docRead.size !== readAsBlob(docRead).size) {
this._log(
`File ${path} seems to be corrupted! Writing prevented. (${docRead.size} != ${readAsBlob(docRead).size})`,
LOG_LEVEL_NOTICE
);
return false;
}
}
const docData = readContent(docRead);
if (existOnStorage && !force) {
// The file exists on the storage. Let's check the difference between the file and the entry.
// But if force is true, it should be updated.
// OK, we have to compare.
let shouldApplied = false;
// 1. If the timestamps are far apart, the file should be updated.
// Note: This checks only the mtime, with the resolution reduced to 2 seconds.
// The 2-second resolution matches the ZIP file's mtime granularity; without it, we could not back up the vault as a ZIP file.
// This is hardcoded in `compareMtime` of `src/common/utils.ts`.
if (compareFileFreshness(existDoc, docEntry) !== EVEN) {
shouldApplied = true;
}
// 2. If not, the content should be checked.
if (!shouldApplied) {
const readFile = await this.readFileFromStub(existDoc);
if (await isDocContentSame(docData, readFile.body)) {
// The content is the same, so we do not need to update the file.
shouldApplied = false;
// The timestamps differ but the content is the same; therefore, the two timestamps should be treated as equal.
// So, mark the changes as the same.
markChangesAreSame(docRead, docRead.mtime, existDoc.stat.mtime);
} else {
shouldApplied = true;
}
}
if (!shouldApplied) {
this._log(`File ${docRead.path} has not changed`, LOG_LEVEL_VERBOSE);
return true;
}
// Let's apply the changes.
} else {
this._log(
`File ${docRead.path} ${existOnStorage ? "(new) " : ""} ${force ? " (forced)" : ""}`,
LOG_LEVEL_VERBOSE
);
}
await this.storage.ensureDir(path);
const ret = await this.storage.writeFileAuto(path, docData, { ctime: docRead.ctime, mtime: docRead.mtime });
await this.storage.touched(path);
this.storage.triggerFileEvent(mode, path);
return ret;
}
private async _anyHandlerProcessesFileEvent(item: FileEventItem): Promise<boolean> {
const eventItem = item.args;
const type = item.type;
const path = eventItem.file.path;
if (!(await this.services.vault.isTargetFile(path))) {
this._log(`File ${path} is not the target file`, LOG_LEVEL_VERBOSE);
return false;
}
if (shouldBeIgnored(path)) {
this._log(`File ${path} should be ignored`, LOG_LEVEL_VERBOSE);
return false;
}
const lockKey = `processFileEvent-${path}`;
return await serialized(lockKey, async () => {
switch (type) {
case "CREATE":
case "CHANGED":
return await this.storeFileToDB(item.args.file);
case "DELETE":
return await this.deleteFileFromDB(item.args.file);
case "INTERNAL":
// This should be handled by another module.
return false;
default:
this._log(`Unsupported event type: ${type}`, LOG_LEVEL_VERBOSE);
return false;
}
});
}
async _anyProcessReplicatedDoc(entry: MetaEntry): Promise<boolean> {
return await serialized(entry.path, async () => {
if (!(await this.services.vault.isTargetFile(entry.path))) {
this._log(`File ${entry.path} is not the target file`, LOG_LEVEL_VERBOSE);
return false;
}
if (this.services.vault.isFileSizeTooLarge(entry.size)) {
this._log(`File ${entry.path} is too large (on database) to be processed`, LOG_LEVEL_VERBOSE);
return false;
}
if (shouldBeIgnored(entry.path)) {
this._log(`File ${entry.path} should be ignored`, LOG_LEVEL_VERBOSE);
return false;
}
const path = getPath(entry);
const targetFile = this.storage.getStub(getPathWithoutPrefix(entry));
if (targetFile && targetFile.isFolder) {
this._log(`${getPath(entry)} already exists as a folder`);
// Nothing to do here, and other modules also have nothing to do.
return true;
} else {
if (targetFile && this.services.vault.isFileSizeTooLarge(targetFile.stat.size)) {
this._log(`File ${targetFile.path} is too large (on storage) to be processed`, LOG_LEVEL_VERBOSE);
return false;
}
this._log(
`Processing ${path} (${entry._id.substring(0, 8)} :${entry._rev?.substring(0, 5)}) : Started...`,
LOG_LEVEL_VERBOSE
);
// Before writing (or skipping), the merging dialogue should be cancelled.
eventHub.emitEvent("conflict-cancelled", path);
const ret = await this.dbToStorage(entry, targetFile);
this._log(`Processing ${path} (${entry._id.substring(0, 8)} :${entry._rev?.substring(0, 5)}) : Done`);
return ret;
}
});
}
async createAllChunks(showingNotice?: boolean): Promise<void> {
this._log("Collecting local files on the storage", LOG_LEVEL_VERBOSE);
const semaphore = Semaphore(10);
let processed = 0;
const filesStorageSrc = this.storage.getFiles();
const incProcessed = () => {
processed++;
if (processed % 25 == 0)
this._log(
`Creating missing chunks: ${processed} of ${total} files`,
showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO,
"chunkCreation"
);
};
const total = filesStorageSrc.length;
const procAllChunks = filesStorageSrc.map(async (file) => {
if (!(await this.services.vault.isTargetFile(file))) {
incProcessed();
return true;
}
if (this.services.vault.isFileSizeTooLarge(file.stat.size)) {
incProcessed();
return true;
}
if (shouldBeIgnored(file.path)) {
incProcessed();
return true;
}
const release = await semaphore.acquire();
incProcessed();
try {
await this.storeFileToDB(file, false, true);
} catch (ex) {
this._log(ex, LOG_LEVEL_VERBOSE);
} finally {
release();
}
});
await Promise.all(procAllChunks);
this._log(
`Creating chunks Done: ${processed} of ${total} files`,
showingNotice ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO,
"chunkCreation"
);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.appLifecycle.onInitialise.addHandler(this._everyOnloadStart.bind(this));
services.fileProcessing.processFileEvent.addHandler(this._anyHandlerProcessesFileEvent.bind(this));
services.replication.processSynchroniseResult.addHandler(this._anyProcessReplicatedDoc.bind(this));
}
}

View File

@@ -1,46 +0,0 @@
import { $msg } from "../../lib/src/common/i18n";
import { LiveSyncLocalDB } from "../../lib/src/pouchdb/LiveSyncLocalDB.ts";
import { initializeStores } from "../../common/stores.ts";
import { AbstractModule } from "../AbstractModule.ts";
import { LiveSyncManagers } from "../../lib/src/managers/LiveSyncManagers.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleLocalDatabaseObsidian extends AbstractModule {
_everyOnloadStart(): Promise<boolean> {
return Promise.resolve(true);
}
private async _openDatabase(): Promise<boolean> {
if (this.localDatabase != null) {
await this.localDatabase.close();
}
const vaultName = this.services.vault.getVaultName();
this._log($msg("moduleLocalDatabase.logWaitingForReady"));
const getDB = () => this.core.localDatabase.localDatabase;
const getSettings = () => this.core.settings;
this.core.managers = new LiveSyncManagers({
get database() {
return getDB();
},
getActiveReplicator: () => this.core.replicator,
id2path: this.services.path.id2path,
// path2id: this.core.$$path2id.bind(this.core),
path2id: this.services.path.path2id,
get settings() {
return getSettings();
},
});
this.core.localDatabase = new LiveSyncLocalDB(vaultName, this.core);
initializeStores(vaultName);
return await this.localDatabase.initializeDatabase();
}
_isDatabaseReady(): boolean {
return this.localDatabase != null && this.localDatabase.isReady;
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.database.isDatabaseReady.setHandler(this._isDatabaseReady.bind(this));
services.appLifecycle.onInitialise.addHandler(this._everyOnloadStart.bind(this));
services.database.openDatabase.setHandler(this._openDatabase.bind(this));
}
}

View File

@@ -1,41 +0,0 @@
import { PeriodicProcessor } from "../../common/utils";
import type { LiveSyncCore } from "../../main";
import { AbstractModule } from "../AbstractModule";
export class ModulePeriodicProcess extends AbstractModule {
periodicSyncProcessor = new PeriodicProcessor(this.core, async () => await this.services.replication.replicate());
disablePeriodic() {
this.periodicSyncProcessor?.disable();
return Promise.resolve(true);
}
resumePeriodic() {
this.periodicSyncProcessor.enable(
this.settings.periodicReplication ? this.settings.periodicReplicationInterval * 1000 : 0
);
return Promise.resolve(true);
}
private _allOnUnload() {
return this.disablePeriodic();
}
private _everyBeforeRealizeSetting(): Promise<boolean> {
return this.disablePeriodic();
}
private _everyBeforeSuspendProcess(): Promise<boolean> {
return this.disablePeriodic();
}
private _everyAfterResumeProcess(): Promise<boolean> {
return this.resumePeriodic();
}
private _everyAfterRealizeSetting(): Promise<boolean> {
return this.resumePeriodic();
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.appLifecycle.onUnload.addHandler(this._allOnUnload.bind(this));
services.setting.onBeforeRealiseSetting.addHandler(this._everyBeforeRealizeSetting.bind(this));
services.setting.onSettingRealised.addHandler(this._everyAfterRealizeSetting.bind(this));
services.appLifecycle.onSuspending.addHandler(this._everyBeforeSuspendProcess.bind(this));
services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
}
}
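// Timing sketch: with periodicReplication = true and periodicReplicationInterval = 60,
// resumePeriodic() calls enable(60_000), i.e. one replication attempt per minute;
// when periodicReplication is false it passes 0, presumably leaving the processor
// idle, and disablePeriodic() stops it outright.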

View File

@@ -1,23 +0,0 @@
import { AbstractModule } from "../AbstractModule";
import { PouchDB } from "../../lib/src/pouchdb/pouchdb-browser";
import type { LiveSyncCore } from "../../main";
import { ExtraSuffixIndexedDB } from "../../lib/src/common/types";
export class ModulePouchDB extends AbstractModule {
_createPouchDBInstance<T extends object>(
name?: string,
options?: PouchDB.Configuration.DatabaseConfiguration
): PouchDB.Database<T> {
const optionPass = options ?? {};
if (this.settings.useIndexedDBAdapter) {
optionPass.adapter = "indexeddb";
//@ts-ignore :missing def
optionPass.purged_infos_limit = 1;
return new PouchDB(name + ExtraSuffixIndexedDB, optionPass);
}
return new PouchDB(name, optionPass);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.database.createPouchDBInstance.setHandler(this._createPouchDBInstance.bind(this));
}
}
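// Sketch of the resulting behaviour (the service invocation shape is assumed):
//     const db = services.database.createPouchDBInstance<EntryDoc>("vault-name");
// With useIndexedDBAdapter enabled, the name gains ExtraSuffixIndexedDB and the
// "indexeddb" adapter (with purged_infos_limit = 1) is used; otherwise PouchDB
// falls back to its default adapter.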

View File

@@ -1,310 +0,0 @@
import { delay } from "octagonal-wheels/promises";
import {
DEFAULT_SETTINGS,
FLAGMD_REDFLAG2_HR,
FLAGMD_REDFLAG3_HR,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
REMOTE_COUCHDB,
REMOTE_MINIO,
} from "../../lib/src/common/types.ts";
import { AbstractModule } from "../AbstractModule.ts";
import type { Rebuilder } from "../interfaces/DatabaseRebuilder.ts";
import type { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator.ts";
import { fetchAllUsedChunks } from "@/lib/src/pouchdb/chunks.ts";
import { EVENT_DATABASE_REBUILT, eventHub } from "src/common/events.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleRebuilder extends AbstractModule implements Rebuilder {
private _everyOnload(): Promise<boolean> {
this.core.rebuilder = this;
return Promise.resolve(true);
}
async $performRebuildDB(
method: "localOnly" | "remoteOnly" | "rebuildBothByThisDevice" | "localOnlyWithChunks"
): Promise<void> {
if (method == "localOnly") {
await this.$fetchLocal();
}
if (method == "localOnlyWithChunks") {
await this.$fetchLocal(true);
}
if (method == "remoteOnly") {
await this.$rebuildRemote();
}
if (method == "rebuildBothByThisDevice") {
await this.$rebuildEverything();
}
}
async informOptionalFeatures() {
await this.core.services.UI.showMarkdownDialog(
"All optional features are disabled",
`Customisation Sync and Hidden File Sync will both be disabled.
Please enable them from the settings screen after setup is complete.`,
["OK"]
);
}
async askUsingOptionalFeature(opt: { enableFetch?: boolean; enableOverwrite?: boolean }) {
if (
(await this.core.confirm.askYesNoDialog(
"Do you want to enable extra features? If you are new to Self-hosted LiveSync, try the core feature first!",
{ title: "Enable extra features", defaultOption: "No", timeout: 15 }
)) == "yes"
) {
await this.services.setting.suggestOptionalFeatures(opt);
}
}
async rebuildRemote() {
await this.services.setting.suspendExtraSync();
this.core.settings.isConfigured = true;
this.core.settings.notifyThresholdOfRemoteStorageSize = DEFAULT_SETTINGS.notifyThresholdOfRemoteStorageSize;
await this.services.setting.realiseSetting();
await this.services.remote.markLocked();
await this.services.remote.tryResetDatabase();
await this.services.remote.markLocked();
await delay(500);
// await this.askUsingOptionalFeature({ enableOverwrite: true });
await delay(1000);
await this.services.remote.replicateAllToRemote(true);
await delay(1000);
await this.services.remote.replicateAllToRemote(true, true);
await this.informOptionalFeatures();
}
$rebuildRemote(): Promise<void> {
return this.rebuildRemote();
}
async rebuildEverything() {
await this.services.setting.suspendExtraSync();
// await this.askUseNewAdapter();
this.core.settings.isConfigured = true;
this.core.settings.notifyThresholdOfRemoteStorageSize = DEFAULT_SETTINGS.notifyThresholdOfRemoteStorageSize;
await this.services.setting.realiseSetting();
await this.resetLocalDatabase();
await delay(1000);
await this.services.databaseEvents.initialiseDatabase(true, true, true);
await this.services.remote.markLocked();
await this.services.remote.tryResetDatabase();
await this.services.remote.markLocked();
await delay(500);
// We do not have any other devices' data, so we do not need to ask for overwriting.
// await this.askUsingOptionalFeature({ enableOverwrite: false });
await delay(1000);
await this.services.remote.replicateAllToRemote(true);
await delay(1000);
await this.services.remote.replicateAllToRemote(true, true);
await this.informOptionalFeatures();
}
$rebuildEverything(): Promise<void> {
return this.rebuildEverything();
}
$fetchLocal(makeLocalChunkBeforeSync?: boolean, preventMakeLocalFilesBeforeSync?: boolean): Promise<void> {
return this.fetchLocal(makeLocalChunkBeforeSync, preventMakeLocalFilesBeforeSync);
}
async scheduleRebuild(): Promise<void> {
try {
await this.core.storageAccess.writeFileAuto(FLAGMD_REDFLAG2_HR, "");
} catch (ex) {
this._log("Could not create red_flag_rebuild.md", LOG_LEVEL_NOTICE);
this._log(ex, LOG_LEVEL_VERBOSE);
}
this.services.appLifecycle.performRestart();
}
async scheduleFetch(): Promise<void> {
try {
await this.core.storageAccess.writeFileAuto(FLAGMD_REDFLAG3_HR, "");
} catch (ex) {
this._log("Could not create red_flag_fetch.md", LOG_LEVEL_NOTICE);
this._log(ex, LOG_LEVEL_VERBOSE);
}
this.services.appLifecycle.performRestart();
}
private async _tryResetRemoteDatabase(): Promise<void> {
await this.core.replicator.tryResetRemoteDatabase(this.settings);
}
private async _tryCreateRemoteDatabase(): Promise<void> {
await this.core.replicator.tryCreateRemoteDatabase(this.settings);
}
private async _resetLocalDatabase(): Promise<boolean> {
this.core.storageAccess.clearTouched();
return await this.localDatabase.resetDatabase();
}
async suspendAllSync() {
this.core.settings.liveSync = false;
this.core.settings.periodicReplication = false;
this.core.settings.syncOnSave = false;
this.core.settings.syncOnEditorSave = false;
this.core.settings.syncOnStart = false;
this.core.settings.syncOnFileOpen = false;
this.core.settings.syncAfterMerge = false;
await this.services.setting.suspendExtraSync();
}
async suspendReflectingDatabase() {
if (this.core.settings.doNotSuspendOnFetching) return;
if (this.core.settings.remoteType == REMOTE_MINIO) return;
this._log(
`Suspending reflection: Database and storage changes will not be reflected in each other until the fetching is completely finished.`,
LOG_LEVEL_NOTICE
);
this.core.settings.suspendParseReplicationResult = true;
this.core.settings.suspendFileWatching = true;
await this.core.saveSettings();
}
async resumeReflectingDatabase() {
if (this.core.settings.doNotSuspendOnFetching) return;
if (this.core.settings.remoteType == REMOTE_MINIO) return;
this._log(`Database and storage reflection has been resumed!`, LOG_LEVEL_NOTICE);
this.core.settings.suspendParseReplicationResult = false;
this.core.settings.suspendFileWatching = false;
await this.services.vault.scanVault(true);
await this.services.replication.onBeforeReplicate(false); //TODO: Check actual need of this.
await this.core.saveSettings();
}
// No longer needed; both adapters have their own advantages and disadvantages.
// async askUseNewAdapter() {
// if (!this.core.settings.useIndexedDBAdapter) {
// const message = `Now this core has been configured to use the old database adapter for keeping compatibility. Do you want to deactivate it?`;
// const CHOICE_YES = "Yes, disable and use latest";
// const CHOICE_NO = "No, keep compatibility";
// const choices = [CHOICE_YES, CHOICE_NO];
//
// const ret = await this.core.confirm.confirmWithMessage(
// "Database adapter",
// message,
// choices,
// CHOICE_YES,
// 10
// );
// if (ret == CHOICE_YES) {
// this.core.settings.useIndexedDBAdapter = true;
// }
// }
// }
async fetchLocal(makeLocalChunkBeforeSync?: boolean, preventMakeLocalFilesBeforeSync?: boolean) {
await this.services.setting.suspendExtraSync();
// await this.askUseNewAdapter();
this.core.settings.isConfigured = true;
this.core.settings.notifyThresholdOfRemoteStorageSize = DEFAULT_SETTINGS.notifyThresholdOfRemoteStorageSize;
if (this.core.settings.maxMTimeForReflectEvents > 0) {
const date = new Date(this.core.settings.maxMTimeForReflectEvents);
const ask = `Your settings restrict file reflection times to no later than ${date}.
**This is a recovery configuration.**
This operation should only be performed on an empty vault.
Are you sure you wish to proceed?`;
const PROCEED = "I understand, proceed";
const CANCEL = "Cancel operation";
const CLEARANDPROCEED = "Clear restriction and proceed";
const choices = [PROCEED, CLEARANDPROCEED, CANCEL] as const;
const ret = await this.core.confirm.askSelectStringDialogue(ask, choices, {
title: "Confirm restricted fetch",
defaultAction: CANCEL,
timeout: 0,
});
if (ret == CLEARANDPROCEED) {
this.core.settings.maxMTimeForReflectEvents = 0;
await this.core.saveSettings();
}
if (ret == CANCEL) {
return;
}
}
await this.suspendReflectingDatabase();
await this.services.setting.realiseSetting();
await this.resetLocalDatabase();
await delay(1000);
await this.services.database.openDatabase();
// this.core.isReady = true;
this.services.appLifecycle.markIsReady();
if (makeLocalChunkBeforeSync) {
await this.core.fileHandler.createAllChunks(true);
} else if (!preventMakeLocalFilesBeforeSync) {
await this.services.databaseEvents.initialiseDatabase(true, true, true);
} else {
// Do not create local file entries before sync (i.e., use remote information)
}
await this.services.remote.markResolved();
await delay(500);
await this.services.remote.replicateAllFromRemote(true);
await delay(1000);
await this.services.remote.replicateAllFromRemote(true);
await this.resumeReflectingDatabase();
await this.informOptionalFeatures();
// No longer enabled
// await this.askUsingOptionalFeature({ enableFetch: true });
}
async fetchLocalWithRebuild() {
return await this.fetchLocal(true);
}
private async _allSuspendAllSync(): Promise<boolean> {
await this.suspendAllSync();
return true;
}
async resetLocalDatabase() {
if (this.core.settings.isConfigured && this.core.settings.additionalSuffixOfDatabaseName == "") {
// Discard the non-suffixed database
await this.services.database.resetDatabase();
}
const suffix = this.services.API.getAppID() || "";
this.core.settings.additionalSuffixOfDatabaseName = suffix;
await this.services.database.resetDatabase();
eventHub.emitEvent(EVENT_DATABASE_REBUILT);
}
async fetchRemoteChunks() {
if (
!this.core.settings.doNotSuspendOnFetching &&
!this.core.settings.useOnlyLocalChunk &&
this.core.settings.remoteType == REMOTE_COUCHDB
) {
this._log(`Fetching chunks`, LOG_LEVEL_NOTICE);
const replicator = this.services.replicator.getActiveReplicator() as LiveSyncCouchDBReplicator;
const remoteDB = await replicator.connectRemoteCouchDBWithSetting(
this.settings,
this.services.API.isMobile(),
true
);
if (typeof remoteDB == "string") {
this._log(remoteDB, LOG_LEVEL_NOTICE);
} else {
await fetchAllUsedChunks(this.localDatabase.localDatabase, remoteDB.db);
}
this._log(`Fetching chunks done`, LOG_LEVEL_NOTICE);
}
}
async resolveAllConflictedFilesByNewerOnes() {
this._log(`Resolving conflicts by newer ones`, LOG_LEVEL_NOTICE);
const files = this.core.storageAccess.getFileNames();
let i = 0;
for (const file of files) {
if (i++ % 10 == 0)
this._log(
`Checking and processing ${i} / ${files.length}`,
LOG_LEVEL_NOTICE,
"resolveAllConflictedFilesByNewerOnes"
);
await this.services.conflict.resolveByNewest(file);
}
this._log(`Done!`, LOG_LEVEL_NOTICE, "resolveAllConflictedFilesByNewerOnes");
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.appLifecycle.onLoaded.addHandler(this._everyOnload.bind(this));
services.database.resetDatabase.setHandler(this._resetLocalDatabase.bind(this));
services.remote.tryResetDatabase.setHandler(this._tryResetRemoteDatabase.bind(this));
services.remote.tryCreateDatabase.setHandler(this._tryCreateRemoteDatabase.bind(this));
services.setting.suspendAllSync.addHandler(this._allSuspendAllSync.bind(this));
}
}

View File

@@ -1,347 +0,0 @@
import { fireAndForget, yieldMicrotask } from "octagonal-wheels/promises";
import type { LiveSyncLocalDB } from "../../lib/src/pouchdb/LiveSyncLocalDB";
import { AbstractModule } from "../AbstractModule";
import {
Logger,
LOG_LEVEL_NOTICE,
LOG_LEVEL_INFO,
LOG_LEVEL_VERBOSE,
LEVEL_NOTICE,
LEVEL_INFO,
type LOG_LEVEL,
} from "octagonal-wheels/common/logger";
import { isLockAcquired, shareRunningResult, skipIfDuplicated } from "octagonal-wheels/concurrency/lock";
import { balanceChunkPurgedDBs } from "@/lib/src/pouchdb/chunks";
import { purgeUnreferencedChunks } from "@/lib/src/pouchdb/chunks";
import { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator";
import { type EntryDoc, type RemoteType } from "../../lib/src/common/types";
import { rateLimitedSharedExecution, scheduleTask, updatePreviousExecutionTime } from "../../common/utils";
import { EVENT_FILE_SAVED, EVENT_ON_UNRESOLVED_ERROR, EVENT_SETTING_SAVED, eventHub } from "../../common/events";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import { $msg } from "../../lib/src/common/i18n";
import { clearHandlers } from "../../lib/src/replication/SyncParamsHandler";
import type { LiveSyncCore } from "../../main";
import { ReplicateResultProcessor } from "./ReplicateResultProcessor";
const KEY_REPLICATION_ON_EVENT = "replicationOnEvent";
const REPLICATION_ON_EVENT_FORECASTED_TIME = 5000;
export class ModuleReplicator extends AbstractModule {
_replicatorType?: RemoteType;
_previousErrors = new Set<string>();
processor: ReplicateResultProcessor = new ReplicateResultProcessor(this);
showError(msg: string, max_log_level: LOG_LEVEL = LEVEL_NOTICE) {
const level = this._previousErrors.has(msg) ? LEVEL_INFO : max_log_level;
this._log(msg, level);
if (!this._previousErrors.has(msg)) {
this._previousErrors.add(msg);
eventHub.emitEvent(EVENT_ON_UNRESOLVED_ERROR);
}
}
clearErrors() {
this._previousErrors.clear();
eventHub.emitEvent(EVENT_ON_UNRESOLVED_ERROR);
}
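// Dedup sketch: calling showError("offline") twice logs the first occurrence at
// the given level (e.g. LEVEL_NOTICE) and the repeat at LEVEL_INFO, so the user
// sees one notice per distinct message; clearErrors() resets the set so the next
// occurrence is promoted back to a notice.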
private _everyOnloadAfterLoadSettings(): Promise<boolean> {
eventHub.onEvent(EVENT_FILE_SAVED, () => {
if (this.settings.syncOnSave && !this.core.services.appLifecycle.isSuspended()) {
scheduleTask("perform-replicate-after-save", 250, () => this.services.replication.replicateByEvent());
}
});
eventHub.onEvent(EVENT_SETTING_SAVED, (setting) => {
if (this._replicatorType !== setting.remoteType) {
void this.setReplicator();
}
if (this.core.settings.suspendParseReplicationResult) {
this.processor.suspend();
} else {
this.processor.resume();
}
});
return Promise.resolve(true);
}
async setReplicator() {
const replicator = await this.services.replicator.getNewReplicator();
if (!replicator) {
this.showError($msg("Replicator.Message.InitialiseFatalError"), LOG_LEVEL_NOTICE);
return false;
}
if (this.core.replicator) {
await this.core.replicator.closeReplication();
this._log("Replicator closed for changing", LOG_LEVEL_VERBOSE);
}
this.core.replicator = replicator;
this._replicatorType = this.settings.remoteType;
await yieldMicrotask();
// Clear any existing sync parameter handlers (i.e., clear the key-deriving salt).
clearHandlers();
return true;
}
_getReplicator(): LiveSyncAbstractReplicator {
return this.core.replicator;
}
_everyOnInitializeDatabase(db: LiveSyncLocalDB): Promise<boolean> {
return this.setReplicator();
}
_everyOnDatabaseInitialized(showNotice: boolean): Promise<boolean> {
fireAndForget(() => this.processor.restoreFromSnapshotOnce());
return Promise.resolve(true);
}
_everyOnResetDatabase(db: LiveSyncLocalDB): Promise<boolean> {
return this.setReplicator();
}
async ensureReplicatorPBKDF2Salt(showMessage: boolean = false): Promise<boolean> {
// Ensure the PBKDF2 salt used for key derivation is initialised on the active replicator.
const replicator = this.services.replicator.getActiveReplicator();
if (!replicator) {
this.showError($msg("Replicator.Message.InitialiseFatalError"), LOG_LEVEL_NOTICE);
return false;
}
return await replicator.ensurePBKDF2Salt(this.settings, showMessage, true);
}
async _everyBeforeReplicate(showMessage: boolean): Promise<boolean> {
// Check that the network is online before verifying the salt.
if (!this.core.managers.networkManager.isOnline) {
this.showError("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
return false;
}
// showMessage is passed as false because the error is shown right here (and it is a fatal error, so there is no way to hide it).
if (!(await this.ensureReplicatorPBKDF2Salt(false))) {
this.showError("Failed to initialise the encryption key, preventing replication.");
return false;
}
await this.processor.restoreFromSnapshotOnce();
this.clearErrors();
return true;
}
private async _replicate(showMessage: boolean = false): Promise<boolean | void> {
try {
updatePreviousExecutionTime(KEY_REPLICATION_ON_EVENT, REPLICATION_ON_EVENT_FORECASTED_TIME);
return await this.$$_replicate(showMessage);
} finally {
updatePreviousExecutionTime(KEY_REPLICATION_ON_EVENT);
}
}
/**
* Obsolete method. No longer maintained; it will be removed in the future.
* @deprecated v0.24.17
* @param showMessage If true, show message to the user.
*/
async cleaned(showMessage: boolean) {
Logger(`The remote database has been cleaned.`, showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
await skipIfDuplicated("cleanup", async () => {
const count = await purgeUnreferencedChunks(this.localDatabase.localDatabase, true);
const message = `The remote database has been cleaned up.
To synchronise, this device must also be cleaned up. ${count} chunk(s) will be erased from this device.
However, if there are many chunks to be deleted, fetching again may be faster.
We will lose the history of this device if we fetch the remote database again.
Even if you choose to clean up, you will see this option again if you exit Obsidian and then synchronise again.`;
const CHOICE_FETCH = "Fetch again";
const CHOICE_CLEAN = "Cleanup";
const CHOICE_DISMISS = "Dismiss";
const ret = await this.core.confirm.confirmWithMessage(
"Cleaned",
message,
[CHOICE_FETCH, CHOICE_CLEAN, CHOICE_DISMISS],
CHOICE_DISMISS,
30
);
if (ret == CHOICE_FETCH) {
await this.core.rebuilder.$performRebuildDB("localOnly");
}
if (ret == CHOICE_CLEAN) {
const replicator = this.services.replicator.getActiveReplicator();
if (!(replicator instanceof LiveSyncCouchDBReplicator)) return;
const remoteDB = await replicator.connectRemoteCouchDBWithSetting(
this.settings,
this.services.API.isMobile(),
true
);
if (typeof remoteDB == "string") {
Logger(remoteDB, LOG_LEVEL_NOTICE);
return false;
}
await purgeUnreferencedChunks(this.localDatabase.localDatabase, false);
this.localDatabase.clearCaches();
// Perform the synchronisation once.
if (await this.core.replicator.openReplication(this.settings, false, showMessage, true)) {
await balanceChunkPurgedDBs(this.localDatabase.localDatabase, remoteDB.db);
await purgeUnreferencedChunks(this.localDatabase.localDatabase, false);
this.localDatabase.clearCaches();
await this.services.replicator.getActiveReplicator()?.markRemoteResolved(this.settings);
Logger("The local database has been cleaned up.", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
} else {
Logger(
"Replication has been cancelled. Please try it again.",
showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO
);
}
}
});
}
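// Pre-flight checks for replication: plugin readiness, no cleanup in progress,
// no pending version-up notice, all pending file events committed, network online,
// and every onBeforeReplicate handler succeeded.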
async _canReplicate(showMessage: boolean = false): Promise<boolean> {
if (!this.services.appLifecycle.isReady()) {
Logger(`Not ready`);
return false;
}
if (isLockAcquired("cleanup")) {
Logger($msg("Replicator.Message.Cleaned"), LOG_LEVEL_NOTICE);
return false;
}
if (this.settings.versionUpFlash != "") {
Logger($msg("Replicator.Message.VersionUpFlash"), LOG_LEVEL_NOTICE);
return false;
}
if (!(await this.services.fileProcessing.commitPendingFileEvents())) {
this.showError($msg("Replicator.Message.Pending"), LOG_LEVEL_NOTICE);
return false;
}
if (!this.core.managers.networkManager.isOnline) {
this.showError("Network is offline", showMessage ? LOG_LEVEL_NOTICE : LOG_LEVEL_INFO);
return false;
}
if (!(await this.services.replication.onBeforeReplicate(showMessage))) {
this.showError($msg("Replicator.Message.SomeModuleFailed"), LOG_LEVEL_NOTICE);
return false;
}
this.clearErrors();
return true;
}
async $$_replicate(showMessage: boolean = false): Promise<boolean | void> {
const checkBeforeReplicate = await this.services.replication.isReplicationReady(showMessage);
if (!checkBeforeReplicate) return false;
// <-- This could be extracted into a module.
const ret = await this.core.replicator.openReplication(this.settings, false, showMessage, false);
if (!ret) {
if (this.core.replicator.tweakSettingsMismatched && this.core.replicator.preferredTweakValue) {
await this.services.tweakValue.askResolvingMismatched(this.core.replicator.preferredTweakValue);
} else {
if (this.core.replicator?.remoteLockedAndDeviceNotAccepted) {
if (this.core.replicator.remoteCleaned && this.settings.useIndexedDBAdapter) {
await this.cleaned(showMessage);
} else {
const message = $msg("Replicator.Dialogue.Locked.Message");
const CHOICE_FETCH = $msg("Replicator.Dialogue.Locked.Action.Fetch");
const CHOICE_DISMISS = $msg("Replicator.Dialogue.Locked.Action.Dismiss");
const CHOICE_UNLOCK = $msg("Replicator.Dialogue.Locked.Action.Unlock");
const ret = await this.core.confirm.askSelectStringDialogue(
message,
[CHOICE_FETCH, CHOICE_UNLOCK, CHOICE_DISMISS],
{
title: $msg("Replicator.Dialogue.Locked.Title"),
defaultAction: CHOICE_DISMISS,
timeout: 60,
}
);
if (ret == CHOICE_FETCH) {
this._log($msg("Replicator.Dialogue.Locked.Message.Fetch"), LOG_LEVEL_NOTICE);
await this.core.rebuilder.scheduleFetch();
this.services.appLifecycle.scheduleRestart();
return;
} else if (ret == CHOICE_UNLOCK) {
await this.core.replicator.markRemoteResolved(this.settings);
this._log($msg("Replicator.Dialogue.Locked.Message.Unlocked"), LOG_LEVEL_NOTICE);
return;
}
}
}
}
}
return ret;
}
private async _replicateByEvent(): Promise<boolean | void> {
const least = this.settings.syncMinimumInterval;
if (least > 0) {
return rateLimitedSharedExecution(KEY_REPLICATION_ON_EVENT, least, async () => {
return await this.services.replication.replicate();
});
}
return await shareRunningResult(`replication`, () => this.services.replication.replicate());
}
_parseReplicationResult(docs: Array<PouchDB.Core.ExistingDocument<EntryDoc>>): void {
this.processor.enqueueAll(docs);
}
_everyBeforeSuspendProcess(): Promise<boolean> {
this.core.replicator?.closeReplication();
return Promise.resolve(true);
}
private async _replicateAllToServer(
showingNotice: boolean = false,
sendChunksInBulkDisabled: boolean = false
): Promise<boolean> {
if (!this.services.appLifecycle.isReady()) return false;
if (!(await this.services.replication.onBeforeReplicate(showingNotice))) {
Logger($msg("Replicator.Message.SomeModuleFailed"), LOG_LEVEL_NOTICE);
return false;
}
if (!sendChunksInBulkDisabled) {
if (this.core.replicator instanceof LiveSyncCouchDBReplicator) {
if (
(await this.core.confirm.askYesNoDialog("Do you want to send all chunks before replication?", {
defaultOption: "No",
timeout: 20,
})) == "yes"
) {
await this.core.replicator.sendChunks(this.core.settings, undefined, true, 0);
}
}
}
const ret = await this.core.replicator.replicateAllToServer(this.settings, showingNotice);
if (ret) return true;
const checkResult = await this.services.replication.checkConnectionFailure();
if (checkResult == "CHECKAGAIN") return await this.services.remote.replicateAllToRemote(showingNotice);
return !checkResult;
}
async _replicateAllFromServer(showingNotice: boolean = false): Promise<boolean> {
if (!this.services.appLifecycle.isReady()) return false;
const ret = await this.core.replicator.replicateAllFromServer(this.settings, showingNotice);
if (ret) return true;
const checkResult = await this.services.replication.checkConnectionFailure();
if (checkResult == "CHECKAGAIN") return await this.services.remote.replicateAllFromRemote(showingNotice);
return !checkResult;
}
private _reportUnresolvedMessages(): Promise<string[]> {
return Promise.resolve([...this._previousErrors]);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getActiveReplicator.setHandler(this._getReplicator.bind(this));
services.databaseEvents.onDatabaseInitialisation.addHandler(this._everyOnInitializeDatabase.bind(this));
services.databaseEvents.onDatabaseInitialised.addHandler(this._everyOnDatabaseInitialized.bind(this));
services.databaseEvents.onResetDatabase.addHandler(this._everyOnResetDatabase.bind(this));
services.appLifecycle.onSettingLoaded.addHandler(this._everyOnloadAfterLoadSettings.bind(this));
services.replication.parseSynchroniseResult.setHandler(this._parseReplicationResult.bind(this));
services.appLifecycle.onSuspending.addHandler(this._everyBeforeSuspendProcess.bind(this));
services.replication.onBeforeReplicate.addHandler(this._everyBeforeReplicate.bind(this));
services.replication.isReplicationReady.setHandler(this._canReplicate.bind(this));
services.replication.replicate.setHandler(this._replicate.bind(this));
services.replication.replicateByEvent.setHandler(this._replicateByEvent.bind(this));
services.remote.replicateAllToRemote.setHandler(this._replicateAllToServer.bind(this));
services.remote.replicateAllFromRemote.setHandler(this._replicateAllFromServer.bind(this));
services.appLifecycle.getUnresolvedMessages.addHandler(this._reportUnresolvedMessages.bind(this));
}
}

View File

@@ -1,42 +0,0 @@
import { fireAndForget } from "octagonal-wheels/promises";
import { REMOTE_MINIO, REMOTE_P2P, type RemoteDBSettings } from "../../lib/src/common/types";
import { LiveSyncCouchDBReplicator } from "../../lib/src/replication/couchdb/LiveSyncReplicator";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import { AbstractModule } from "../AbstractModule";
import type { LiveSyncCore } from "../../main";
export class ModuleReplicatorCouchDB extends AbstractModule {
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
const settings = { ...this.settings, ...settingOverride };
// If new remote types are added, handle them here. Do not check `REMOTE_COUCHDB` directly; treating CouchDB as the fallback acts as a safety valve.
if (settings.remoteType == REMOTE_MINIO || settings.remoteType == REMOTE_P2P) {
return Promise.resolve(false);
}
return Promise.resolve(new LiveSyncCouchDBReplicator(this.core));
}
_everyAfterResumeProcess(): Promise<boolean> {
if (this.services.appLifecycle.isSuspended()) return Promise.resolve(true);
if (!this.services.appLifecycle.isReady()) return Promise.resolve(true);
if (this.settings.remoteType != REMOTE_MINIO && this.settings.remoteType != REMOTE_P2P) {
const LiveSyncEnabled = this.settings.liveSync;
const continuous = LiveSyncEnabled;
const eventualOnStart = !LiveSyncEnabled && this.settings.syncOnStart;
// If LiveSync is enabled, or sync-on-start applies, open replication.
if (LiveSyncEnabled || eventualOnStart) {
// And note that we do not open the conflict detection dialogue directly during this process.
// This should be raised explicitly if needed.
fireAndForget(async () => {
const canReplicate = await this.services.replication.isReplicationReady(false);
if (!canReplicate) return;
void this.core.replicator.openReplication(this.settings, continuous, false, false);
});
}
}
return Promise.resolve(true);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
}
}

View File

@@ -1,18 +0,0 @@
import { REMOTE_MINIO, type RemoteDBSettings } from "../../lib/src/common/types";
import { LiveSyncJournalReplicator } from "../../lib/src/replication/journal/LiveSyncJournalReplicator";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import type { LiveSyncCore } from "../../main";
import { AbstractModule } from "../AbstractModule";
export class ModuleReplicatorMinIO extends AbstractModule {
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
const settings = { ...this.settings, ...settingOverride };
if (settings.remoteType == REMOTE_MINIO) {
return Promise.resolve(new LiveSyncJournalReplicator(this.core));
}
return Promise.resolve(false);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
}
}

View File

@@ -1,34 +0,0 @@
import { REMOTE_P2P, type RemoteDBSettings } from "../../lib/src/common/types";
import type { LiveSyncAbstractReplicator } from "../../lib/src/replication/LiveSyncAbstractReplicator";
import { AbstractModule } from "../AbstractModule";
import { LiveSyncTrysteroReplicator } from "../../lib/src/replication/trystero/LiveSyncTrysteroReplicator";
import type { LiveSyncCore } from "../../main";
export class ModuleReplicatorP2P extends AbstractModule {
_anyNewReplicator(settingOverride: Partial<RemoteDBSettings> = {}): Promise<LiveSyncAbstractReplicator | false> {
const settings = { ...this.settings, ...settingOverride };
if (settings.remoteType == REMOTE_P2P) {
return Promise.resolve(new LiveSyncTrysteroReplicator(this.core));
}
return Promise.resolve(false);
}
_everyAfterResumeProcess(): Promise<boolean> {
if (this.settings.remoteType == REMOTE_P2P) {
// // If LiveSync enabled, open replication
// if (this.settings.liveSync) {
// fireAndForget(() => this.core.replicator.openReplication(this.settings, true, false, false));
// }
// // If sync on start enabled, open replication
// if (!this.settings.liveSync && this.settings.syncOnStart) {
// // Possibly ok as if only share the result
// fireAndForget(() => this.core.replicator.openReplication(this.settings, false, false, false));
// }
}
return Promise.resolve(true);
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.replicator.getNewReplicator.addHandler(this._anyNewReplicator.bind(this));
services.appLifecycle.onResumed.addHandler(this._everyAfterResumeProcess.bind(this));
}
}

View File

@@ -1,185 +0,0 @@
import { LRUCache } from "octagonal-wheels/memory/LRUCache";
import {
getStoragePathFromUXFileInfo,
id2path,
isInternalMetadata,
path2id,
stripInternalMetadataPrefix,
useMemo,
} from "../../common/utils";
import {
LOG_LEVEL_VERBOSE,
type DocumentID,
type EntryHasPath,
type FilePath,
type FilePathWithPrefix,
type ObsidianLiveSyncSettings,
type UXFileInfoStub,
} from "../../lib/src/common/types";
import { addPrefix, isAcceptedAll } from "../../lib/src/string_and_binary/path";
import { AbstractModule } from "../AbstractModule";
import { EVENT_REQUEST_RELOAD_SETTING_TAB, EVENT_SETTING_SAVED, eventHub } from "../../common/events";
import { isDirty } from "../../lib/src/common/utils";
import type { LiveSyncCore } from "../../main";
export class ModuleTargetFilter extends AbstractModule {
reloadIgnoreFiles() {
this.ignoreFiles = this.settings.ignoreFiles.split(",").map((e) => e.trim());
}
private _everyOnload(): Promise<boolean> {
this.reloadIgnoreFiles();
eventHub.onEvent(EVENT_SETTING_SAVED, (evt: ObsidianLiveSyncSettings) => {
this.reloadIgnoreFiles();
});
eventHub.onEvent(EVENT_REQUEST_RELOAD_SETTING_TAB, () => {
this.reloadIgnoreFiles();
});
return Promise.resolve(true);
}
_id2path(id: DocumentID, entry?: EntryHasPath, stripPrefix?: boolean): FilePathWithPrefix {
const tempId = id2path(id, entry);
if (stripPrefix && isInternalMetadata(tempId)) {
const out = stripInternalMetadataPrefix(tempId);
return out;
}
return tempId;
}
async _path2id(filename: FilePathWithPrefix | FilePath, prefix?: string): Promise<DocumentID> {
const destPath = addPrefix(filename, prefix ?? "");
return await path2id(
destPath,
this.settings.usePathObfuscation ? this.settings.passphrase : "",
!this.settings.handleFilenameCaseSensitive
);
}
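// A syncMaxSizeInMB of 0 disables the limit; otherwise files larger than the
// configured size (in MiB) are treated as too large to synchronise.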
private _isFileSizeExceeded(size: number) {
if (this.settings.syncMaxSizeInMB > 0 && size > 0) {
if (this.settings.syncMaxSizeInMB * 1024 * 1024 < size) {
return true;
}
}
return false;
}
_markFileListPossiblyChanged(): void {
this.totalFileEventCount++;
}
totalFileEventCount = 0;
get fileListPossiblyChanged() {
if (isDirty("totalFileEventCount", this.totalFileEventCount)) {
return true;
}
return false;
}
private async _isTargetFile(file: string | UXFileInfoStub, keepFileCheckList = false) {
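// Build (and memoise) a lower-cased filename count over the vault, used below to
// detect case-insensitive collisions. It is rebuilt only when the file list may
// have changed.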
const fileCount = useMemo<Record<string, number>>(
{
key: "fileCount", // forceUpdate: !keepFileCheckList,
},
(ctx, prev) => {
if (keepFileCheckList && prev) return prev;
if (!keepFileCheckList && prev && !this.fileListPossiblyChanged) {
return prev;
}
const fileList = (ctx.get("fileList") ?? []) as FilePathWithPrefix[];
// const fileNameList = (ctx.get("fileNameList") ?? []) as FilePath[];
// const fileNames =
const vaultFiles = this.core.storageAccess.getFileNames().sort();
if (prev && vaultFiles.length == fileList.length) {
const fl3 = new Set([...fileList, ...vaultFiles]);
if (fileList.length == fl3.size && vaultFiles.length == fl3.size) {
return prev;
}
}
ctx.set("fileList", vaultFiles);
const fileCount: Record<string, number> = {};
for (const file of vaultFiles) {
const lc = file.toLowerCase();
if (!fileCount[lc]) {
fileCount[lc] = 1;
} else {
fileCount[lc]++;
}
}
return fileCount;
}
);
const filepath = getStoragePathFromUXFileInfo(file);
const lc = filepath.toLowerCase();
if (this.services.setting.shouldCheckCaseInsensitively()) {
if (lc in fileCount && fileCount[lc] > 1) {
return false;
}
}
const fileNameLC = getStoragePathFromUXFileInfo(file).split("/").pop()?.toLowerCase();
if (this.settings.useIgnoreFiles) {
if (this.ignoreFiles.some((e) => e.toLowerCase() == fileNameLC)) {
// We must reload the ignore files because one of them has changed.
await this.readIgnoreFile(filepath);
}
if (await this.services.vault.isIgnoredByIgnoreFile(file)) {
return false;
}
}
if (!this.localDatabase?.isTargetFile(filepath)) return false;
return true;
}
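// Cache of parsed ignore files: maps a path to its lines, or `false` when the
// file is absent or unreadable, so repeated lookups stay cheap.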
ignoreFileCache = new LRUCache<string, string[] | false>(300, 250000, true);
ignoreFiles = [] as string[];
async readIgnoreFile(path: string) {
try {
// this._log(`[ignore]Reading ignore file: ${path}`, LOG_LEVEL_VERBOSE);
if (!(await this.core.storageAccess.isExistsIncludeHidden(path))) {
this.ignoreFileCache.set(path, false);
// this._log(`[ignore]Ignore file not found: ${path}`, LOG_LEVEL_VERBOSE);
return false;
}
const file = await this.core.storageAccess.readHiddenFileText(path);
const gitignore = file.split(/\r?\n/g);
this.ignoreFileCache.set(path, gitignore);
this._log(`[ignore]Ignore file loaded: ${path}`, LOG_LEVEL_VERBOSE);
return gitignore;
} catch (ex) {
this._log(`[ignore]Failed to read ignore file ${path}`);
this._log(ex, LOG_LEVEL_VERBOSE);
this.ignoreFileCache.set(path, false);
return false;
}
}
async getIgnoreFile(path: string) {
if (this.ignoreFileCache.has(path)) {
return this.ignoreFileCache.get(path) ?? false;
} else {
return await this.readIgnoreFile(path);
}
}
private async _isIgnoredByIgnoreFiles(file: string | UXFileInfoStub): Promise<boolean> {
if (!this.settings.useIgnoreFiles) {
return false;
}
const filepath = getStoragePathFromUXFileInfo(file);
if (this.ignoreFileCache.has(filepath)) {
// Renew
await this.readIgnoreFile(filepath);
}
if (!(await isAcceptedAll(filepath, this.ignoreFiles, (filename) => this.getIgnoreFile(filename)))) {
return true;
}
return false;
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
services.vault.markFileListPossiblyChanged.setHandler(this._markFileListPossiblyChanged.bind(this));
services.path.id2path.setHandler(this._id2path.bind(this));
services.path.path2id.setHandler(this._path2id.bind(this));
services.appLifecycle.onLoaded.addHandler(this._everyOnload.bind(this));
services.vault.isFileSizeTooLarge.setHandler(this._isFileSizeExceeded.bind(this));
services.vault.isIgnoredByIgnoreFile.setHandler(this._isIgnoredByIgnoreFiles.bind(this));
services.vault.isTargetFile.setHandler(this._isTargetFile.bind(this));
}
}

View File

@@ -1,480 +0,0 @@
import {
SYNCINFO_ID,
VER,
type AnyEntry,
type EntryDoc,
type EntryLeaf,
type LoadedEntry,
type MetaEntry,
} from "@/lib/src/common/types";
import type { ModuleReplicator } from "./ModuleReplicator";
import { getPath, isChunk, isValidPath } from "@/common/utils";
import type { LiveSyncCore } from "@/main";
import {
LOG_LEVEL_DEBUG,
LOG_LEVEL_INFO,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
Logger,
type LOG_LEVEL,
} from "@/lib/src/common/logger";
import { fireAndForget, isAnyNote, throttle } from "@/lib/src/common/utils";
import { Semaphore } from "octagonal-wheels/concurrency/semaphore_v2";
import { serialized } from "octagonal-wheels/concurrency/lock";
import type { ReactiveSource } from "octagonal-wheels/dataobject/reactive_v2";
const KV_KEY_REPLICATION_RESULT_PROCESSOR_SNAPSHOT = "replicationResultProcessorSnapshot";
type ReplicateResultProcessorState = {
queued: PouchDB.Core.ExistingDocument<EntryDoc>[];
processing: PouchDB.Core.ExistingDocument<EntryDoc>[];
};
function shortenId(id: string): string {
return id.length > 10 ? id.substring(0, 10) : id;
}
function shortenRev(rev: string | undefined): string {
if (!rev) return "undefined";
return rev.length > 10 ? rev.substring(0, 10) : rev;
}
export class ReplicateResultProcessor {
private log(message: string, level: LOG_LEVEL = LOG_LEVEL_INFO) {
Logger(`[ReplicateResultProcessor] ${message}`, level);
}
private logError(e: any) {
Logger(e, LOG_LEVEL_VERBOSE);
}
private replicator: ModuleReplicator;
constructor(replicator: ModuleReplicator) {
this.replicator = replicator;
}
get localDatabase() {
return this.replicator.core.localDatabase;
}
get services() {
return this.replicator.core.services;
}
get core(): LiveSyncCore {
return this.replicator.core;
}
public suspend() {
this._suspended = true;
}
public resume() {
this._suspended = false;
fireAndForget(() => this.runProcessQueue());
}
// Whether processing is suspended.
// If true, the processing-queue loop bails out.
private _suspended: boolean = false;
public get isSuspended() {
return (
this._suspended ||
!this.core.services.appLifecycle.isReady() ||
this.replicator.settings.suspendParseReplicationResult ||
this.core.services.appLifecycle.isSuspended()
);
}
/**
* Take a snapshot of the current processing state.
* This snapshot is stored in the KV database for recovery on restart.
*/
protected async _takeSnapshot() {
const snapshot = {
queued: this._queuedChanges.slice(),
processing: this._processingChanges.slice(),
} satisfies ReplicateResultProcessorState;
await this.core.kvDB.set(KV_KEY_REPLICATION_RESULT_PROCESSOR_SNAPSHOT, snapshot);
this.log(
`Snapshot taken. Queued: ${snapshot.queued.length}, Processing: ${snapshot.processing.length}`,
LOG_LEVEL_DEBUG
);
this.reportStatus();
}
/**
* Trigger taking a snapshot.
*/
protected _triggerTakeSnapshot() {
fireAndForget(() => this._takeSnapshot());
}
/**
* Throttled version of triggerTakeSnapshot (fires at most once every 50 ms).
*/
protected triggerTakeSnapshot = throttle(() => this._triggerTakeSnapshot(), 50);
/**
* Restore from snapshot.
*/
public async restoreFromSnapshot() {
const snapshot = await this.core.kvDB.get<ReplicateResultProcessorState>(
KV_KEY_REPLICATION_RESULT_PROCESSOR_SNAPSHOT
);
if (snapshot) {
// Restoring the snapshot re-runs processing for both queued and processing items.
const newQueue = [...snapshot.processing, ...snapshot.queued, ...this._queuedChanges];
this._queuedChanges = [];
this.enqueueAll(newQueue);
this.log(
`Restored from snapshot (${snapshot.processing.length + snapshot.queued.length} items)`,
LOG_LEVEL_INFO
);
// await this._takeSnapshot();
}
}
private _restoreFromSnapshot: Promise<void> | undefined = undefined;
/**
* Restore from snapshot only once.
* @returns Promise that resolves when restoration is complete.
*/
public restoreFromSnapshotOnce() {
if (!this._restoreFromSnapshot) {
this._restoreFromSnapshot = this.restoreFromSnapshot();
}
return this._restoreFromSnapshot;
}
/**
* Perform the given procedure while counting the concurrency.
* @param proc async procedure to perform
* @param countValue reactive source to count concurrency
* @returns result of the procedure
*/
async withCounting<T>(proc: () => Promise<T>, countValue: ReactiveSource<number>) {
countValue.value++;
try {
return await proc();
} finally {
countValue.value--;
}
}
/**
* Report the current status.
*/
protected reportStatus() {
this.core.replicationResultCount.value = this._queuedChanges.length + this._processingChanges.length;
}
/**
* Enqueue all the given changes for processing.
* @param changes Changes to enqueue
*/
public enqueueAll(changes: PouchDB.Core.ExistingDocument<EntryDoc>[]) {
for (const change of changes) {
// If the change is not a document change (e.g., a chunk, versioninfo, or syncinfo), process it directly.
const isProcessed = this.processIfNonDocumentChange(change);
if (!isProcessed) {
this.enqueueChange(change);
}
}
}
/**
* Process the change if it is not a document change.
* @param change Change to process
* @returns True if the change was processed; false otherwise
*/
protected processIfNonDocumentChange(change: PouchDB.Core.ExistingDocument<EntryDoc>) {
if (!change) {
this.log(`Received empty change`, LOG_LEVEL_VERBOSE);
return true;
}
if (isChunk(change._id)) {
// Emit event for new chunk
this.localDatabase.onNewLeaf(change as EntryLeaf);
this.log(`Processed chunk: ${shortenId(change._id)}`, LOG_LEVEL_DEBUG);
return true;
}
if (change.type == "versioninfo") {
this.log(`Version info document received: ${change._id}`, LOG_LEVEL_VERBOSE);
if (change.version > VER) {
// Incompatible version, stop replication.
this.core.replicator.closeReplication();
this.log(
`The remote database has been updated to an incompatible version. Please update your Self-hosted LiveSync plugin.`,
LOG_LEVEL_NOTICE
);
}
return true;
}
if (
change._id == SYNCINFO_ID || // Synchronisation information data
change._id.startsWith("_design") //design document
) {
this.log(`Skipped system document: ${change._id}`, LOG_LEVEL_VERBOSE);
return true;
}
return false;
}
/**
* Queue of changes to be processed.
*/
private _queuedChanges: PouchDB.Core.ExistingDocument<EntryDoc>[] = [];
/**
* List of changes being processed.
*/
private _processingChanges: PouchDB.Core.ExistingDocument<EntryDoc>[] = [];
/**
* Enqueue the given document change for processing.
* @param doc Document change to enqueue
* @returns
*/
protected enqueueChange(doc: PouchDB.Core.ExistingDocument<EntryDoc>) {
const old = this._queuedChanges.find((e) => e._id == doc._id);
const path = "path" in doc ? getPath(doc) : "<unknown>";
const docNote = `${path} (${shortenId(doc._id)}, ${shortenRev(doc._rev)})`;
if (old) {
if (old._rev == doc._rev) {
this.log(`[Enqueue] skipped (Already queued): ${docNote}`, LOG_LEVEL_VERBOSE);
return;
}
const oldRev = old._rev ?? "";
const isDeletedBefore = old._deleted === true || ("deleted" in old && old.deleted === true);
const isDeletedNow = doc._deleted === true || ("deleted" in doc && doc.deleted === true);
// Replace the old queued change. (Updates may arrive in batches, but processing always uses the latest revision, so when the change is of the same type we can simply replace the old entry.)
if (isDeletedBefore === isDeletedNow) {
this._queuedChanges = this._queuedChanges.filter((e) => e._id != doc._id);
this.log(`[Enqueue] requeued: ${docNote} (from rev: ${shortenRev(oldRev)})`, LOG_LEVEL_VERBOSE);
}
}
// Enqueue the change
this._queuedChanges.push(doc);
this.triggerTakeSnapshot();
this.triggerProcessQueue();
}
/**
* Trigger processing of the queued changes.
*/
protected triggerProcessQueue() {
fireAndForget(() => this.runProcessQueue());
}
/**
* Semaphore to limit concurrent processing.
* This bounds overall concurrency (at most 10 documents are processed at the same time); per-document ordering is handled separately via `serialized`.
*/
private _semaphore = Semaphore(10);
/**
* Flag indicating whether the process queue is currently running.
*/
private _isRunningProcessQueue: boolean = false;
/**
* Process the queued changes.
*/
private async runProcessQueue() {
// Avoid re-entrance, running while suspended, or spinning on an empty queue.
if (this._isRunningProcessQueue) return;
if (this.isSuspended) return;
if (this._queuedChanges.length == 0) return;
try {
this._isRunningProcessQueue = true;
while (this._queuedChanges.length > 0) {
// If suspension has been requested, bail out of the loop. Some concurrent tasks may still be running.
if (this.isSuspended) {
this.log(
`Processing has got suspended. Remaining items in queue: ${this._queuedChanges.length}`,
LOG_LEVEL_INFO
);
break;
}
// Wait for a free processing slot before dequeueing the next change.
const releaser = await this._semaphore.acquire();
releaser();
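// The slot is released immediately: acquiring here only paces the dequeue loop.
// The actual concurrency bound is acquired (and held) in applyToDatabase().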
// Dequeue the next change
const doc = this._queuedChanges.shift();
if (doc) {
this._processingChanges.push(doc);
void this.parseDocumentChange(doc);
}
// Take snapshot (to be restored on next startup if needed)
this.triggerTakeSnapshot();
}
} finally {
this._isRunningProcessQueue = false;
}
}
// Phase 1: parse replication result
/**
* Parse the given document change.
* @param change
* @returns
*/
async parseDocumentChange(change: PouchDB.Core.ExistingDocument<EntryDoc>) {
try {
if (isAnyNote(change)) {
const docMtime = change.mtime ?? 0;
const maxMTime = this.replicator.settings.maxMTimeForReflectEvents;
if (maxMTime > 0 && docMtime > maxMTime) {
const docPath = getPath(change);
this.log(
`Processing ${docPath} has been skipped due to modification time (${new Date(
docMtime * 1000
).toISOString()}) exceeding the limit`,
LOG_LEVEL_INFO
);
return;
}
}
// If the document is a virtual document, process it in the virtual document processor.
if (await this.services.replication.processVirtualDocument(change)) return;
// For note documents, verify that the file is a synchronisation target and within the size limit.
if (isAnyNote(change)) {
const docPath = getPath(change);
if (!(await this.services.vault.isTargetFile(docPath))) {
this.log(`Skipped: ${docPath}`, LOG_LEVEL_VERBOSE);
return;
}
const size = change.size;
// Note that this size check relies on the size recorded in the metadata, not the actual content size.
if (this.services.vault.isFileSizeTooLarge(size)) {
this.log(
`Processing ${docPath} has been skipped due to file size exceeding the limit`,
LOG_LEVEL_NOTICE
);
return;
}
return await this.applyToDatabase(change);
}
this.log(`Skipped unexpected non-note document: ${change._id}`, LOG_LEVEL_INFO);
return;
} finally {
// Remove from processing queue
this._processingChanges = this._processingChanges.filter((e) => e !== change);
this.triggerTakeSnapshot();
}
}
// Phase 2: apply the document to database
protected applyToDatabase(doc: PouchDB.Core.ExistingDocument<AnyEntry>) {
return this.withCounting(async () => {
let releaser: Awaited<ReturnType<typeof this._semaphore.acquire>> | undefined = undefined;
try {
releaser = await this._semaphore.acquire();
await this._applyToDatabase(doc);
} catch (e) {
this.log(`Error while processing replication result`, LOG_LEVEL_NOTICE);
this.logError(e);
} finally {
// Release the processing slot. (Removal from the "in-progress" list is done by the caller, so the snapshot will not include this document.)
if (releaser) {
releaser();
}
}
}, this.replicator.core.databaseQueueCount);
}
// Phase 2.1: process the document and apply to storage
// This function is serialized per document to avoid race-condition for the same document.
private _applyToDatabase(doc_: PouchDB.Core.ExistingDocument<AnyEntry>) {
const dbDoc = doc_ as LoadedEntry; // It has no `data`
const path = getPath(dbDoc);
return serialized(`replication-process:${dbDoc._id}`, async () => {
const docNote = `${path} (${shortenId(dbDoc._id)}, ${shortenRev(dbDoc._rev)})`;
const isRequired = await this.checkIsChangeRequiredForDatabaseProcessing(dbDoc);
if (!isRequired) {
this.log(`Skipped (Not latest): ${docNote}`, LOG_LEVEL_VERBOSE);
return;
}
// If `Read chunks online` is disabled, chunks should have been transferred before this point.
// However, in some cases chunks arrive later; if any are missing, we have to wait for them.
// (If `Use Only Local Chunks` is enabled, we must not attempt to fetch chunks online automatically.)
const isDeleted = dbDoc._deleted === true || ("deleted" in dbDoc && dbDoc.deleted === true);
// Gather full document if not deleted
const doc = isDeleted
? { ...dbDoc, data: "" }
: await this.localDatabase.getDBEntryFromMeta({ ...dbDoc }, false, true);
if (!doc) {
// Failed to gather content
this.log(`Failed to gather content of ${docNote}`, LOG_LEVEL_NOTICE);
return;
}
// Check whether another processor wants to handle this document; if so, skip processing here.
if (await this.services.replication.processOptionalSynchroniseResult(dbDoc)) {
// Already processed
this.log(`Processed by other processor: ${docNote}`, LOG_LEVEL_DEBUG);
} else if (isValidPath(getPath(doc))) {
// Apply to storage if the path is valid
await this.applyToStorage(doc as MetaEntry);
this.log(`Processed: ${docNote}`, LOG_LEVEL_DEBUG);
} else {
// Should process, but have an invalid path
this.log(`Unprocessed (Invalid path): ${docNote}`, LOG_LEVEL_VERBOSE);
}
return;
});
}
/**
* Phase 3: Apply the given entry to storage.
* @param entry
* @returns
*/
protected applyToStorage(entry: MetaEntry) {
return this.withCounting(async () => {
await this.services.replication.processSynchroniseResult(entry);
}, this.replicator.core.storageApplyingCount);
}
/**
* Check whether processing is required for the given document.
* @param dbDoc Document to check
* @returns True if processing is required; false otherwise
*/
protected async checkIsChangeRequiredForDatabaseProcessing(dbDoc: LoadedEntry): Promise<boolean> {
const path = getPath(dbDoc);
try {
const savedDoc = await this.localDatabase.getRaw<LoadedEntry>(dbDoc._id, {
conflicts: true,
revs_info: true,
});
const newRev = dbDoc._rev ?? "";
const latestRev = savedDoc._rev ?? "";
const revisions = savedDoc._revs_info?.map((e) => e.rev) ?? [];
if (savedDoc._conflicts && savedDoc._conflicts.length > 0) {
// There are conflicts, so we have to process it.
// (It may be auto-resolved, or user intervention may be required.)
return true;
}
if (newRev == latestRev) {
// This is the latest revision, so we can simply process it.
return true;
}
const index = revisions.indexOf(newRev);
if (index >= 0) {
// The revision has been inserted before.
return false; // The document has already been processed (while no conflict existed).
}
return true; // This mostly should not happen, but we have to process it just in case.
} catch (e: any) {
if ("status" in e && e.status == 404) {
// getRaw failed because the document does not exist; this should not normally happen during replication.
// If this process was triggered for some other reason, we **probably** have to process the document.
// Note that this is not a common case.
return true;
} else {
this.log(
`Failed to get existing document for ${path} (${shortenId(dbDoc._id)}, ${shortenRev(dbDoc._rev)}) `,
LOG_LEVEL_NOTICE
);
this.logError(e);
return false;
}
}
}
}

View File

@@ -1,82 +0,0 @@
import { AbstractModule } from "../AbstractModule.ts";
import { LOG_LEVEL_NOTICE, type FilePathWithPrefix } from "../../lib/src/common/types";
import { QueueProcessor } from "octagonal-wheels/concurrency/processor";
import { sendValue } from "octagonal-wheels/messagepassing/signal";
import type { InjectableServiceHub } from "../../lib/src/services/InjectableServices.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleConflictChecker extends AbstractModule {
async _queueConflictCheckIfOpen(file: FilePathWithPrefix): Promise<void> {
const path = file;
if (this.settings.checkConflictOnlyOnOpen) {
const af = this.services.vault.getActiveFilePath();
if (af && af != path) {
this._log(`${file} is conflicted, merging process has been postponed.`, LOG_LEVEL_NOTICE);
return;
}
}
await this.services.conflict.queueCheckFor(path);
}
async _queueConflictCheck(file: FilePathWithPrefix): Promise<void> {
const optionalConflictResult = await this.services.conflict.getOptionalConflictCheckMethod(file);
if (optionalConflictResult == true) {
// The conflict has been resolved by another process.
return;
} else if (optionalConflictResult === "newer") {
// The conflict should be resolved by the newer entry.
await this.services.conflict.resolveByNewest(file);
} else {
this.conflictCheckQueue.enqueue(file);
}
}
_waitForAllConflictProcessed(): Promise<boolean> {
return this.conflictResolveQueue.waitForAllProcessed();
}
// TODO: Move to ModuleConflictResolver?
conflictResolveQueue = new QueueProcessor(
async (filenames: FilePathWithPrefix[]) => {
const filename = filenames[0];
return await this.services.conflict.resolve(filename);
},
{
suspended: false,
batchSize: 1,
// No need to limit concurrency to `1` here; the subsequent process will handle it.
// In some cases no serialisation is needed at all (e.g., when auto-merge is available).
// Therefore, global concurrency limiting is performed by the resolver together with the UI.
concurrentLimit: 10,
delay: 0,
keepResultUntilDownstreamConnected: false,
}
).replaceEnqueueProcessor((queue, newEntity) => {
const filename = newEntity;
sendValue("cancel-resolve-conflict:" + filename, true);
const newQueue = [...queue].filter((e) => e != newEntity);
return [...newQueue, newEntity];
});
conflictCheckQueue = // First stage - check whether the file actually needs resolving -
new QueueProcessor(
(files: FilePathWithPrefix[]) => {
const filename = files[0];
return Promise.resolve([filename]);
},
{
suspended: false,
batchSize: 1,
concurrentLimit: 10,
delay: 0,
keepResultUntilDownstreamConnected: true,
pipeTo: this.conflictResolveQueue,
totalRemainingReactiveSource: this.core.conflictProcessQueueCount,
}
);
onBindFunction(core: LiveSyncCore, services: InjectableServiceHub): void {
services.conflict.queueCheckForIfOpen.setHandler(this._queueConflictCheckIfOpen.bind(this));
services.conflict.queueCheckFor.setHandler(this._queueConflictCheck.bind(this));
services.conflict.ensureAllProcessed.setHandler(this._waitForAllConflictProcessed.bind(this));
}
}

View File

@@ -1,220 +0,0 @@
import { serialized } from "octagonal-wheels/concurrency/lock";
import { AbstractModule } from "../AbstractModule.ts";
import {
AUTO_MERGED,
CANCELLED,
LOG_LEVEL_INFO,
LOG_LEVEL_NOTICE,
LOG_LEVEL_VERBOSE,
MISSING_OR_ERROR,
NOT_CONFLICTED,
type diff_check_result,
type FilePathWithPrefix,
} from "../../lib/src/common/types";
import {
compareMTime,
displayRev,
isCustomisationSyncMetadata,
isPluginMetadata,
TARGET_IS_NEW,
} from "../../common/utils";
import diff_match_patch from "diff-match-patch";
import { stripAllPrefixes, isPlainText } from "../../lib/src/string_and_binary/path";
import { eventHub } from "../../common/events.ts";
import type { InjectableServiceHub } from "../../lib/src/services/InjectableServices.ts";
import type { LiveSyncCore } from "../../main.ts";
declare global {
interface LSEvents {
"conflict-cancelled": FilePathWithPrefix;
}
}
export class ModuleConflictResolver extends AbstractModule {
private async _resolveConflictByDeletingRev(
path: FilePathWithPrefix,
deleteRevision: string,
subTitle = ""
): Promise<typeof MISSING_OR_ERROR | typeof AUTO_MERGED> {
const title = `Resolving ${subTitle ? `[${subTitle}]` : ""}:`;
if (!(await this.core.fileHandler.deleteRevisionFromDB(path, deleteRevision))) {
this._log(
`${title} Could not delete conflicted revision ${displayRev(deleteRevision)} of ${path}`,
LOG_LEVEL_NOTICE
);
return MISSING_OR_ERROR;
}
eventHub.emitEvent("conflict-cancelled", path);
this._log(
`${title} Conflicted revision has been deleted ${displayRev(deleteRevision)} ${path}`,
LOG_LEVEL_INFO
);
if ((await this.core.databaseFileAccess.getConflictedRevs(path)).length != 0) {
this._log(`${title} some conflicts are left in ${path}`, LOG_LEVEL_INFO);
return AUTO_MERGED;
}
if (isPluginMetadata(path) || isCustomisationSyncMetadata(path)) {
this._log(`${title} ${path} is a plugin metadata file, no need to write to storage`, LOG_LEVEL_INFO);
return AUTO_MERGED;
}
// If no conflicts were found, write the resolved content to the storage.
if (!(await this.core.fileHandler.dbToStorage(path, stripAllPrefixes(path), true))) {
this._log(`Could not write the resolved content to the storage: ${path}`, LOG_LEVEL_NOTICE);
return MISSING_OR_ERROR;
}
const level = subTitle.indexOf("same") !== -1 ? LOG_LEVEL_INFO : LOG_LEVEL_NOTICE;
this._log(`${path} has been merged automatically`, level);
return AUTO_MERGED;
}
async checkConflictAndPerformAutoMerge(path: FilePathWithPrefix): Promise<diff_check_result> {
// Try automatic merging first (markdown-aware unless disabled).
const ret = await this.localDatabase.tryAutoMerge(path, !this.settings.disableMarkdownAutoMerge);
if ("ok" in ret) {
return ret.ok;
}
if ("result" in ret) {
const p = ret.result;
// Merged content is coming.
// 1. Store the merged content to the storage
if (!(await this.core.databaseFileAccess.storeContent(path, p))) {
this._log(`Merged content cannot be stored:${path}`, LOG_LEVEL_NOTICE);
return MISSING_OR_ERROR;
}
// 2. As usual, delete the conflicted revision and if there are no conflicts, write the resolved content to the storage.
return await this.services.conflict.resolveByDeletingRevision(path, ret.conflictedRev, "Sensible");
}
const { rightRev, leftLeaf, rightLeaf } = ret;
// At this point there should be one or more conflicts.
if (leftLeaf == false) {
// This should not happen: the current revision could not be loaded.
this._log(`could not get current revisions:${path}`, LOG_LEVEL_NOTICE);
return MISSING_OR_ERROR;
}
if (rightLeaf == false) {
// The conflicted revision could not be loaded; delete it.
return await this.services.conflict.resolveByDeletingRevision(path, rightRev, "MISSING OLD REV");
}
const isSame = leftLeaf.data == rightLeaf.data && leftLeaf.deleted == rightLeaf.deleted;
const isBinary = !isPlainText(path);
const alwaysNewer = this.settings.resolveConflictsByNewerFile;
if (isSame || isBinary || alwaysNewer) {
const result = compareMTime(leftLeaf.mtime, rightLeaf.mtime);
let loser = leftLeaf;
// if (lMtime > rMtime) {
if (result != TARGET_IS_NEW) {
loser = rightLeaf;
}
const subTitle = [
`${isSame ? "same" : ""}`,
`${isBinary ? "binary" : ""}`,
`${alwaysNewer ? "alwaysNewer" : ""}`,
]
.filter((e) => e.trim())
.join(",");
return await this.services.conflict.resolveByDeletingRevision(path, loser.rev, subTitle);
}
// make diff.
const dmp = new diff_match_patch();
const diff = dmp.diff_main(leftLeaf.data, rightLeaf.data);
dmp.diff_cleanupSemantic(diff);
this._log(`conflict(s) found:${path}`);
return {
left: leftLeaf,
right: rightLeaf,
diff: diff,
};
}
private async _resolveConflict(filename: FilePathWithPrefix): Promise<void> {
// const filename = filenames[0];
return await serialized(`conflict-resolve:${filename}`, async () => {
const conflictCheckResult = await this.checkConflictAndPerformAutoMerge(filename);
if (
conflictCheckResult === MISSING_OR_ERROR ||
conflictCheckResult === NOT_CONFLICTED ||
conflictCheckResult === CANCELLED
) {
// nothing to do.
this._log(`[conflict] Not conflicted or cancelled: ${filename}`, LOG_LEVEL_VERBOSE);
return;
}
if (conflictCheckResult === AUTO_MERGED) {
// Auto-resolved, but we need to check it again.
if (this.settings.syncAfterMerge && !this.services.appLifecycle.isSuspended()) {
// Wait for the running replication; if none is running, run it once.
await this.services.replication.replicateByEvent();
}
this._log("[conflict] Automatically merged, but we have to check it again");
await this.services.conflict.queueCheckFor(filename);
return;
}
if (this.settings.showMergeDialogOnlyOnActive) {
const af = this.services.vault.getActiveFilePath();
if (af && af != filename) {
this._log(
`[conflict] ${filename} is conflicted. The merging process has been postponed until the file is opened.`,
LOG_LEVEL_NOTICE
);
return;
}
}
this._log("[conflict] Manual merge required!");
eventHub.emitEvent("conflict-cancelled", filename);
await this.services.conflict.resolveByUserInteraction(filename, conflictCheckResult);
});
}
private async _anyResolveConflictByNewest(filename: FilePathWithPrefix): Promise<boolean> {
const currentRev = await this.core.databaseFileAccess.fetchEntryMeta(filename, undefined, true);
if (currentRev == false) {
this._log(`Could not get current revision of ${filename}`);
return Promise.resolve(false);
}
const revs = await this.core.databaseFileAccess.getConflictedRevs(filename);
if (revs.length == 0) {
return Promise.resolve(true);
}
const mTimeAndRev = (
[
[currentRev.mtime, currentRev._rev],
...(await Promise.all(
revs.map(async (rev) => {
const leaf = await this.core.databaseFileAccess.fetchEntryMeta(filename, rev);
if (leaf == false) {
return [0, rev] as [number, string];
}
return [leaf.mtime, rev] as [number, string];
})
)),
] as [number, string][]
).sort((a, b) => {
const diff = b[0] - a[0];
if (diff == 0) {
return a[1].localeCompare(b[1], "en", { numeric: true });
}
return diff;
});
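// Sorted newest-first by mtime; ties are broken by revision string so the outcome is deterministic.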
// console.warn(mTimeAndRev);
this._log(
`Resolving conflict by newest: ${filename} (Newest: ${new Date(mTimeAndRev[0][0]).toLocaleString()}) (${mTimeAndRev.length} revision(s) exist)`
);
for (let i = 1; i < mTimeAndRev.length; i++) {
this._log(
`conflict: Deleting the older revision ${mTimeAndRev[i][1]} (${new Date(mTimeAndRev[i][0]).toLocaleString()}) of ${filename}`
);
await this.services.conflict.resolveByDeletingRevision(filename, mTimeAndRev[i][1], "NEWEST");
}
return true;
}
onBindFunction(core: LiveSyncCore, services: InjectableServiceHub): void {
services.conflict.resolveByDeletingRevision.setHandler(this._resolveConflictByDeletingRev.bind(this));
services.conflict.resolve.setHandler(this._resolveConflict.bind(this));
services.conflict.resolveByNewest.setHandler(this._anyResolveConflictByNewest.bind(this));
}
}

View File

@@ -1,331 +0,0 @@
import { LOG_LEVEL_INFO, LOG_LEVEL_NOTICE, LOG_LEVEL_VERBOSE } from "octagonal-wheels/common/logger";
import { normalizePath } from "../../deps.ts";
import {
FlagFilesHumanReadable,
FlagFilesOriginal,
REMOTE_MINIO,
TweakValuesShouldMatchedTemplate,
type ObsidianLiveSyncSettings,
} from "../../lib/src/common/types.ts";
import { AbstractModule } from "../AbstractModule.ts";
import type { LiveSyncCore } from "../../main.ts";
import FetchEverything from "../features/SetupWizard/dialogs/FetchEverything.svelte";
import RebuildEverything from "../features/SetupWizard/dialogs/RebuildEverything.svelte";
import { extractObject } from "octagonal-wheels/object";
import { SvelteDialogManagerBase } from "@/lib/src/UI/svelteDialog.ts";
import type { ServiceContext } from "@/lib/src/services/base/ServiceBase.ts";
export class ModuleRedFlag extends AbstractModule {
async isFlagFileExist(path: string) {
const redflag = await this.core.storageAccess.isExists(normalizePath(path));
if (redflag) {
return true;
}
return false;
}
async deleteFlagFile(path: string) {
try {
const isFlagged = await this.core.storageAccess.isExists(normalizePath(path));
if (isFlagged) {
await this.core.storageAccess.delete(path, true);
}
} catch (ex) {
this._log(`Could not delete ${path}`);
this._log(ex, LOG_LEVEL_VERBOSE);
}
}
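// "Red flag" files placed in the vault schedule special start-up behaviour:
// suspend everything, rebuild everything, or fetch everything. Both the original
// and the human-readable flag names are accepted.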
isSuspendFlagActive = async () => await this.isFlagFileExist(FlagFilesOriginal.SUSPEND_ALL);
isRebuildFlagActive = async () =>
(await this.isFlagFileExist(FlagFilesOriginal.REBUILD_ALL)) ||
(await this.isFlagFileExist(FlagFilesHumanReadable.REBUILD_ALL));
isFetchAllFlagActive = async () =>
(await this.isFlagFileExist(FlagFilesOriginal.FETCH_ALL)) ||
(await this.isFlagFileExist(FlagFilesHumanReadable.FETCH_ALL));
async cleanupRebuildFlag() {
await this.deleteFlagFile(FlagFilesOriginal.REBUILD_ALL);
await this.deleteFlagFile(FlagFilesHumanReadable.REBUILD_ALL);
}
async cleanupFetchAllFlag() {
await this.deleteFlagFile(FlagFilesOriginal.FETCH_ALL);
await this.deleteFlagFile(FlagFilesHumanReadable.FETCH_ALL);
}
// dialogManager = new SvelteDialogManagerBase(this.core);
get dialogManager(): SvelteDialogManagerBase<ServiceContext> {
return this.core.services.UI.dialogManager;
}
/**
* Adjust setting to remote if needed.
* @param extra result of dialogues that may contain the preventFetchingConfig flag (e.g., from FetchEverything or RebuildEverything)
* @param config current configuration, used to retrieve the remote's preferred configuration
*/
async adjustSettingToRemoteIfNeeded(extra: { preventFetchingConfig: boolean }, config: ObsidianLiveSyncSettings) {
if (extra && extra.preventFetchingConfig) {
return;
}
// Fetch the remote configuration and apply it if possible.
if (await this.adjustSettingToRemote(config)) {
config = this.core.settings;
} else {
this._log("Remote configuration not applied.", LOG_LEVEL_NOTICE);
}
console.debug(config);
}
/**
* Adjust setting to remote configuration.
* @param config current configuration, used to retrieve the remote's preferred configuration
* @returns the updated configuration if applied, otherwise undefined.
*/
async adjustSettingToRemote(config: ObsidianLiveSyncSettings) {
// Fetch remote configuration unless prevented.
const SKIP_FETCH = "Skip and proceed";
const RETRY_FETCH = "Retry (recommended)";
let canProceed = false;
do {
const remoteTweaks = await this.services.tweakValue.fetchRemotePreferred(config);
if (!remoteTweaks) {
const choice = await this.core.confirm.askSelectStringDialogue(
"Could not fetch configuration from remote. If you are new to the Self-hosted LiveSync, this might be expected. If not, you should check your network or server settings.",
[SKIP_FETCH, RETRY_FETCH] as const,
{
defaultAction: RETRY_FETCH,
timeout: 0,
title: "Fetch Remote Configuration Failed",
}
);
if (choice === SKIP_FETCH) {
canProceed = true;
}
} else {
const necessary = extractObject(TweakValuesShouldMatchedTemplate, remoteTweaks);
// Check if any necessary tweak value is different from current config.
const differentItems = Object.entries(necessary).filter(([key, value]) => {
return (config as any)[key] !== value;
});
if (differentItems.length === 0) {
this._log(
"Remote configuration matches local configuration. No changes applied.",
LOG_LEVEL_NOTICE
);
} else {
await this.core.confirm.askSelectStringDialogue(
"Your settings differed slightly from the server's. The plug-in has supplemented the incompatible parts with the server settings!",
["OK"] as const,
{
defaultAction: "OK",
timeout: 0,
}
);
}
config = {
...config,
...Object.fromEntries(differentItems),
} satisfies ObsidianLiveSyncSettings;
this.core.settings = config;
await this.core.services.setting.saveSettingData();
this._log("Remote configuration applied.", LOG_LEVEL_NOTICE);
canProceed = true;
return this.core.settings;
}
} while (!canProceed);
}
/**
* Process vault initialisation with suspending file watching and sync.
* @param proc process to be executed during initialisation, should return true if can be continued, false if app is unable to continue the process.
* @param keepSuspending whether to keep suspending file watching after the process.
* @returns result of the process, or false if error occurs.
*/
async processVaultInitialisation(proc: () => Promise<boolean>, keepSuspending = false) {
try {
// Disable batch saving and file watching during initialisation.
this.settings.batchSave = false;
await this.services.setting.suspendAllSync();
await this.services.setting.suspendExtraSync();
this.settings.suspendFileWatching = true;
await this.saveSettings();
try {
const result = await proc();
return result;
} catch (ex) {
this._log("Error during vault initialisation process.", LOG_LEVEL_NOTICE);
this._log(ex, LOG_LEVEL_VERBOSE);
return false;
}
} catch (ex) {
this._log("Error during vault initialisation.", LOG_LEVEL_NOTICE);
this._log(ex, LOG_LEVEL_VERBOSE);
return false;
} finally {
if (!keepSuspending) {
// Re-enable file watching after initialisation.
this.settings.suspendFileWatching = false;
await this.saveSettings();
}
}
}
/**
* Handle the rebuild everything scheduled operation.
* @returns true if can be continued, false if app restart is needed.
*/
async onRebuildEverythingScheduled() {
const method = await this.dialogManager.openWithExplicitCancel(RebuildEverything);
if (method === "cancelled") {
// Clean up the flag file and restart the app.
this._log("Rebuild everything cancelled by user.", LOG_LEVEL_NOTICE);
await this.cleanupRebuildFlag();
this.services.appLifecycle.performRestart();
return false;
}
const { extra } = method;
await this.adjustSettingToRemoteIfNeeded(extra, this.settings);
return await this.processVaultInitialisation(async () => {
await this.core.rebuilder.$rebuildEverything();
await this.cleanupRebuildFlag();
this._log("Rebuild everything operation completed.", LOG_LEVEL_NOTICE);
return true;
});
}
/**
* Handle the fetch all scheduled operation.
* @returns true if can be continued, false if app restart is needed.
*/
async onFetchAllScheduled() {
const method = await this.dialogManager.openWithExplicitCancel(FetchEverything);
if (method === "cancelled") {
this._log("Fetch everything cancelled by user.", LOG_LEVEL_NOTICE);
// Clean up the flag file and restart the app.
await this.cleanupFetchAllFlag();
this.services.appLifecycle.performRestart();
return false;
}
const { vault, extra } = method;
// If the remote is MinIO, makeLocalChunkBeforeSync is not available (because there is no deduplication when sending).
const makeLocalChunkBeforeSyncAvailable = this.settings.remoteType !== REMOTE_MINIO;
const mapVaultStateToAction = {
identical: {
// If both are identical, local files need not be created before sync;
// chunks are created beforehand purely for efficiency.
makeLocalChunkBeforeSync: makeLocalChunkBeforeSyncAvailable,
makeLocalFilesBeforeSync: false,
},
independent: {
// If both are independent, nothing needs to be made before sync.
// Respect the remote state.
makeLocalChunkBeforeSync: false,
makeLocalFilesBeforeSync: false,
},
unbalanced: {
// If the vaults are unbalanced, local files should be stored before sync to avoid data loss.
// This also creates chunks (good for efficiency), and the metadata created with them will be detected as conflicting where needed.
makeLocalChunkBeforeSync: false,
makeLocalFilesBeforeSync: true,
},
cancelled: {
// Cancelled case, not actually used.
makeLocalChunkBeforeSync: false,
makeLocalFilesBeforeSync: false,
},
} as const;
return await this.processVaultInitialisation(async () => {
await this.adjustSettingToRemoteIfNeeded(extra, this.settings);
// Okay, proceed to fetch everything.
const { makeLocalChunkBeforeSync, makeLocalFilesBeforeSync } = mapVaultStateToAction[vault];
this._log(
`Fetching everything with settings: makeLocalChunkBeforeSync=${makeLocalChunkBeforeSync}, makeLocalFilesBeforeSync=${makeLocalFilesBeforeSync}`,
LOG_LEVEL_INFO
);
await this.core.rebuilder.$fetchLocal(makeLocalChunkBeforeSync, !makeLocalFilesBeforeSync);
await this.cleanupFetchAllFlag();
this._log("Fetch everything operation completed. Vault files will be gradually synced.", LOG_LEVEL_NOTICE);
return true;
});
}
async onSuspendAllScheduled() {
this._log("SCRAM is detected. All operations are suspended.", LOG_LEVEL_NOTICE);
return await this.processVaultInitialisation(async () => {
this._log(
"All operations are suspended as per SCRAM.\nLogs will be written to the file. This might be a performance impact.",
LOG_LEVEL_NOTICE
);
this.settings.writeLogToTheFile = true;
await this.core.services.setting.saveSettingData();
return Promise.resolve(false);
}, true);
}
async verifyAndUnlockSuspension() {
if (!this.settings.suspendFileWatching) {
return true;
}
if (
(await this.core.confirm.askYesNoDialog(
"Do you want to resume file and database processing, and restart obsidian now?",
{ defaultOption: "Yes", timeout: 15 }
)) != "yes"
) {
// TODO: Confirm whether to actually proceed to the next process.
return true;
}
this.settings.suspendFileWatching = false;
await this.saveSettings();
this.services.appLifecycle.performRestart();
return false;
}
private async processFlagFilesOnStartup(): Promise<boolean> {
const isFlagSuspensionActive = await this.isSuspendFlagActive();
const isFlagRebuildActive = await this.isRebuildFlagActive();
const isFlagFetchAllActive = await this.isFetchAllFlagActive();
// TODO: Address the case when both flags are active (very unlikely though).
// if(isFlagFetchAllActive && isFlagRebuildActive) {
// const message = "Rebuild everything and Fetch everything flags are both detected.";
// await this.core.confirm.askSelectStringDialogue(
// "Both Rebuild Everything and Fetch Everything flags are detected. Please remove one of them and restart the app.",
// ["OK"] as const,)
if (isFlagFetchAllActive) {
const res = await this.onFetchAllScheduled();
if (res) {
return await this.verifyAndUnlockSuspension();
}
return false;
}
if (isFlagRebuildActive) {
const res = await this.onRebuildEverythingScheduled();
if (res) {
return await this.verifyAndUnlockSuspension();
}
return false;
}
if (isFlagSuspensionActive) {
const res = await this.onSuspendAllScheduled();
return res;
}
return true;
}
async _everyOnLayoutReady(): Promise<boolean> {
try {
const flagProcessResult = await this.processFlagFilesOnStartup();
return flagProcessResult;
} catch (ex) {
this._log("Something went wrong on FlagFile Handling", LOG_LEVEL_NOTICE);
this._log(ex, LOG_LEVEL_VERBOSE);
}
return true;
}
onBindFunction(core: LiveSyncCore, services: typeof core.services): void {
super.onBindFunction(core, services);
services.appLifecycle.onLayoutReady.addHandler(this._everyOnLayoutReady.bind(this));
}
}
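// A minimal, self-contained sketch of the startup dispatch above. Names such as
// `FlagState`, `FlagHandlers`, and `dispatchStartupFlags` are illustrative only,
// not part of the plugin: fetch-everything takes precedence over rebuild, which
// takes precedence over suspension, and a truthy handler result is followed by
// the suspension-unlock check.
type FlagState = { fetchAll: boolean; rebuild: boolean; suspend: boolean };
type FlagHandlers = {
onFetchAll: () => Promise<boolean>;
onRebuild: () => Promise<boolean>;
onSuspend: () => Promise<boolean>;
unlockSuspension: () => Promise<boolean>;
};
async function dispatchStartupFlags(state: FlagState, h: FlagHandlers): Promise<boolean> {
if (state.fetchAll) return (await h.onFetchAll()) ? h.unlockSuspension() : false;
if (state.rebuild) return (await h.onRebuild()) ? h.unlockSuspension() : false;
if (state.suspend) return h.onSuspend();
return true; // no flags: continue start-up as usual
}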


@@ -1,22 +0,0 @@
import type { InjectableServiceHub } from "../../lib/src/services/InjectableServices.ts";
import type { LiveSyncCore } from "../../main.ts";
import { AbstractModule } from "../AbstractModule.ts";
export class ModuleRemoteGovernor extends AbstractModule {
private async _markRemoteLocked(lockByClean: boolean = false): Promise<void> {
return await this.core.replicator.markRemoteLocked(this.settings, true, lockByClean);
}
private async _markRemoteUnlocked(): Promise<void> {
return await this.core.replicator.markRemoteLocked(this.settings, false, false);
}
private async _markRemoteResolved(): Promise<void> {
return await this.core.replicator.markRemoteResolved(this.settings);
}
onBindFunction(core: LiveSyncCore, services: InjectableServiceHub): void {
services.remote.markLocked.setHandler(this._markRemoteLocked.bind(this));
services.remote.markUnlocked.setHandler(this._markRemoteUnlocked.bind(this));
services.remote.markResolved.setHandler(this._markRemoteResolved.bind(this));
}
}
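// A hedged usage sketch (the call sites below are assumptions, not taken from
// this diff): once bound, other modules can drive the remote lock state through
// the service hub, e.g.
//   await services.remote.markLocked();    // lock the remote (optionally lockByClean)
//   await services.remote.markUnlocked();  // release the lock
//   await services.remote.markResolved();  // mark this client as resolved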


@@ -1,299 +0,0 @@
import { Logger, LOG_LEVEL_NOTICE } from "octagonal-wheels/common/logger";
import { extractObject } from "octagonal-wheels/object";
import {
TweakValuesShouldMatchedTemplate,
IncompatibleChanges,
confName,
type TweakValues,
type RemoteDBSettings,
IncompatibleChangesInSpecificPattern,
CompatibleButLossyChanges,
} from "../../lib/src/common/types.ts";
import { escapeMarkdownValue } from "../../lib/src/common/utils.ts";
import { AbstractModule } from "../AbstractModule.ts";
import { $msg } from "../../lib/src/common/i18n.ts";
import type { InjectableServiceHub } from "../../lib/src/services/InjectableServices.ts";
import type { LiveSyncCore } from "../../main.ts";
export class ModuleResolvingMismatchedTweaks extends AbstractModule {
async _anyAfterConnectCheckFailed(): Promise<boolean | "CHECKAGAIN" | undefined> {
if (!this.core.replicator.tweakSettingsMismatched && !this.core.replicator.preferredTweakValue) return false;
const preferred = this.core.replicator.preferredTweakValue;
if (!preferred) return false;
const ret = await this.services.tweakValue.askResolvingMismatched(preferred);
if (ret == "OK") return false;
if (ret == "CHECKAGAIN") return "CHECKAGAIN";
if (ret == "IGNORE") return true;
}
async _checkAndAskResolvingMismatchedTweaks(
preferred: Partial<TweakValues>
): Promise<[TweakValues | boolean, boolean]> {
const mine = extractObject(TweakValuesShouldMatchedTemplate, this.settings);
const items = Object.entries(TweakValuesShouldMatchedTemplate);
let rebuildRequired = false;
let rebuildRecommended = false;
// Making tables:
// let table = `| Value name | This device | Configured | \n` + `|: --- |: --- :|: ---- :| \n`;
const tableRows = [];
// const items = [mine,preferred]
for (const v of items) {
const key = v[0] as keyof typeof TweakValuesShouldMatchedTemplate;
const valueMine = escapeMarkdownValue(mine[key]);
const valuePreferred = escapeMarkdownValue(preferred[key]);
if (valueMine == valuePreferred) continue;
if (IncompatibleChanges.indexOf(key) !== -1) {
rebuildRequired = true;
}
for (const pattern of IncompatibleChangesInSpecificPattern) {
if (pattern.key !== key) continue;
// If a `from` value is supplied, check whether the current value matches it: if the current value equals the `from` value, a rebuild is needed.
const isFromConditionMet = "from" in pattern ? pattern.from === mine[key] : false;
// Likewise, if a `to` value is supplied, check it against the preferred value.
const isToConditionMet = "to" in pattern ? pattern.to === preferred[key] : false;
// If either condition is met, a rebuild is required unless the pattern is only a recommendation.
if (isFromConditionMet || isToConditionMet) {
if (pattern.isRecommendation) {
rebuildRecommended = true;
} else {
rebuildRequired = true;
}
}
}
if (CompatibleButLossyChanges.indexOf(key) !== -1) {
rebuildRecommended = true;
}
// table += `| ${confName(key)} | ${valueMine} | ${valuePreferred} | \n`;
tableRows.push(
$msg("TweakMismatchResolve.Table.Row", {
name: confName(key),
self: valueMine,
remote: valuePreferred,
})
);
}
const additionalMessage =
rebuildRequired && this.core.settings.isConfigured
? $msg("TweakMismatchResolve.Message.WarningIncompatibleRebuildRequired")
: "";
const additionalMessage2 =
rebuildRecommended && this.core.settings.isConfigured
? $msg("TweakMismatchResolve.Message.WarningIncompatibleRebuildRecommended")
: "";
const table = $msg("TweakMismatchResolve.Table", { rows: tableRows.join("\n") });
const message = $msg("TweakMismatchResolve.Message.MainTweakResolving", {
table: table,
additionalMessage: [additionalMessage, additionalMessage2].filter((v) => v).join("\n"),
});
const CHOICE_USE_REMOTE = $msg("TweakMismatchResolve.Action.UseRemote");
const CHOICE_USE_REMOTE_WITH_REBUILD = $msg("TweakMismatchResolve.Action.UseRemoteWithRebuild");
const CHOICE_USE_REMOTE_PREVENT_REBUILD = $msg("TweakMismatchResolve.Action.UseRemoteAcceptIncompatible");
const CHOICE_USE_MINE = $msg("TweakMismatchResolve.Action.UseMine");
const CHOICE_USE_MINE_WITH_REBUILD = $msg("TweakMismatchResolve.Action.UseMineWithRebuild");
const CHOICE_USE_MINE_PREVENT_REBUILD = $msg("TweakMismatchResolve.Action.UseMineAcceptIncompatible");
const CHOICE_DISMISS = $msg("TweakMismatchResolve.Action.Dismiss");
const CHOICE_AND_VALUES = [] as [string, [result: TweakValues | boolean, rebuild: boolean]][];
if (rebuildRequired) {
CHOICE_AND_VALUES.push([CHOICE_USE_REMOTE_WITH_REBUILD, [preferred, true]]);
CHOICE_AND_VALUES.push([CHOICE_USE_MINE_WITH_REBUILD, [true, true]]);
CHOICE_AND_VALUES.push([CHOICE_USE_REMOTE_PREVENT_REBUILD, [preferred, false]]);
CHOICE_AND_VALUES.push([CHOICE_USE_MINE_PREVENT_REBUILD, [true, false]]);
} else if (rebuildRecommended) {
CHOICE_AND_VALUES.push([CHOICE_USE_REMOTE, [preferred, false]]);
CHOICE_AND_VALUES.push([CHOICE_USE_MINE, [true, false]]);
CHOICE_AND_VALUES.push([CHOICE_USE_REMOTE_WITH_REBUILD, [preferred, true]]);
CHOICE_AND_VALUES.push([CHOICE_USE_MINE_WITH_REBUILD, [true, true]]);
} else {
CHOICE_AND_VALUES.push([CHOICE_USE_REMOTE, [preferred, false]]);
CHOICE_AND_VALUES.push([CHOICE_USE_MINE, [true, false]]);
}
CHOICE_AND_VALUES.push([CHOICE_DISMISS, [false, false]]);
const CHOICES = Object.fromEntries(CHOICE_AND_VALUES) as Record<
string,
[TweakValues | boolean, performRebuild: boolean]
>;
const retKey = await this.core.confirm.askSelectStringDialogue(message, Object.keys(CHOICES), {
title: $msg("TweakMismatchResolve.Title.TweakResolving"),
timeout: 60,
defaultAction: CHOICE_DISMISS,
});
if (!retKey) return [false, false];
return CHOICES[retKey];
}
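// The same from/to matching reappears in `_askUseRemoteConfiguration` below: a
// pattern fires when the local value equals `from` or the preferred value equals
// `to`, and a firing pattern forces a rebuild unless it is only a recommendation
// (see the factored sketch after this class).
// The method below returns "OK" when nothing is mismatched, "CHECKAGAIN" to ask
// the caller to re-run the connection check against the updated settings, and
// "IGNORE" to proceed without resolving.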
async _askResolvingMismatchedTweaks(): Promise<"OK" | "CHECKAGAIN" | "IGNORE"> {
if (!this.core.replicator.tweakSettingsMismatched) {
return "OK";
}
const tweaks = this.core.replicator.preferredTweakValue;
if (!tweaks) {
return "IGNORE";
}
const preferred = extractObject(TweakValuesShouldMatchedTemplate, tweaks);
const [conf, rebuildRequired] = await this.services.tweakValue.checkAndAskResolvingMismatched(preferred);
if (!conf) return "IGNORE";
if (conf === true) {
await this.core.replicator.setPreferredRemoteTweakSettings(this.settings);
if (rebuildRequired) {
await this.core.rebuilder.$rebuildRemote();
}
Logger(
`Tweak values on the remote server have been updated. Your other devices will see this message.`,
LOG_LEVEL_NOTICE
);
return "CHECKAGAIN";
}
if (conf) {
this.settings = { ...this.settings, ...conf };
await this.core.replicator.setPreferredRemoteTweakSettings(this.settings);
await this.services.setting.saveSettingData();
if (rebuildRequired) {
await this.core.rebuilder.$fetchLocal();
}
Logger(`The configuration has been updated to match the settings configured on the other device.`, LOG_LEVEL_NOTICE);
return "CHECKAGAIN";
}
return "IGNORE";
}
async _fetchRemotePreferredTweakValues(trialSetting: RemoteDBSettings): Promise<TweakValues | false> {
const replicator = await this.services.replicator.getNewReplicator(trialSetting);
if (!replicator) {
this._log("The remote type is not supported for fetching preferred tweak values.", LOG_LEVEL_NOTICE);
return false;
}
if (await replicator.tryConnectRemote(trialSetting)) {
const preferred = await replicator.getRemotePreferredTweakValues(trialSetting);
if (preferred) {
return preferred;
}
this._log("Failed to get the preferred tweak values from the remote server.", LOG_LEVEL_NOTICE);
return false;
}
this._log("Failed to connect to the remote server.", LOG_LEVEL_NOTICE);
return false;
}
async _checkAndAskUseRemoteConfiguration(
trialSetting: RemoteDBSettings
): Promise<{ result: false | TweakValues; requireFetch: boolean }> {
const preferred = await this.services.tweakValue.fetchRemotePreferred(trialSetting);
if (preferred) {
return await this.services.tweakValue.askUseRemoteConfiguration(trialSetting, preferred);
}
return { result: false, requireFetch: false };
}
async _askUseRemoteConfiguration(
trialSetting: RemoteDBSettings,
preferred: TweakValues
): Promise<{ result: false | TweakValues; requireFetch: boolean }> {
const items = Object.entries(TweakValuesShouldMatchedTemplate);
let rebuildRequired = false;
let rebuildRecommended = false;
// Making tables:
// let table = `| Value name | This device | On Remote | \n` + `|: --- |: ---- :|: ---- :| \n`;
let differenceCount = 0;
const tableRows = [] as string[];
// const items = [mine,preferred]
for (const v of items) {
const key = v[0] as keyof typeof TweakValuesShouldMatchedTemplate;
const remoteValueForDisplay = escapeMarkdownValue(preferred[key]);
const currentValueForDisplay = `${escapeMarkdownValue((trialSetting as TweakValues)?.[key])}`;
if ((trialSetting as TweakValues)?.[key] !== preferred[key]) {
if (IncompatibleChanges.indexOf(key) !== -1) {
rebuildRequired = true;
}
for (const pattern of IncompatibleChangesInSpecificPattern) {
if (pattern.key !== key) continue;
// If a `from` value is supplied, check whether the current value matches it: if the current value equals the `from` value, a rebuild is needed.
const isFromConditionMet =
"from" in pattern ? pattern.from === (trialSetting as TweakValues)?.[key] : false;
// Likewise, if a `to` value is supplied, check it against the preferred value.
const isToConditionMet = "to" in pattern ? pattern.to === preferred[key] : false;
// If either condition is met, a rebuild is required unless the pattern is only a recommendation.
if (isFromConditionMet || isToConditionMet) {
if (pattern.isRecommendation) {
rebuildRecommended = true;
} else {
rebuildRequired = true;
}
}
}
if (CompatibleButLossyChanges.indexOf(key) !== -1) {
rebuildRecommended = true;
}
} else {
continue;
}
tableRows.push(
$msg("TweakMismatchResolve.Table.Row", {
name: confName(key),
self: currentValueForDisplay,
remote: remoteValueForDisplay,
})
);
differenceCount++;
}
if (differenceCount === 0) {
this._log("The settings in the remote database are the same as the local database.", LOG_LEVEL_NOTICE);
return { result: false, requireFetch: false };
}
const additionalMessage =
rebuildRequired && this.core.settings.isConfigured
? $msg("TweakMismatchResolve.Message.UseRemote.WarningRebuildRequired")
: "";
const additionalMessage2 =
rebuildRecommended && this.core.settings.isConfigured
? $msg("TweakMismatchResolve.Message.UseRemote.WarningRebuildRecommended")
: "";
const table = $msg("TweakMismatchResolve.Table", { rows: tableRows.join("\n") });
const message = $msg("TweakMismatchResolve.Message.Main", {
table: table,
additionalMessage: [additionalMessage, additionalMessage2].filter((v) => v).join("\n"),
});
const CHOICE_USE_REMOTE = $msg("TweakMismatchResolve.Action.UseConfigured");
const CHOICE_DISMISS = $msg("TweakMismatchResolve.Action.Dismiss");
// const CHOICE_AND_VALUES = [
// [CHOICE_USE_REMOTE, preferred],
// [CHOICE_DISMISS, false]]
const CHOICES = [CHOICE_USE_REMOTE, CHOICE_DISMISS];
const retKey = await this.core.confirm.askSelectStringDialogue(message, CHOICES, {
title: $msg("TweakMismatchResolve.Title.UseRemoteConfig"),
timeout: 0,
defaultAction: CHOICE_DISMISS,
});
if (!retKey) return { result: false, requireFetch: false };
if (retKey === CHOICE_DISMISS) return { result: false, requireFetch: false };
if (retKey === CHOICE_USE_REMOTE) {
return { result: { ...trialSetting, ...preferred }, requireFetch: rebuildRequired };
}
return { result: false, requireFetch: false };
}
onBindFunction(core: LiveSyncCore, services: InjectableServiceHub): void {
services.tweakValue.fetchRemotePreferred.setHandler(this._fetchRemotePreferredTweakValues.bind(this));
services.tweakValue.checkAndAskResolvingMismatched.setHandler(
this._checkAndAskResolvingMismatchedTweaks.bind(this)
);
services.tweakValue.askResolvingMismatched.setHandler(this._askResolvingMismatchedTweaks.bind(this));
services.tweakValue.checkAndAskUseRemoteConfiguration.setHandler(
this._checkAndAskUseRemoteConfiguration.bind(this)
);
services.tweakValue.askUseRemoteConfiguration.setHandler(this._askUseRemoteConfiguration.bind(this));
services.replication.checkConnectionFailure.addHandler(this._anyAfterConnectCheckFailed.bind(this));
}
}
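// A minimal sketch factoring out the from/to rule used twice above. The
// `MatchPattern` shape is inferred from how the loops read the fields and is an
// assumption, not the plugin's actual type; the "encrypt" key in the usage
// example is likewise hypothetical.
type MatchPattern<K extends string, V> = {
key: K;
from?: V;
to?: V;
isRecommendation?: boolean;
};
function classifyChange<K extends string, V>(
pattern: MatchPattern<K, V>,
key: K,
mine: V,
preferred: V
): "rebuildRequired" | "rebuildRecommended" | "none" {
if (pattern.key !== key) return "none";
const fromMet = "from" in pattern ? pattern.from === mine : false;
const toMet = "to" in pattern ? pattern.to === preferred : false;
if (!fromMet && !toMet) return "none";
return pattern.isRecommendation ? "rebuildRecommended" : "rebuildRequired";
}
// For example, classifyChange({ key: "encrypt", from: true }, "encrypt", true, false)
// returns "rebuildRequired", since the local value matches the pattern's `from`.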
