diff --git a/manuscript/CHANGELOG.md b/manuscript/CHANGELOG.md index 94b33fb..aaf0515 100644 --- a/manuscript/CHANGELOG.md +++ b/manuscript/CHANGELOG.md @@ -2,25 +2,27 @@ ## Subscribe to updates -Sign up [here](http://eepurl.com/dfx95n) (double-opt-in) to receive email updates on new and improve recipes! +* Email : Sign up [here](http://eepurl.com/dfx95n) (double-opt-in) to receive email updates on new and improved recipes! +* Mastodon: https://mastodon.social/@geekcookbook_changes +* RSS: https://mastodon.social/@geekcookbook_changes.atom +* The #changelog channel in our [Discord server](http://chat.funkypenguin.co.nz) + +## Recent additions to work-in-progress + +* Kubernetes recipes for UniFi controller, Miniflux, Kanboard and PrivateBin coming in March! (_19 Mar 2019_) ## Recently added recipes -* Added a list of [sponsored projects](sponsored-projects/) which I regularly donate to, to keep the geeky ingredients fresh! (_8 Jun 2018_) -* [Turtle Pool](/recipies/turtle-pool/) - A mining pool for the fun, friendly, no-BS, still-in-its-infancy cryptocurrency, "[TurtleCoin](http://turtlecoin.lol)" (_7 May 2018_) -* [Wallabag](/recipies/wallabag/) - Self-hosted Read-it-Later / Annotation app (_21 Apr 2018_) -* [InstaPy](/recipies/instapy/) - Automate your Instagrammage (_17 Apr 2018_) -* [CryptoMiner](/recipies/cryto-miner/start/) - Become a cryptocurrency miner, put your GPU to work! 
-* [Calibre-Web](/recipies/calibre-web/) - Plex for EBooks (_8 Jan 2018_) -* [Emby](/recipies/emby/) - Geekier alternative to Plex, with improved parental controls (_28 Dec 2017_) +* Added Kubernetes version of [Miniflux](/recipes/kubernetes/miniflux/) recipe, a minimalistic RSS reader supporting the Fever API (_26 Mar 2019_) +* Added Kubernetes version of [Kanboard](/recipes/kubernetes/kanboard/) recipe, a lightweight, well-supported Kanban tool for visualizing your work (_19 Mar 2019_) +* Added [Minio](/recipes/minio/), a high performance distributed object storage server, designed for large-scale private cloud infrastructure, but perfect for simple use cases where emulating AWS S3 is useful. (_27 Jan 2019_) +* Added the beginning of the **Kubernetes** design, including a getting started on using [Digital Ocean,](/kubernetes/digitalocean/) and a WIP recipe for an [MQTT](/recipes/mqtt/) broker (_21 Jan 2019_) +* [ElkarBackup](/recipes/elkarbackup/), a beautiful GUI-based backup solution built on rsync/rsnapshot (_1 Jan 2019_) ## Recent improvements -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) component of [autopirate](/recipies/autopirate/start/) recipe updated to include calibre-server, so that downloaded ebooks can be automatically added to a calibre library, to then be independently managed using [Calibre-Web](/recipies/calibre-web/) (_27 May 2018_) -* [Miniflux](/recipies/miniflux/) updated to version 2.0, including PostgreSQL database and nightly DB dumps (_24 May 2018_) -* [Turtle Pool](/recipies/turtle-pool/) updated for redundant daemons plus a failsafe (_16 May 2018_) -* "Disengaged" [AutoPirate](/recipies/autopirate/) uber-recipe into individual sub-recipies per-page, easing navigation and support / comment flow -* Switched [Emby](/recipies/emby/) to official docker container (more up-to-date) (_27 Mar 2018_) -* [Docker Swarm Mode](/ha-docker-swarm/docker-swarm-mode/#setup-automatic-updates) setup updated for automatic container updates 
(Shepherd) -* [Kanboard](/recipies/kanboard/) recipe [improved](https://github.com/funkypenguin/geek-cookbook/commit/8597bcc6319b571c8138cd1b615e8c512e5f5bd5) with the inclusion of a cron container to run automated daily jobs (_22 Dec 2017_) +* Added recipe for [automated snapshots of Kubernetes Persistent Volumes](/kubernetes/snapshots/), instructions for using [Helm](/kubernetes/helm/), and recipe for deploying [Traefik](/kubernetes/traefik/), which completes the Kubernetes cluster design! (_9 Feb 2019_) +* Added detailed description (_and diagram_) of our [Kubernetes design](/kubernetes/design/), plus a [simple load-balancer design](kubernetes/loadbalancer/) to avoid the complexities/costs of permitting ingress access to a cluster (_7 Feb 2019_) +* Added an [introductory/explanatory page, including a children's story, on Kubernetes](/kubernetes/start/) (_29 Jan 2019_) +* [NextCloud](/recipes/nextcloud/) updated to fix CalDAV/CardDAV service discovery behind Traefik reverse proxy (_12 Dec 2018_) diff --git a/manuscript/README.md b/manuscript/README-UI.md similarity index 100% rename from manuscript/README.md rename to manuscript/README-UI.md diff --git a/manuscript/book.txt b/manuscript/book.txt index fd02ee1..a973ed0 100644 --- a/manuscript/book.txt +++ b/manuscript/book.txt @@ -2,38 +2,90 @@ index.md README.md +CHANGELOG.md whoami.md sections/ha-docker-swarm.md ha-docker-swarm/design.md ha-docker-swarm/vms.md ha-docker-swarm/shared-storage-ceph.md -ha-docker-swarm/shared-storage-glustermd +ha-docker-swarm/shared-storage-gluster.md ha-docker-swarm/keepalived.md -ha-docker-swarm/traefik.md ha-docker-swarm/docker-swarm-mode.md +ha-docker-swarm/traefik.md +ha-docker-swarm/registry.md ha-docker-swarm/duplicity.md -sections/recipies.md -recipies/mail.md -recipies/gitlab.md -recipies/gitlab-runner.md -recipies/wekan.md -recipies/huginn.md -recipies/kanboard.md -recipies/miniflux.md -recipies/ghost.md -recipies/piwik.md -recipies/autopirate.md -recipies/nextcloud.md 
-recipies/portainer.md -recipies/turtle-pool.md -recipies/tiny-tiny-rss.md +sections/chefs-favorites-docker.md +recipes/autopirate.md +recipes/autopirate/sabnzbd.md +recipes/autopirate/nzbget.md +recipes/autopirate/rtorrent.md +recipes/autopirate/sonarr.md +recipes/autopirate/radarr.md +recipes/autopirate/mylar.md +recipes/autopirate/lazylibrarian.md +recipes/autopirate/headphones.md +recipes/autopirate/lidarr.md +recipes/autopirate/nzbhydra.md +recipes/autopirate/nzbhydra2.md +recipes/autopirate/ombi.md +recipes/autopirate/jackett.md +recipes/autopirate/heimdall.md +recipes/autopirate/end.md + +recipes/elkarbackup.md +recipes/emby.md +recipes/homeassistant.md +recipes/homeassistant/ibeacon.md +recipes/huginn.md +recipes/kanboard.md +recipes/miniflux.md +recipes/munin.md +recipes/nextcloud.md +recipes/owntracks.md +recipes/phpipam.md +recipes/plex.md +recipes/privatebin.md +recipes/swarmprom.md +recipes/turtle-pool.md + +sections/menu-docker.md +recipes/bookstack.md +recipes/cryptominer.md +recipes/cryptominer/mining-rig.md +recipes/cryptominer/amd-gpu.md +recipes/cryptominer/nvidia-gpu.md +recipes/cryptominer/mining-pool.md +recipes/cryptominer/wallet.md +recipes/cryptominer/exchange.md +recipes/cryptominer/minerhotel.md +recipes/cryptominer/monitor.md +recipes/cryptominer/profit.md +recipes/calibre-web.md +recipes/collabora-online.md +recipes/ghost.md +recipes/gitlab.md +recipes/gitlab-runner.md +recipes/gollum.md +recipes/instapy.md +recipes/keycloak.md +recipes/openldap.md +recipes/mail.md +recipes/minio.md +recipes/piwik.md +recipes/portainer.md +recipes/realms.md +recipes/tiny-tiny-rss.md +recipes/wallabag.md +recipes/wekan.md +recipes/wetty.md sections/reference.md reference/oauth_proxy.md reference/data_layout.md reference/networks.md +reference/containers.md reference/git-docker.md reference/openvpn.md reference/troubleshooting.md diff --git a/manuscript/extras/javascript/piwik.js b/manuscript/extras/javascript/piwik.js deleted file mode 100644 index 
7abb60c..0000000 --- a/manuscript/extras/javascript/piwik.js +++ /dev/null @@ -1,15 +0,0 @@ - - - diff --git a/manuscript/ha-docker-swarm/design.md b/manuscript/ha-docker-swarm/design.md index 9cceda7..17712f9 100644 --- a/manuscript/ha-docker-swarm/design.md +++ b/manuscript/ha-docker-swarm/design.md @@ -51,7 +51,7 @@ Assuming 3 nodes, under normal circumstances the following is illustrated: * The **traefik** service (in swarm mode) receives incoming requests (on http and https), and forwards them to individual containers. Traefik knows the containers names because it's able to access the docker socket. * All 3 nodes run keepalived, at different priorities. Since traefik is running as a swarm service and listening on TCP 80/443, requests made to the keepalived VIP and arriving at **any** of the swarm nodes will be forwarded to the traefik container (no matter which node it's on), and then onto the target backend. -![HA function](images/docker-swarm-ha-function.png) +![HA function](../images/docker-swarm-ha-function.png) ### Node failure @@ -63,7 +63,7 @@ In the case of a failure (or scheduled maintenance) of one of the nodes, the fol * The **traefik** service is either restarted or unaffected, and as the backend containers stop/start and change IP, traefik is aware and updates accordingly. * The keepalived VIP continues to function on the remaining nodes, and docker swarm continues to forward any traffic received on TCP 80/443 to the appropriate node. 
-![HA function](images/docker-swarm-node-failure.png) +![HA function](../images/docker-swarm-node-failure.png) ### Node restore @@ -75,7 +75,7 @@ When the failed (or upgraded) host is restored to service, the following is illu * Keepalived VIP regains full redundancy -![HA function](images/docker-swarm-node-restore.png) +![HA function](../images/docker-swarm-node-restore.png) ### Total cluster failure diff --git a/manuscript/ha-docker-swarm/images/shared-storage-replicated-gluster-volume.png b/manuscript/ha-docker-swarm/images/shared-storage-replicated-gluster-volume.png deleted file mode 100644 index 135a63f..0000000 Binary files a/manuscript/ha-docker-swarm/images/shared-storage-replicated-gluster-volume.png and /dev/null differ diff --git a/manuscript/ha-docker-swarm/shared-storage-gluster.md b/manuscript/ha-docker-swarm/shared-storage-gluster.md index 77c6fd8..be1ea91 100644 --- a/manuscript/ha-docker-swarm/shared-storage-gluster.md +++ b/manuscript/ha-docker-swarm/shared-storage-gluster.md @@ -6,7 +6,7 @@ While Docker Swarm is great for keeping containers running (_and restarting thos ### Why GlusterFS? -This GlusterFS recipe was my original design for shared storage, but I [found it to be flawed](ha-docker-swarm/shared-storage-ceph/#why-not-glusterfs), and I replaced it with a [design which employs Ceph instead](http://localhost:8000/ha-docker-swarm/shared-storage-ceph/#why-ceph). This recipe is an alternate to the Ceph design, if you happen to prefer GlusterFS. +This GlusterFS recipe was my original design for shared storage, but I [found it to be flawed](shared-storage-ceph/#why-not-glusterfs), and I replaced it with a [design which employs Ceph instead](shared-storage-ceph/#why-ceph). This recipe is an alternate to the Ceph design, if you happen to prefer GlusterFS. 
## Ingredients diff --git a/manuscript/ha-docker-swarm/traefik.md b/manuscript/ha-docker-swarm/traefik.md index d8b6668..0b93d57 100644 --- a/manuscript/ha-docker-swarm/traefik.md +++ b/manuscript/ha-docker-swarm/traefik.md @@ -21,6 +21,9 @@ To deal with these gaps, we need a front-end load-balancer, and in this design, The traefik container is aware of the __other__ docker containers in the swarm, because it has access to the docker socket at **/var/run/docker.sock**. This allows traefik to dynamically configure itself based on the labels found on containers in the swarm, which is hugely useful. To make this functionality work on our SELinux-enabled Atomic hosts, we need to add custom SELinux policy. +!!! tip + The following is only necessary if you're using SELinux! + Run the following to build and activate policy to permit containers to access docker.sock: ``` @@ -37,7 +40,7 @@ make && semodule -i dockersock.pp While it's possible to configure traefik via docker command arguments, I prefer to create a config file (traefik.toml). This allows me to change traefik's behaviour by simply changing the file, and keeps my docker config simple. -Create /var/data/traefik/traefik.toml as follows: +Create ```/var/data/traefik/```, and then create ```traefik.toml``` inside it as follows: ``` checkNewVersion = true @@ -76,9 +79,14 @@ watch = true swarmmode = true ``` + ### Prepare the docker service config -Create /var/data/traefik/docker-compose.yml as follows: +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +Create /var/data/config/traefik/docker-compose.yml as follows: ``` version: "3" @@ -123,12 +131,14 @@ networks: - subnet: 10.1.0.0/24 ``` -Docker won't start an image with a bind-mount to a non-existent file, so prepare acme.json (_with the appropriate permissions_) by running: +Docker won't start an image with a bind-mount to a non-existent file, so prepare an empty acme.json (_with the appropriate permissions_) by running: ``` touch /var/data/traefik/acme.json chmod 600 /var/data/traefik/acme.json -```. +``` + +Traefik will populate acme.json itself when it runs, but it needs to exist before the container will start (_Chicken, meet egg._) ### Launch diff --git a/manuscript/images/athena-mining-pool.png b/manuscript/images/athena-mining-pool.png new file mode 100644 index 0000000..0011e6d Binary files /dev/null and b/manuscript/images/athena-mining-pool.png differ diff --git a/manuscript/images/bookstack.png b/manuscript/images/bookstack.png new file mode 100644 index 0000000..8e148c6 Binary files /dev/null and b/manuscript/images/bookstack.png differ diff --git a/manuscript/images/collabora-online-in-nextcloud.png b/manuscript/images/collabora-online-in-nextcloud.png new file mode 100644 index 0000000..5d9db66 Binary files /dev/null and b/manuscript/images/collabora-online-in-nextcloud.png differ diff --git a/manuscript/images/collabora-online.png b/manuscript/images/collabora-online.png new file mode 100644 index 0000000..aba9572 Binary files /dev/null and b/manuscript/images/collabora-online.png differ diff --git a/manuscript/images/collabora-traffic-flow.png b/manuscript/images/collabora-traffic-flow.png new file mode 100644 index 0000000..da3ccd8 Binary files /dev/null and b/manuscript/images/collabora-traffic-flow.png differ diff --git a/manuscript/images/common_observatory.png b/manuscript/images/common_observatory.png new file mode 100644 index 
0000000..cc466f8 Binary files /dev/null and b/manuscript/images/common_observatory.png differ diff --git a/manuscript/images/cryptonote-mining-pool.png b/manuscript/images/cryptonote-mining-pool.png new file mode 100644 index 0000000..5836d9c Binary files /dev/null and b/manuscript/images/cryptonote-mining-pool.png differ diff --git a/manuscript/ha-docker-swarm/images/docker-swarm-ha-function.png b/manuscript/images/docker-swarm-ha-function.png similarity index 100% rename from manuscript/ha-docker-swarm/images/docker-swarm-ha-function.png rename to manuscript/images/docker-swarm-ha-function.png diff --git a/manuscript/ha-docker-swarm/images/docker-swarm-node-failure.png b/manuscript/images/docker-swarm-node-failure.png similarity index 100% rename from manuscript/ha-docker-swarm/images/docker-swarm-node-failure.png rename to manuscript/images/docker-swarm-node-failure.png diff --git a/manuscript/ha-docker-swarm/images/docker-swarm-node-restore.png b/manuscript/images/docker-swarm-node-restore.png similarity index 100% rename from manuscript/ha-docker-swarm/images/docker-swarm-node-restore.png rename to manuscript/images/docker-swarm-node-restore.png diff --git a/manuscript/images/elkarbackup-setup-1.png b/manuscript/images/elkarbackup-setup-1.png new file mode 100644 index 0000000..17dee0c Binary files /dev/null and b/manuscript/images/elkarbackup-setup-1.png differ diff --git a/manuscript/images/elkarbackup-setup-2.png b/manuscript/images/elkarbackup-setup-2.png new file mode 100644 index 0000000..c0d92a0 Binary files /dev/null and b/manuscript/images/elkarbackup-setup-2.png differ diff --git a/manuscript/images/elkarbackup-setup-3.png b/manuscript/images/elkarbackup-setup-3.png new file mode 100644 index 0000000..c02d7b8 Binary files /dev/null and b/manuscript/images/elkarbackup-setup-3.png differ diff --git a/manuscript/images/elkarbackup.png b/manuscript/images/elkarbackup.png new file mode 100644 index 0000000..af3a2e4 Binary files /dev/null and 
b/manuscript/images/elkarbackup.png differ diff --git a/manuscript/images/heimdall.jpg b/manuscript/images/heimdall.jpg new file mode 100644 index 0000000..e04615a Binary files /dev/null and b/manuscript/images/heimdall.jpg differ diff --git a/manuscript/images/ipfs.png b/manuscript/images/ipfs.png new file mode 100644 index 0000000..fd295fe Binary files /dev/null and b/manuscript/images/ipfs.png differ diff --git a/manuscript/images/keycloak.png b/manuscript/images/keycloak.png new file mode 100644 index 0000000..2e6a6c9 Binary files /dev/null and b/manuscript/images/keycloak.png differ diff --git a/manuscript/images/kubernetes-cluster-design.png b/manuscript/images/kubernetes-cluster-design.png new file mode 100644 index 0000000..984234a Binary files /dev/null and b/manuscript/images/kubernetes-cluster-design.png differ diff --git a/manuscript/images/kubernetes-helm.png b/manuscript/images/kubernetes-helm.png new file mode 100644 index 0000000..debb168 Binary files /dev/null and b/manuscript/images/kubernetes-helm.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-1.png b/manuscript/images/kubernetes-on-digitalocean-screenshot-1.png new file mode 100644 index 0000000..eb11a26 Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-1.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-2.png b/manuscript/images/kubernetes-on-digitalocean-screenshot-2.png new file mode 100644 index 0000000..2e32c68 Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-2.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-3.png b/manuscript/images/kubernetes-on-digitalocean-screenshot-3.png new file mode 100644 index 0000000..6b91f68 Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-3.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-4.png 
b/manuscript/images/kubernetes-on-digitalocean-screenshot-4.png new file mode 100644 index 0000000..f22b94e Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-4.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-5.png b/manuscript/images/kubernetes-on-digitalocean-screenshot-5.png new file mode 100644 index 0000000..e914e94 Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-5.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean-screenshot-6.png b/manuscript/images/kubernetes-on-digitalocean-screenshot-6.png new file mode 100644 index 0000000..2ddcf39 Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean-screenshot-6.png differ diff --git a/manuscript/images/kubernetes-on-digitalocean.jpg b/manuscript/images/kubernetes-on-digitalocean.jpg new file mode 100644 index 0000000..f37245a Binary files /dev/null and b/manuscript/images/kubernetes-on-digitalocean.jpg differ diff --git a/manuscript/images/kubernetes-snapshots.png b/manuscript/images/kubernetes-snapshots.png new file mode 100644 index 0000000..613caa2 Binary files /dev/null and b/manuscript/images/kubernetes-snapshots.png differ diff --git a/manuscript/images/lidarr.png b/manuscript/images/lidarr.png new file mode 100644 index 0000000..dbbeed3 Binary files /dev/null and b/manuscript/images/lidarr.png differ diff --git a/manuscript/images/mattermost.png b/manuscript/images/mattermost.png new file mode 100644 index 0000000..9b968a4 Binary files /dev/null and b/manuscript/images/mattermost.png differ diff --git a/manuscript/images/miniflux.png b/manuscript/images/miniflux.png index 1a090ce..9414dd7 100644 Binary files a/manuscript/images/miniflux.png and b/manuscript/images/miniflux.png differ diff --git a/manuscript/images/minio.png b/manuscript/images/minio.png new file mode 100644 index 0000000..2cd283f Binary files /dev/null and b/manuscript/images/minio.png differ diff --git 
a/manuscript/images/mqtt.png b/manuscript/images/mqtt.png new file mode 100644 index 0000000..e88b61a Binary files /dev/null and b/manuscript/images/mqtt.png differ diff --git a/manuscript/images/munin.png b/manuscript/images/munin.png new file mode 100644 index 0000000..2e38724 Binary files /dev/null and b/manuscript/images/munin.png differ diff --git a/manuscript/images/name.jpg b/manuscript/images/name.jpg new file mode 100644 index 0000000..15e32ab Binary files /dev/null and b/manuscript/images/name.jpg differ diff --git a/manuscript/images/nzbhydra2.png b/manuscript/images/nzbhydra2.png new file mode 100644 index 0000000..1ac8027 Binary files /dev/null and b/manuscript/images/nzbhydra2.png differ diff --git a/manuscript/images/openldap.jpeg b/manuscript/images/openldap.jpeg new file mode 100644 index 0000000..6c0b6b2 Binary files /dev/null and b/manuscript/images/openldap.jpeg differ diff --git a/manuscript/images/phpipam.png b/manuscript/images/phpipam.png new file mode 100644 index 0000000..8ebc090 Binary files /dev/null and b/manuscript/images/phpipam.png differ diff --git a/manuscript/images/privatebin.png b/manuscript/images/privatebin.png new file mode 100644 index 0000000..8d71fb4 Binary files /dev/null and b/manuscript/images/privatebin.png differ diff --git a/manuscript/images/realms.png b/manuscript/images/realms.png new file mode 100644 index 0000000..dce2c03 Binary files /dev/null and b/manuscript/images/realms.png differ diff --git a/manuscript/images/sso-stack-keycloak-1.png b/manuscript/images/sso-stack-keycloak-1.png new file mode 100644 index 0000000..a90966d Binary files /dev/null and b/manuscript/images/sso-stack-keycloak-1.png differ diff --git a/manuscript/images/sso-stack-keycloak-2.png b/manuscript/images/sso-stack-keycloak-2.png new file mode 100644 index 0000000..70ca8a7 Binary files /dev/null and b/manuscript/images/sso-stack-keycloak-2.png differ diff --git a/manuscript/images/sso-stack-keycloak-3.png 
b/manuscript/images/sso-stack-keycloak-3.png new file mode 100644 index 0000000..ff8ab23 Binary files /dev/null and b/manuscript/images/sso-stack-keycloak-3.png differ diff --git a/manuscript/images/sso-stack-keycloak-4.png b/manuscript/images/sso-stack-keycloak-4.png new file mode 100644 index 0000000..0839f5c Binary files /dev/null and b/manuscript/images/sso-stack-keycloak-4.png differ diff --git a/manuscript/images/sso-stack-lam-1.png b/manuscript/images/sso-stack-lam-1.png new file mode 100644 index 0000000..a52d36a Binary files /dev/null and b/manuscript/images/sso-stack-lam-1.png differ diff --git a/manuscript/images/sso-stack-lam-2.png b/manuscript/images/sso-stack-lam-2.png new file mode 100644 index 0000000..de64d25 Binary files /dev/null and b/manuscript/images/sso-stack-lam-2.png differ diff --git a/manuscript/images/sso-stack-lam-3.png b/manuscript/images/sso-stack-lam-3.png new file mode 100644 index 0000000..2b1e2a0 Binary files /dev/null and b/manuscript/images/sso-stack-lam-3.png differ diff --git a/manuscript/images/sso-stack-lam-4.png b/manuscript/images/sso-stack-lam-4.png new file mode 100644 index 0000000..b594a1f Binary files /dev/null and b/manuscript/images/sso-stack-lam-4.png differ diff --git a/manuscript/images/sso-stack-lam-5.png b/manuscript/images/sso-stack-lam-5.png new file mode 100644 index 0000000..d83d6cf Binary files /dev/null and b/manuscript/images/sso-stack-lam-5.png differ diff --git a/manuscript/images/sso-stack-lam-6.png b/manuscript/images/sso-stack-lam-6.png new file mode 100644 index 0000000..43ba362 Binary files /dev/null and b/manuscript/images/sso-stack-lam-6.png differ diff --git a/manuscript/images/sso-stack-lam-7.png b/manuscript/images/sso-stack-lam-7.png new file mode 100644 index 0000000..146204f Binary files /dev/null and b/manuscript/images/sso-stack-lam-7.png differ diff --git a/manuscript/images/swarmprom.png b/manuscript/images/swarmprom.png new file mode 100644 index 0000000..a9549c4 Binary files 
/dev/null and b/manuscript/images/swarmprom.png differ diff --git a/manuscript/images/terraform_service_accounts.png b/manuscript/images/terraform_service_accounts.png new file mode 100644 index 0000000..5c00279 Binary files /dev/null and b/manuscript/images/terraform_service_accounts.png differ diff --git a/manuscript/images/terraform_service_accounts_2.png b/manuscript/images/terraform_service_accounts_2.png new file mode 100644 index 0000000..94b6060 Binary files /dev/null and b/manuscript/images/terraform_service_accounts_2.png differ diff --git a/manuscript/images/wetty.png b/manuscript/images/wetty.png new file mode 100644 index 0000000..0f689d3 Binary files /dev/null and b/manuscript/images/wetty.png differ diff --git a/manuscript/index.md b/manuscript/index.md index 1562a08..b354d4a 100644 --- a/manuscript/index.md +++ b/manuscript/index.md @@ -1,6 +1,6 @@ # What is this? -The "**[Geek's Cookbook](https://geek-cookbook.funkypenguin.co.nz)**" is a collection of guides for establishing your own highly-available docker container cluster (swarm). This swarm enables you to run self-hosted services such as [GitLab](/recipies/gitlab/), [Plex](/recipies/plex/), [NextCloud](/recipies/nextcloud/), etc. Recent updates and additions are posted on the [CHANGELOG](/CHANGELOG/). +The "**[Geek's Cookbook](https://geek-cookbook.funkypenguin.co.nz)**" is a collection of guides for establishing your own highly-available docker container cluster (swarm). This swarm enables you to run self-hosted services such as [GitLab](/recipes/gitlab/), [Plex](/recipes/plex/), [NextCloud](/recipes/nextcloud/), etc. Recent updates and additions are posted on the [CHANGELOG](/CHANGELOG/). ## Who is this for? @@ -16,6 +16,10 @@ So if you're familiar enough with the tools, and you've done self-hosting before 2. You want to play. You want a safe sandbox to test new tools, keeping the ones you want and tossing the ones you don't. 3. You want reliability. 
Once you go from __playing__ with a tool to actually __using__ it, you want it to be available when you need it. Having to "_quickly ssh into the host and restart the webserver_" doesn't cut it when your wife wants to know why her phone won't sync! +## What have you done for me lately? (CHANGELOG) + +Check out recent changes at [CHANGELOG](/CHANGELOG/) + +## What do you want from me? I want your money. @@ -24,8 +28,14 @@ No, seriously (_but yes, I do want your money - see below_), If the above applie ### Get in touch +<<<<<<< HEAD * Tweet me up, I'm [@funkypenguin](https://twitter.com/funkypenguin)! * or better yet, come into the [kitchen](https://discourse.geek-kitchen.funkypenguin.co.nz/) (discussion forums) to say hi, ask a question, or suggest a new recipe! +======= +* Come and say hi to me and the friendly geeks in the [Discord](http://chat.funkypenguin.co.nz) chat or the [Discourse](https://discourse.geek-kitchen.funkypenguin.co.nz/) forums - say hi, ask a question, or suggest a new recipe! +* Tweet me up, I'm [@funkypenguin](https://twitter.com/funkypenguin)!
🐦 +* [Contact me](https://www.funkypenguin.co.nz/contact/) by a variety of channels +>>>>>>> master ### Buy my book @@ -46,11 +56,12 @@ Impulsively **[click here (NOW quick do it!)](https://www.patreon.com/bePatron?u I also gratefully accept donations of most fine socialist/anarchist/hobbyist cryptocurrencies, including the list below (_let me know if I've left out the coin-of-the-week, and I'll happily add it_): -| ist-currency | Address +| -ist-currency | Address | ------------- |-------------| | Bitcoin | 1GBJfmqARmL66gQzUy9HtNWdmAEv74nfXj | Ethereum | 0x19e60ec49e1f053cfdfc193560ecfb3caed928f1 | Litecoin | LYLEF7xTpeVbjjoZD3jGLVgvKSKTYDKbK8 +| :turtle: TurtleCoin | TRTLv2qCKYChMbU5sNkc85hzq2VcGpQidaowbnV2N6LAYrFNebMLepKKPrdif75x5hAizwfc1pX4gi5VsR9WQbjQgYcJm21zec4 diff --git a/manuscript/kubernetes/cluster.md b/manuscript/kubernetes/cluster.md new file mode 100644 index 0000000..a6b0579 --- /dev/null +++ b/manuscript/kubernetes/cluster.md @@ -0,0 +1,92 @@ +# Kubernetes on DigitalOcean + +IMO, the easiest Kubernetes cloud provider to experiment with is [DigitalOcean](https://m.do.co/c/e33b78ad621b) (_this is a referral link_). I've included instructions below to start a basic cluster. + +![Kubernetes on Digital Ocean](/images/kubernetes-on-digitalocean.jpg) + +## Ingredients + +1. [DigitalOcean](https://www.digitalocean.com/?refcode=e33b78ad621b) account, either linked to a credit card or (_my preference for a trial_) topped up with $5 credit from PayPal. (_yes, this is a referral link, making me some 💰 to buy 🍷_) +2. Geek-Fu required : 🐱 (easy - even has screenshots!) 
+ +## Preparation + +### Create DigitalOcean Account + +Create a project, and then from your project page, click **Manage** -> **Kubernetes (LTD)** in the left-hand panel: + +![Kubernetes on Digital Ocean Screenshot #1](/images/kubernetes-on-digitalocean-screenshot-1.png) + +Until DigitalOcean considers their Kubernetes offering to be "production ready", you'll need the additional step of clicking on **Enable Limited Access**: + +![Kubernetes on Digital Ocean Screenshot #2](/images/kubernetes-on-digitalocean-screenshot-2.png) + +The _Enable Limited Access_ button changes to read _Create a Kubernetes Cluster_ . Cleeeek it: + +![Kubernetes on Digital Ocean Screenshot #3](/images/kubernetes-on-digitalocean-screenshot-3.png) + +When prompted, choose some defaults for your first node pool (_your pool of "compute" resources for your cluster_), and give it a name. In more complex deployments, you can use this concept of "node pools" to run certain applications (_like an inconsequential nightly batch job_) on a particular class of compute instance (_such as cheap, preemptible instances_) + +![Kubernetes on Digital Ocean Screenshot #4](/images/kubernetes-on-digitalocean-screenshot-4.png) + +That's it! Have a sip of your 🍷, a bite of your :cheese:, and wait for your cluster to build. While you wait, follow the instructions to setup kubectl (if you don't already have it) + +![Kubernetes on Digital Ocean Screenshot #5](/images/kubernetes-on-digitalocean-screenshot-5.png) + +DigitalOcean will provide you with a "kubeconfig" file to use to access your cluster. It's at the bottom of the page (_illustrated below_), and easy to miss (_in my experience_). + +![Kubernetes on Digital Ocean Screenshot #6](/images/kubernetes-on-digitalocean-screenshot-6.png) + +## Release the kubectl! 
+ +Save your kubeconfig file somewhere, and test it out by running ```kubectl --kubeconfig=<your-kubeconfig-file> get nodes``` + +Example output: +``` +[davidy:~/Downloads] 130 % kubectl --kubeconfig=penguins-are-the-sexiest-geeks-kubeconfig.yaml get nodes +NAME STATUS ROLES AGE VERSION +festive-merkle-8n9e Ready 20s v1.13.1 +[davidy:~/Downloads] % +``` + +In the example above, my nodes were being deployed. Repeat the command to see your nodes spring into existence: + +``` +[davidy:~/Downloads] % kubectl --kubeconfig=penguins-are-the-sexiest-geeks-kubeconfig.yaml get nodes +NAME STATUS ROLES AGE VERSION +festive-merkle-8n96 Ready 6s v1.13.1 +festive-merkle-8n9e Ready 34s v1.13.1 +[davidy:~/Downloads] % + +[davidy:~/Downloads] % kubectl --kubeconfig=penguins-are-the-sexiest-geeks-kubeconfig.yaml get nodes +NAME STATUS ROLES AGE VERSION +festive-merkle-8n96 Ready 30s v1.13.1 +festive-merkle-8n9a Ready 17s v1.13.1 +festive-merkle-8n9e Ready 58s v1.13.1 +[davidy:~/Downloads] % +``` + +That's it. You have a beautiful new kubernetes cluster ready for some action! + +## Move on.. + +Still with me? Good. Move on to creating your own external load balancer.. + +* [Start](/kubernetes/start/) - Why Kubernetes? +* [Design](/kubernetes/design/) - How does it fit together? +* Cluster (this page) - Setup a basic cluster +* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks +* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm + + +## Chef's Notes + +1. Ok, yes, there's not much you can do with your cluster _yet_. But stay tuned, more Kubernetes fun to come! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid_) ways to say thank you! 👏 + +### Your comments? 
💬 diff --git a/manuscript/kubernetes/design.md b/manuscript/kubernetes/design.md new file mode 100644 index 0000000..94ed2bf --- /dev/null +++ b/manuscript/kubernetes/design.md @@ -0,0 +1,138 @@ +# Design + +Like the [Docker Swarm](ha-docker-swarm/design/) "_private cloud_" design, the Kubernetes design is: + +* **Highly-available** (_can tolerate the failure of a single component_) +* **Scalable** (_can add resource or capacity as required_) +* **Portable** (_run it in DigitalOcean today, AWS tomorrow and Azure on Thursday_) +* **Secure** (_access protected with LetsEncrypt certificates_) +* **Automated** (_requires minimal care and feeding_) + +*Unlike* the Docker Swarm design, the Kubernetes design is: + +* **Cloud-Native** (_While you **can** [run your own Kubernetes cluster](https://microk8s.io/), it's far simpler to let someone else manage the infrastructure, freeing you to play with the fun stuff_) +* **Complex** (_Requires more basic elements, more verbose configuration, and provides more flexibility and customisability_) + +## Design Decisions + +**The design and recipes are provider-agnostic** + +This means that: + +* The design should work on GKE, AWS, DigitalOcean, Azure, or even MicroK8s +* Custom service elements specific to individual providers are avoided + +**The simplest solution to achieve the desired result will be preferred** + +This means that: + +* Persistent volumes from the cloud provider are used for all persistent storage +* We'll do things the "_Kubernetes way_", i.e., using secrets and configmaps, rather than trying to engineer around the Kubernetes basic building blocks. 
+ +**Insofar as possible, the format of recipes will align with Docker Swarm** + +This means that: + +* We use Kubernetes namespaces to replicate Docker Swarm's "_per-stack_" networking and service discovery + +## Security + +Under this design, the only inbound connections we're permitting to our Kubernetes cluster are: + +### Network Flows + +* HTTPS (TCP 443) : Serves individual docker containers via SSL-encrypted reverse proxy (_Traefik_) +* Individual additional ports we choose to expose for specific recipes (_i.e., port 8443 for [MQTT](/recipes/mqtt/)_) + +### Authentication + +* Other than when an SSL-served application provides a trusted level of authentication, or where the application requires public exposure, applications served via Traefik will be protected with an OAuth proxy. + +## The challenges of external access + +Because we're Cloud-Native now, it's complex to get traffic **into** our cluster from outside. We basically have 3 options: + +1. **HostIP**: Map a port on the host to a service. This is analogous to Docker's port exposure, but lacking in that it restricts us to one host port per-container, and it's not possible to anticipate _which_ of your Kubernetes hosts is running a given container. Kubernetes does not have Docker Swarm's "routing mesh", allowing for simple load-balancing of incoming connections. + +2. **LoadBalancer**: Purchase a "loadbalancer" per-service from your cloud provider. While this is the simplest way to assure a fixed IP and port combination will always exist for your service, it has 2 significant limitations: + 1. Cost is prohibitive, at roughly $US10/month per port + 2. You won't get the _same_ fixed IP for multiple ports. So if you wanted to expose 443 and 25 (_webmail and smtp server, for example_), you'd find yourself assigned a port each on two **unique** IPs, a challenge for a single DNS-based service, like "_mail.batman.com_" + +3. 
**NodePort**: Expose our service as a port (_between 30000-32767_) on the host which happens to be running the service. This is challenging because you might want to expose port 443, but that's not possible with NodePort. + +To further complicate options #1 and #3 above, our cloud provider may, without notice, change the IP of the host running your containers (_O hai, Google!_). + +Our solution to these challenges is to employ a simple-but-effective solution which places an HAProxy instance in front of the services exposed by NodePort. For example, this allows us to expose a container on 443 as NodePort 30443, and to cause HAProxy to listen on port 443, and forward all requests to our Node's IP on port 30443, after which it'll be forwarded onto our container on the original port 443. + +We use a phone-home container, which calls a simple webhook on our haproxy VM, advising HAProxy to update its backend for the calling IP. This means that when our provider changes the host's IP, we automatically update HAProxy and keep-on-truckin'! + +Here's a high-level diagram: + +![Kubernetes Design](/images/kubernetes-cluster-design.png) + +## Overview + +So what's happening in the diagram above? I'm glad you asked - let's go through it! + +### Setting the scene + +In the diagram, we have a Kubernetes cluster comprised of 3 nodes. You'll notice that there's no visible master node. This is because most cloud providers will give you a "_free_" master node, but you don't get to access it. The master node is just a part of the Kubernetes "_as-a-service_" which you're purchasing. + +Our nodes are partitioned into several namespaces, which logically separate our individual recipes. (_I.e., allowing both a "gitlab" and a "nextcloud" namespace to include a service named "db", which would be challenging without namespaces_) + +Outside of our cluster (_could be anywhere on the internet_) is a single VM serving as a load-balancer, running HAProxy and a webhook service. 
This load-balancer is described in detail, [in its own section](/kubernetes/loadbalancer/), but what's important up-front is that this VM is the **only element of the design for which we need to provide a fixed IP address**. + +### 1 : The mosquitto pod + +In the "mqtt" namespace, we have a single pod, running 2 containers - the mqtt broker, and a "phone-home" container. + +Why 2 containers in one pod, instead of 2 independent pods? Because all the containers in a pod are **always** run on the same physical host. We're using the phone-home container as a simple way to call a webhook on the not-in-the-cluster VM. + +The phone-home container calls the webhook, and tells HAProxy to listen on port 8443, and to forward any incoming requests to port 30843 (_within the NodePort range_) on the IP of the host running the container (_and because of the pod, the phone-home container is guaranteed to be on the same host as the MQTT container_). + +### 2 : The Traefik Ingress + +In the "default" namespace, we have a Traefik "Ingress Controller". An Ingress controller is a way to use a single port (_say, 443_) plus some intelligence (_say, a defined mapping of URLs to services_) to route incoming requests to the appropriate containers (_via services_). Basically, the Traefik ingress does what [Traefik does for us under Docker Swarm](/ha-docker-swarm/traefik/). + +What's happening in the diagram is that a phone-home pod is tied to the traefik pod using affinity, so that both containers will be executed on the same host. Again, the phone-home container calls a webhook on the HAProxy VM, auto-configuring HAProxy to send any HTTPS traffic to its calling address and custom NodePort port number. 
+ +When an inbound HTTPS request is received by Traefik, based on some internal Kubernetes elements (ingresses), Traefik provides SSL termination, and routes the request to the appropriate service (_In this case, either the GitLab UI or the UniFi UI_) + +### 3 : The UniFi pod + +What's happening in the UniFi pod is a combination of #1 and #2 above. UniFi controller provides a webUI (_typically 8443, but we serve it via Traefik on 443_), plus some extra ports for device adoption, which are using a proprietary protocol, and can't be proxied with Traefik. + +To make both the webUI and the adoption ports work, we use a combination of an ingress for the webUI (_see #2 above_), and a phone-home container to tell HAProxy to forward port 8080 (_the adoption port_) directly to the host, using a NodePort-exposed service. + +This allows us to retain the use of a single IP for all controller functions, as accessed outside of the cluster. + +### 4 : The webhook + +Each phone-home container is calling a webhook on the HAProxy VM, secured with a secret shared token. The phone-home container passes the desired frontend port (i.e., 443), the corresponding NodePort port (i.e., 30443), and the node's current public IP address. + +The webhook uses the provided details to update HAProxy for the combination of values, validate the config, and then restart HAProxy. + +### 5 : The user + +Finally, the DNS for all externally-accessible services is pointed to the IP of the HAProxy VM. On receiving an inbound request (_be it port 443, 8080, or anything else configured_), HAProxy will forward the request to the IP and NodePort port learned from the phone-home container. + +## Move on.. + +Still with me? Good. Move on to creating your cluster! + +* [Start](/kubernetes/start/) - Why Kubernetes? +* Design (this page) - How does it fit together? 
+* [Cluster](/kubernetes/cluster/) - Setup a basic cluster +* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks +* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm + + +## Chef's Notes + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/kubernetes/helm.md b/manuscript/kubernetes/helm.md new file mode 100644 index 0000000..873a9fe --- /dev/null +++ b/manuscript/kubernetes/helm.md @@ -0,0 +1,68 @@ +# Helm + +[Helm](https://github.com/helm/helm) is a tool for managing Kubernetes "charts" (_think of it as an uber-polished collection of recipes_). Using one simple command, and by tweaking one simple config file (values.yaml), you can launch a complex stack. There are many publicly available helm charts for popular packages like [elasticsearch](https://github.com/helm/charts/tree/master/stable/elasticsearch), [ghost](https://github.com/helm/charts/tree/master/stable/ghost), [grafana](https://github.com/helm/charts/tree/master/stable/grafana), [mediawiki](https://github.com/helm/charts/tree/master/stable/mediawiki), etc. + +![Kubernetes Snapshots](/images/kubernetes-helm.png) + +!!! note + Given enough interest, I may provide a helm-compatible version of the pre-mix repository for [supporters](/support/). [Hit me up](/whoami/#contact-me) if you're interested! + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/cluster/) +2. 
Geek-Fu required : 🐤 (_easy - copy and paste_) + +## Preparation + +### Install Helm + +This section is from the Helm README: + +Binary downloads of the Helm client can be found on [the Releases page](https://github.com/helm/helm/releases/latest). + +Unpack the `helm` binary and add it to your PATH and you are good to go! + +If you want to use a package manager: + +- [Homebrew](https://brew.sh/) users can use `brew install kubernetes-helm`. +- [Chocolatey](https://chocolatey.org/) users can use `choco install kubernetes-helm`. +- [Scoop](https://scoop.sh/) users can use `scoop install helm`. +- [GoFish](https://gofi.sh/) users can use `gofish install helm`. + +To rapidly get Helm up and running, start with the [Quick Start Guide](https://docs.helm.sh/using_helm/#quickstart-guide). + +See the [installation guide](https://docs.helm.sh/using_helm/#installing-helm) for more options, +including installing pre-releases. + + +## Serving + +### Initialise Helm + +After installing Helm, initialise it by running ```helm init```. This will install "tiller" pod into your cluster, which works with the locally installed helm binaries to launch/update/delete Kubernetes elements based on helm charts. + +That's it - not very exciting I know, but we'll need helm for the next and final step in building our Kubernetes cluster - deploying the [Traefik ingress controller (via helm)](/kubernetes/traefik/)! + +## Move on.. + +Still with me? Good. Move on to understanding Helm charts... + +* [Start](/kubernetes/start/) - Why Kubernetes? +* [Design](/kubernetes/design/) - How does it fit together? +* [Cluster](/kubernetes/cluster/) - Setup a basic cluster +* [Load Balancer](/kubernetes/loadbalancer/) Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* Helm (this page) - Uber-recipes from fellow geeks +* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm + + + +## Chef's Notes + +1. 
Of course, you can have lots of fun deploying all sorts of things via Helm. Check out https://github.com/helm/charts for some examples. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid_) ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/kubernetes/loadbalancer.md b/manuscript/kubernetes/loadbalancer.md new file mode 100644 index 0000000..ccd55f6 --- /dev/null +++ b/manuscript/kubernetes/loadbalancer.md @@ -0,0 +1,340 @@ +# Load Balancer + +One of the issues I encountered early on in migrating my Docker Swarm workloads to Kubernetes on GKE, was how to reliably permit inbound traffic into the cluster. + +There were several complications with the "traditional" mechanisms of providing a load-balanced ingress, not the least of which was cost. I also found that even if I paid my cloud provider (_Google_) for a load-balancer Kubernetes service, this service required a unique IP per exposed port, which was incompatible with my mining pool empire (_mining pools need to expose multiple ports on the same DNS name_). + +See further examination of the problem and possible solutions in the [Kubernetes design](/kubernetes/design/#the-challenges-of-external-access) page. + +This recipe details a simple design to permit the exposure of as many ports as you like, on a single public IP, to a cluster of Kubernetes nodes running as many pods/containers as you need, with services exposed via NodePort. + +![Kubernetes Design](/images/kubernetes-cluster-design.png) + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/cluster/) +2. VM _outside_ of Kubernetes cluster, with a fixed IP address. Perhaps, on a [$5/month Digital Ocean Droplet](https://www.digitalocean.com/?refcode=e33b78ad621b).. (_yes, another referral link. Mooar 🍷 for me!_) +3. 
Geek-Fu required : 🐧🐧🐧 (_complex - inline adjustments required_) + + +## Preparation + +### Summary + +### Create LetsEncrypt certificate + +!!! warning + Safety first, folks. You wouldn't run a webhook exposed to the big bad ol' internet without first securing it with a valid SSL certificate, would you? Of course not, I didn't think so! + +Use whatever method you prefer to generate (and later, renew) your LetsEncrypt cert. The example below uses the CertBot docker image for CloudFlare DNS validation, since that's what I've used elsewhere. + +We **could** run our webhook as a simple HTTP listener, but really, in a world where LetsEncrypt can assign you a wildcard certificate in under 30 seconds, that's unforgivable. Use the following **general** example to create a LetsEncrypt wildcard cert for your host: + +In my case, since I use CloudFlare, I create /etc/webhook/letsencrypt/cloudflare.ini: + +``` +dns_cloudflare_email=davidy@funkypenguin.co.nz +dns_cloudflare_api_key=supersekritnevergonnatellyou +``` + +I request my cert by running: +``` +cd /etc/webhook/ +docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare --preferred-challenges dns certonly --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini -d '*.funkypenguin.co.nz' +``` + +!!! question + Why use a wildcard cert? So my enemies can't examine my certs to enumerate my various services and discover my weaknesses, of course! + +I add the following as a cron command to renew my certs every day: + +``` +cd /etc/webhook && docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare renew --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini +``` + +Once you've confirmed you've got a valid LetsEncrypt certificate stored in ```/etc/webhook/letsencrypt/live/<your-domain>/fullchain.pem```, proceed to the next step.. + +### Install webhook + +We're going to use https://github.com/adnanh/webhook to run our webhook. 
On some distributions (_❤️ ya, Debian!_), webhook and its associated systemd config can be installed by running ```apt-get install webhook```. + +### Create webhook config + +We'll create a single webhook, by creating ```/etc/webhook/hooks.json``` as follows. Choose a nice secure random string for your MY_TOKEN value! + +``` +mkdir /etc/webhook +export MY_TOKEN=ilovecheese +echo << EOF > /etc/webhook/hooks.json +[ + { + "id": "update-haproxy", + "execute-command": "/etc/webhook/update-haproxy.sh", + "command-working-directory": "/etc/webhook", + "pass-arguments-to-command": + [ + { + "source": "payload", + "name": "name" + }, + { + "source": "payload", + "name": "frontend-port" + }, + { + "source": "payload", + "name": "backend-port" + }, + { + "source": "payload", + "name": "dst-ip" + }, + { + "source": "payload", + "name": "action" + } + ], + "trigger-rule": + { + "match": + { + "type": "value", + "value": "$MY_TOKEN", + "parameter": + { + "source": "header", + "name": "X-Funkypenguin-Token" + } + } + } + } +] +EOF +``` + +!!! note + Note that to avoid any bozo from calling our we're matching on a token header in the request called ```X-Funkypenguin-Token```. Webhook will **ignore** any request which doesn't include a matching token in the request header. + +### Update systemd for webhook + +!!! note + This section is particular to Debian Stretch and its webhook package. If you're using another OS for your VM, just ensure that you can start webhook with a config similar to the one illustrated below. 
+ +Since we want to force webhook to run in secure mode (_no point having a token if it can be extracted from a simple packet capture!_) I ran ```systemctl edit webhook```, and pasted in the following: + +``` +[Service] +# Override the default (non-secure) behaviour of webhook by passing our certificate details and custom hooks.json location +ExecStart= +ExecStart=/usr/bin/webhook -hooks /etc/webhook/hooks.json -verbose -secure -cert /etc/webhook/letsencrypt/live/funkypenguin.co.nz/fullchain.pem -key /etc/webhook/letsencrypt/live/funkypenguin.co.nz/privkey.pem +``` + +Then I restarted webhook by running ```systemctl enable webhook && systemctl restart webhook```. I watched the subsequent logs by running ```journalctl -u webhook -f``` + +### Create /etc/webhook/update-haproxy.sh + +When successfully authenticated with our top-secret token, our webhook will execute a local script, defined as follows (_yes, you should create this file_): + +``` +#!/bin/bash + +NAME=$1 +FRONTEND_PORT=$2 +BACKEND_PORT=$3 +DST_IP=$4 +ACTION=$5 + +# Bail if we haven't received our expected parameters +if [[ "$#" -ne 5 ]] +then + echo "illegal number of parameters" + exit 2; +fi + +# Either add or remove a service based on $ACTION +case $ACTION in + add) + # Create the portion of haproxy config + cat << EOF > /etc/webhook/haproxy/$FRONTEND_PORT.inc +### >> Used to run $NAME:${FRONTEND_PORT} +frontend ${FRONTEND_PORT}_frontend + bind *:$FRONTEND_PORT + mode tcp + default_backend ${FRONTEND_PORT}_backend + +backend ${FRONTEND_PORT}_backend + mode tcp + balance roundrobin + stick-table type ip size 200k expire 30m + stick on src + server s1 $DST_IP:$BACKEND_PORT +### << Used to run $NAME:$FRONTEND_PORT +EOF + ;; + delete) + rm /etc/webhook/haproxy/$FRONTEND_PORT.inc + ;; + *) + echo "Invalid action $ACTION" + exit 2 +esac + +# Concatenate all the haproxy configs into a single file +cat /etc/webhook/haproxy/global /etc/webhook/haproxy/*.inc > /etc/webhook/haproxy/pre_validate.cfg + +# Validate 
the generated config +haproxy -f /etc/webhook/haproxy/pre_validate.cfg -c + +# If validation was successful, only _then_ copy it over to /etc/haproxy/haproxy.cfg, and reload +if [[ $? -gt 0 ]] +then + echo "HAProxy validation failed, not continuing" + exit 2 +else + # Remember what the original file looked like + m1=$(md5sum "/etc/haproxy/haproxy.cfg") + + # Overwrite the original file + cp /etc/webhook/haproxy/pre_validate.cfg /etc/haproxy/haproxy.cfg + + # Get MD5 of new file + m2=$(md5sum "/etc/haproxy/haproxy.cfg") + + # Only if file has changed, then we need to reload haproxy + if [ "$m1" != "$m2" ] ; then + echo "HAProxy config has changed, reloading" + systemctl reload haproxy + fi +fi +``` + +### Create /etc/webhook/haproxy/global + +Create ```/etc/webhook/haproxy/global``` and populate with something like the following. This will be the non-dynamically generated part of our HAProxy config: + +``` +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin + stats timeout 30s + user haproxy + group haproxy + daemon + + # Default SSL material locations + ca-base /etc/ssl/certs + crt-base /etc/ssl/private + + # Default ciphers to use on SSL-enabled listening sockets. + # For more information, see ciphers(1SSL). 
This list is from: + # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ + # An alternative list with additional directives can be obtained from + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy + ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS + ssl-default-bind-options no-sslv3 + +defaults + log global + mode tcp + option tcplog + option dontlognull + timeout connect 5000 + timeout client 5000000 + timeout server 5000000 + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http +``` + +## Serving + +### Take the bait! + +Whew! We now have all the components of our automated load-balancing solution in place. Browse to your VM's FQDN at https://whatever.it.is:9000/hooks/update-haproxy, and you should see the text "_Hook rules were not satisfied_", with a valid SSL certificate (_You didn't send a token_). + +If you don't see the above, then check the following: + +1. Does the webhook verbose log (```journalctl -u webhook -f```) complain about invalid arguments or missing files? +2. Is port 9000 open to the internet on your VM? + +### Apply to pods + +You'll see me use this design in any Kubernetes-based recipe which requires container-specific ports, like UniFi. 
Here's an excerpt of the .yml which defines the UniFi controller: + +``` + +spec: + containers: + - image: linuxserver/unifi + name: controller + volumeMounts: + - name: controller-volumeclaim + mountPath: /config + - image: funkypenguin/poor-mans-k8s-lb + imagePullPolicy: Always + name: 8080-phone-home + env: + - name: REPEAT_INTERVAL + value: "600" + - name: FRONTEND_PORT + value: "8080" + - name: BACKEND_PORT + value: "30808" + - name: NAME + value: "unifi-adoption" + - name: WEBHOOK + value: "https://my-secret.url.wouldnt.ya.like.to.know:9000/hooks/update-haproxy" + - name: WEBHOOK_TOKEN + valueFrom: + secretKeyRef: + name: unifi-credentials + key: webhook_token.secret + +``` + +The takeaways here are: + +1. We add the funkypenguin/poor-mans-k8s-lb containier to any pod which has special port requirements, forcing the container to run on the same node as the other containers in the pod (_in this case, the UniFi controller_) +2. We use a Kubernetes secret for the webhook token, so that our .yml can be shared without exposing sensitive data + +Here's what the webhook logs look like when the above is added to the UniFi deployment: + +``` +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Started POST /hooks/update-haproxy +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy got matched +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy hook triggered successfully +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Completed 200 OK in 2.123921ms +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 executing /etc/webhook/update-haproxy.sh (/etc/webhook/update-haproxy.sh) with arguments ["/etc/webhook/update-haproxy.sh" "unifi-adoption" "8080" "30808" "35.244.91.178" "add"] and environment [] using /etc/webhook as cwd +Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 command output: Configuration file is valid + +``` + + 
+## Move on.. + +Still with me? Good. Move on to setting up an ingress SSL terminating proxy with Traefik.. + +* [Start](/kubernetes/start/) - Why Kubernetes? +* [Design](/kubernetes/design/) - How does it fit together? +* [Cluster](/kubernetes/cluster/) - Setup a basic cluster +* Load Balancer (this page) - Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks +* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm + + +## Chef's Notes + +1. This is MVP of the load balancer solution. Any suggestions for improvements are welcome 😉 + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/kubernetes/snapshots.md b/manuscript/kubernetes/snapshots.md new file mode 100644 index 0000000..7df1199 --- /dev/null +++ b/manuscript/kubernetes/snapshots.md @@ -0,0 +1,187 @@ +# Snapshots + +Before we get carried away creating pods, services, deployments etc, let's spare a thought for _security_... (_DevSecPenguinOps, here we come!_). In the context of this recipe, security refers to safe-guarding your data from accidental loss, as well as malicious impact. + +Under [Docker Swarm](/ha-docker-swarm/design/), we used [shared storage](/ha-docker-swarm/shared-storage-ceph/) with [Duplicity](/recipes/duplicity/) (or [ElkarBackup](recipes/elkarbackup/)) to automate backups of our persistent data. + +Now that we're playing in the deep end with Kubernetes, we'll need a Cloud-native backup solution... + +It bears repeating though - don't be like [Cameron](http://haltandcatchfire.wikia.com/wiki/Cameron_Howe). Backup your stuff. 
+ + + +This recipe employs a clever tool ([miracle2k/k8s-snapshots](https://github.com/miracle2k/k8s-snapshots)), running _inside_ your cluster, to trigger automated snapshots of your persistent volumes, using your cloud provider's APIs. + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/cluster/) with either AWS or GKE (currently, but apparently other providers are [easy to implement](https://github.com/miracle2k/k8s-snapshots/blob/master/k8s_snapshots/backends/abstract.py)) +2. Geek-Fu required : 🐒🐒 (_medium - minor adjustments may be required_) + +## Preparation + +### Create RoleBinding (GKE only) + +If you're running GKE, run the following to create a RoleBinding, allowing your user to grant rights-it-doesn't-currently-have to the service account responsible for creating the snapshots: + +```kubectl create clusterrolebinding your-user-cluster-admin-binding \ + --clusterrole=cluster-admin --user=``` + +!!! question + Why do we have to do this? Check [this blog post](https://www.funkypenguin.co.nz/workaround-blocked-attempt-to-grant-extra-privileges-on-gke/) for details + +### Apply RBAC + +If your cluster is RBAC-enabled (_it probably is_), you'll need to create a ClusterRole and ClusterRoleBinding to allow k8s_snapshots to see your PVs and friends: + +``` +kubectl apply -f https://raw.githubusercontent.com/miracle2k/k8s-snapshots/master/rbac.yaml +``` + +## Serving + +### Deploy the pod + +Ready? Run the following to create a deployment in to the kube-system namespace: + +``` +cat <``` + +### Pick PVs to snapshot + +k8s-snapshots relies on annotations to tell it how frequently to snapshot your PVs. A PV requires the ```backup.kubernetes.io/deltas``` annotation in order to be snapshotted. + +From the k8s-snapshots README: + +``` +The generations are defined by a list of deltas formatted as ISO 8601 durations (this differs from tarsnapper). PT60S or PT1M means a minute, PT12H or P0.5D is half a day, P1W or P7D is a week. 
The number of backups in each generation is implied by it's and the parent generation's delta. + +For example, given the deltas PT1H P1D P7D, the first generation will consist of 24 backups each one hour older than the previous (or the closest approximation possible given the available backups), the second generation of 7 backups each one day older than the previous, and backups older than 7 days will be discarded for good. + +The most recent backup is always kept. + +The first delta is the backup interval. +``` + +To add the annotation to an existing PV, run something like this: + +``` +kubectl patch pv pvc-01f74065-8fe9-11e6-abdd-42010af00148 -p \ + '{"metadata": {"annotations": {"backup.kubernetes.io/deltas": "P1D P30D P360D"}}}' +``` + +To add the annotation to a _new_ PV, add the following annotation to your **PVC**: + +``` +backup.kubernetes.io/deltas: PT1H P2D P30D P180D +``` + +Here's an example of the PVC for the UniFi recipe, which includes 7 daily snapshots of the PV: + +``` +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: controller-volumeclaim + namespace: unifi + annotations: + backup.kubernetes.io/deltas: P1D P7D +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +And here's what my snapshot list looks like after a few days: + +![Kubernetes Snapshots](/images/kubernetes-snapshots.png) + +### Snapshot a non-Kubernetes volume (optional) + +If you're running traditional compute instances with your cloud provider (I do this for my poor man's load balancer), you might want to backup _these_ volumes as well. + +To do so, first create a custom resource, ```SnapshotRule```: + +``` +cat < + +## Wait, what? + +Why would you want to use Kubernetes for your self-hosted recipes over simple Docker Swarm? Here's my personal take.. + +I use Docker swarm both at home (_on a single-node swarm_), and on a trio of Ubuntu 16.04 VPSs in a shared lab OpenStack environment. 
+ +In both cases above, I'm responsible for maintaining the infrastructure supporting Docker - either the physical host, or the VPS operating systems. + +I started experimenting with Kubernetes as a plan to improve the reliability of my cryptocurrency mining pools (_the contended lab VPSs negatively impacted the likelihood of finding a block_), and as a long-term replacement for my aging home server. + +What I enjoy about building recipes and self-hosting is **not** the operating system maintenance, it's the tools and applications that I can quickly launch in my swarms. If I could **only** play with the applications, and not bother with the maintenance, I totally would. + +Kubernetes (_on a cloud provider, mind you!_) does this for me. I feed Kubernetes a series of YAML files, and it takes care of all the rest, including version upgrades, node failures/replacements, disk attach/detachments, etc. + +## Uggh, it's so complicated! + +Yes, but that's a necessary sacrifice for the maturity, power and flexibility it offers. Like docker-compose syntax, Kubernetes uses YAML to define its various, interworking components. + +Let's talk some definitions. Kubernetes.io provides a [glossary](https://kubernetes.io/docs/reference/glossary/?fundamental=true). My definitions are below: + +* **Node** : A compute instance which runs docker containers, managed by a cluster master. + +* **Cluster** : One or more "worker nodes" which run containers. Very similar to a Docker Swarm node. In most cloud provider deployments, the [master node for your cluster is provided free of charge](https://www.sdxcentral.com/articles/news/google-eliminates-gke-management-fees-kubernetes-clusters/2017/11/), but you don't get to access it. + +* **Pod** : A collection of one or more the containers. If a pod runs multiple containers, these containers always run on the same node. + +* **Deployment** : A definition of a desired state. I.e., "I want a pod with containers A and B running". 
The Kubernetes master then ensures that any changes necessary to maintain the state are taken. (_I.e., if a pod crashes, but is supposed to be running, a new pod will be started_) + +* **Service** : Unlike Docker Swarm, service discovery is not _built in_ to Kubernetes. For your pods to discover each other (say, to have "webserver" talk to "database"), you create a service for each pod, and refer to these services when you want your containers (_in pods_) to talk to each other. Complicated, yes, but the abstraction allows you to do powerful things, like auto-scale-up a bunch of database "pods" behind a service called "database", or perform a rolling container image upgrade with zero impact. + +* **External access** : Services not only allow pods to discover each other, but they're also the mechanism through which the outside world can talk to a container. At the simplest level, this is akin to exposing a container port on a docker host. + +* **Ingress** : When mapping ports to applications is inadequate (think virtual web hosts), an ingress is a sort of "inbound router" which can receive requests on one port (i.e., HTTPS), and forward them to a variety of internal pods, based on things like VHOST, etc. For us, this is the functional equivalent of what Traefik does in Docker Swarm. In fact, we use a Traefik Ingress in Kubernetes to accomplish the same. + +* **Persistent Volume** : A virtual disk which is attached to a pod, storing persistent data. Meets the requirement for shared storage from Docker Swarm. I.e., if a persistent volume (PV) is bound to a pod, and the pod dies and is recreated, or get upgraded to a new image, the PV the data is bound to the new container. PVs can be "claimed" in a YAML definition, so that your Kubernetes provider will auto-create a PV when you launch your pod. PVs can be snapshotted. + +* **Namespace** : An abstraction to separate a collection of pods, services, ingresses, etc. A "virtual cluster within a cluster". 
Can be used for security, or simplicity. For example, since we don't have individual docker stacks anymore, if you commonly name your database container "db", and you want to deploy two applications which both use a database container, how will you name your services? Use namespaces to keep each application ("nextcloud" vs "kanboard") separate. Namespaces also allow you to allocate resources **limits** to the aggregate of containers in a namespace, so you could, for example, limit the "nextcloud" namespace to 2.3 CPUs and 1200MB RAM. + +## Mm.. maaaaybe, how do I start? + +If you're like me, and you learn by doing, either play with the examples at https://labs.play-with-k8s.com/, or jump right in by setting up a Google Cloud trial (_you get $300 credit for 12 months_), or a small cluster on [Digital Ocean](/kubernetes/digitalocean/). + +If you're the learn-by-watching type, just search for "Kubernetes introduction video". There's a **lot** of great content available. + +## I'm ready, gimme some recipes! + +As of Jan 2019, our first (_and only!_) Kubernetes recipe is a WIP for the Mosquitto [MQTT](/recipes/mqtt/) broker. It's a good, simple starter if you're into home automation (_shoutout to [Home Assistant](/recipes/homeassistant/)!_), since it only requires a single container, and a simple NodePort service. + +I'd love for your [feedback](/support/) on the Kubernetes recipes, as well as suggestions for what to add next. The current rough plan is to replicate the Chef's Favorites recipes (_see the left-hand panel_) into Kubernetes first. + +## Move on.. + +Still with me? Good. Move on to reviewing the design elements + +* Start (this page) - Why Kubernetes? +* [Design](/kubernetes/design/) - How does it fit together? 
+* [Cluster](/kubernetes/cluster/) - Setup a basic cluster +* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks +* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm + + +## Chef's Notes + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid_) ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/kubernetes/traefik.md b/manuscript/kubernetes/traefik.md new file mode 100644 index 0000000..0aedd7d --- /dev/null +++ b/manuscript/kubernetes/traefik.md @@ -0,0 +1,220 @@ +# Traefik + +This recipe utilises the [traefik helm chart](https://github.com/helm/charts/tree/master/stable/traefik) to provide LetsEncrypt-secured HTTPS access to multiple containers within your cluster. + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/cluster/) +2. [Helm](/kubernetes/helm/) installed and initialised in your cluster + +## Preparation + +### Clone helm charts + +Clone the helm charts, by running: + +``` +git clone https://github.com/helm/charts +``` + +Change to stable/traefik: + +``` +cd charts/stable/traefik +``` + +### Edit values.yaml + +The beauty of the helm approach is that all the complexity of the Kubernetes elements' YAML files is hidden from you (created using templates), and all your changes go into values.yaml. + +These are my values, you'll need to adjust for your own situation: + +``` +imageTag: alpine +serviceType: NodePort +# yes, we're not listening on 80 or 443 because we don't want to pay for a loadbalancer IP to do this. 
I use poor-mans-k8s-lb instead +service: + nodePorts: + http: 30080 + https: 30443 +cpuRequest: 1m +memoryRequest: 100Mi +cpuLimit: 1000m +memoryLimit: 500Mi + +ssl: + enabled: true + enforced: true +debug: + enabled: false + +rbac: + enabled: true +dashboard: + enabled: true + domain: traefik.funkypenguin.co.nz +kubernetes: + # set these to all the namespaces you intend to use. I standardize on one-per-stack. You can always add more later + namespaces: + - kube-system + - unifi + - kanboard + - nextcloud + - huginn + - miniflux +accessLogs: + enabled: true +acme: + persistence: + enabled: true + # Add the necessary annotation to backup ACME store with k8s-snapshots + annotations: { "backup.kubernetes.io/deltas": "P1D P7D" } + staging: false + enabled: true + logging: true + email: "" + challengeType: "dns-01" + dnsProvider: + name: cloudflare + cloudflare: + CLOUDFLARE_EMAIL: "" + domains: + enabled: true + domainsList: + - main: "*.funkypenguin.co.nz" # name of the wildcard domain name for the certificate + - sans: + - "funkypenguin.co.nz" +metrics: + prometheus: + enabled: true +``` + +!!! note + The helm chart doesn't enable the Traefik dashboard by default. I intend to add an oauth_proxy pod to secure this, in a future recipe update. + +### Prepare phone-home pod + +[Remember](/kubernetes/loadbalancer/) how our load balancer design ties a phone-home container to another container using a pod, so that the phone-home container can tell our external load balancer (_using a webhook_) where to send our traffic? + +Since we deployed Traefik using helm, we need to take a slightly different approach, so we'll create a pod with an affinity which ensures it runs on the same host which runs the Traefik container (_more precisely, containers with the label app=traefik_). 
+ +Create phone-home.yaml as follows: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: phonehome-traefik +spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - traefik + topologyKey: failure-domain.beta.kubernetes.io/zone + containers: + - image: funkypenguin/poor-mans-k8s-lb + imagePullPolicy: Always + name: phonehome-traefik + env: + - name: REPEAT_INTERVAL + value: "600" + - name: FRONTEND_PORT + value: "443" + - name: BACKEND_PORT + value: "30443" + - name: NAME + value: "traefik" + - name: WEBHOOK + value: "https://:9000/hooks/update-haproxy" + - name: WEBHOOK_TOKEN + valueFrom: + secretKeyRef: + name: traefik-credentials + key: webhook_token.secret +``` + +Create your webhook token secret by running: + +``` +echo -n "imtoosecretformyshorts" > webhook_token.secret +kubectl create secret generic traefik-credentials --from-file=webhook_token.secret +``` + +!!! warning + Yes, the "-n" in the echo statement is needed. [Read here for why](https://www.funkypenguin.co.nz/beware-the-hidden-newlines-in-kubernetes-secrets/). + +## Serving + +### Install the chart + +To install the chart, simply run ```helm install stable/traefik --name traefik --namespace kube-system``` + +That's it, traefik is running. + +You can confirm this by running ```kubectl get pods```, and even watch the traefik logs, by running ```kubectl logs -f traefik``` + +### Deploy the phone-home pod + +We still can't access traefik yet, since it's listening on port 30443 on node it happens to be running on. We'll launch our phone-home pod, to tell our [load balancer](/kubernetes/loadbalancer/) where to send incoming traffic on port 443. + +Optionally, on your loadbalancer VM, run ```journalctl -u webhook -f``` to watch for the container calling the webhook. + +Run ```kubectl create -f phone-home.yaml``` to create the pod. 
+ +Run ```kubectl get pods -o wide``` to confirm that both the phone-home pod and the traefik pod are on the same node: + +``` +# kubectl get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE +phonehome-traefik 1/1 Running 0 20h 10.56.2.55 gke-penguins-are-sexy-8b85ef4d-2c9g +traefik-69db67f64c-5666c 1/1 Running 0 10d 10.56.2.30 gke-penguins-are-sexy-8b85ef4d-2c9g +``` + +Now browse to https://<your-url>, and you should get a valid SSL cert, along with a 404 error (_you haven't deployed any other recipes yet_) + +### Making changes + +If you change a value in values.yaml, and want to update the traefik pod, run: + +``` +helm upgrade --values values.yaml traefik stable/traefik --recreate-pods +``` + +## Review + +We're doneburgers! 🍔 We now have all the pieces to safely deploy recipes into our Kubernetes cluster, knowing: + +1. Our HTTPS traffic will be secured with LetsEncrypt (thanks Traefik!) +2. Our non-HTTPS ports (like UniFi adoption) will be load-balanced using a free-to-scale [external load balancer](/kubernetes/loadbalancer/) +3. Our persistent data will be [automatically backed up](/kubernetes/snapshots/) + +Here's a recap: + +* [Start](/kubernetes/start/) - Why Kubernetes? +* [Design](/kubernetes/design/) - How does it fit together? +* [Cluster](/kubernetes/cluster/) - Setup a basic cluster +* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access +* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data +* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks +* Traefik (this page) - Traefik Ingress via Helm + +## Where to next? + +I'll be adding more Kubernetes versions of existing recipes soon. Check out the [MQTT](/recipes/mqtt/) recipe for a start! + + +## Chef's Notes + +1. It's kinda lame to be able to bring up Traefik but not to use it. I'll be adding the oauth_proxy element shortly, which will make this last step a little more conclusive and exciting! 
+ +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/autopirate.md b/manuscript/recipes/autopirate.md similarity index 87% rename from manuscript/recipies/autopirate.md rename to manuscript/recipes/autopirate.md index 96cda27..ab8e1c6 100644 --- a/manuscript/recipies/autopirate.md +++ b/manuscript/recipes/autopirate.md @@ -18,12 +18,13 @@ Tools included in the AutoPirate stack are: * **[NZBGet](https://nzbget.net/)** : downloads data from usenet servers based on .nzb definitions, but written in C++ and designed with performance in mind to achieve maximum download speed by using very little system resources (_this is a popular alternative to SABnzbd_) * **[RTorrent](https://github.com/rakshasa/rtorrent/wiki)** is a CLI-based torrent client, which when combined with **[ruTorrent](https://github.com/Novik/ruTorrent)** becomes a powerful and fully browser-managed torrent client. (_Yes, it's not Usenet, but Sonarr/Radarr will let fulfill your watchlist using either Usenet **or** torrents, so it's worth including_) * **[NZBHydra](https://github.com/theotherp/nzbhydra)** : acts as a "meta-indexer", so that your downloading tools (_radarr, sonarr, etc_) only need to be setup for a single indexes. Also produces interesting stats on indexers, which helps when evaluating which indexers are performing well. +* **[NZBHydra2](https://github.com/theotherp/nzbhydra2)** : is a high-performance rewrite of the original NZBHydra, with extra features. 
While still in beta, this NZBHydra2 will eventually supercede NZBHydra * **[Sonarr](https://sonarr.tv)** : finds, downloads and manages TV shows * **[Radarr](https://radarr.video)** : finds, downloads and manages movies * **[Mylar](https://github.com/evilhero/mylar)** : finds, downloads and manages comic books * **[Headphones](https://github.com/rembo10/headphones)** : finds, downloads and manages music * **[Lazy Librarian](https://github.com/itsmegb/LazyLibrarian)** : finds, downloads and manages ebooks -* **[Ombi](https://github.com/tidusjar/Ombi)** : provides an interface to request additions to a [Plex](/recipies/plex/)/[Emby](/recipies/emby/) library using the above tools +* **[Ombi](https://github.com/tidusjar/Ombi)** : provides an interface to request additions to a [Plex](/recipes/plex/)/[Emby](/recipes/emby/) library using the above tools * **[Jackett](https://github.com/Jackett/Jackett)** : Provides an local, caching, API-based interface to torrent trackers, simplifying the way your tools search for torrents. Since this recipe is so long, and so many of the tools are optional to the final result (_i.e., if you're not interested in comics, you won't want Mylar_), I've described each individual tool on its own sub-recipe page (_below_), even though most of them are deployed very similarly. 
@@ -110,18 +111,19 @@ networks: Now work your way through the list of tools below, adding whichever tools your want to use, and finishing with the **end** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [End](/recipes/autopirate/end/) (launch the stack) ### Tip your waiter (donate) diff --git a/manuscript/recipies/autopirate/end.md b/manuscript/recipes/autopirate/end.md similarity index 88% rename from manuscript/recipies/autopirate/end.md rename to manuscript/recipes/autopirate/end.md index f6bdf11..4b2e809 100644 --- a/manuscript/recipies/autopirate/end.md +++ b/manuscript/recipes/autopirate/end.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's the conclusion to the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. 
+ This is not a complete recipe - it's the conclusion to the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. ### Launch Autopirate stack diff --git a/manuscript/recipies/autopirate/headphones.md b/manuscript/recipes/autopirate/headphones.md similarity index 73% rename from manuscript/recipies/autopirate/headphones.md rename to manuscript/recipes/autopirate/headphones.md index f4993bf..2c8b318 100644 --- a/manuscript/recipies/autopirate/headphones.md +++ b/manuscript/recipes/autopirate/headphones.md @@ -1,7 +1,7 @@ hero: AutoPirate - A fully-featured recipe to automate finding, downloading, and organising your media 📺 🎥 🎵 !!! warning - This is not a complete recipe - it's a component of the [autopirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Headphones @@ -11,7 +11,7 @@ hero: AutoPirate - A fully-featured recipe to automate finding, downloading, and ## Inclusion into AutoPirate -To include Headphones in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Headphones in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` headphones: @@ -24,9 +24,8 @@ headphones: - internal headphones_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/headphones.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -52,20 +51,23 @@ headphones_proxy: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) * [Mylar](https://github.com/evilhero/mylar) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) * Headphones (this page) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipes/autopirate/heimdall.md b/manuscript/recipes/autopirate/heimdall.md new file mode 100644 index 0000000..698722d --- /dev/null +++ b/manuscript/recipes/autopirate/heimdall.md @@ -0,0 +1,88 @@ +!!! warning + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + +# Heimdall + +[Heimdall Application Dashboard](https://heimdall.site/) is a dashboard for all your web applications. 
It doesn't need to be limited to applications though, you can add links to anything you like. + +Heimdall is an elegant solution to organise all your web applications. It’s dedicated to this purpose so you won’t lose your links in a sea of bookmarks. + +Heimdall provides a single URL to manage access to all of your autopirate tools, and includes "enhanced" (_i.e., display stats within Heimdall without launching the app_) access to [NZBGet](/recipes/autopirate/nzbget.md), [SABnzbd](/recipes/autopirate/sabnzbd/), and friends. + +![Heimdall Screenshot](../../images/heimdall.jpg) + +## Inclusion into AutoPirate + +To include Heimdall in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: + +```` + heimdall: + image: linuxserver/heimdall:latest + env_file: /var/data/config/autopirate/heimdall.env + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/data/heimdall:/config + networks: + - internal + + heimdall_proxy: + image: funkypenguin/oauth2_proxy:latest + env_file : /var/data/config/autopirate/heimdall.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:heimdall.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/data/config/autopirate/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://heimdall:80 + -redirect-url=https://heimdall.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + + +```` + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + +## Assemble more tools.. + +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: + +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylarr/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* Heimdall (this page) +* [End](/recipes/autopirate/end/) (launch the stack) + + +## Chef's Notes 📓 + +1. In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. You can identify the tool-specific port by looking at the docker-compose service definition. +2. The inclusion of Heimdall was due to the efforts of @gkoerk in our [Discord server](http://chat.funkypenguin.co.nz). Thanks gkoerk! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 
💬 diff --git a/manuscript/recipies/autopirate/jackett.md b/manuscript/recipes/autopirate/jackett.md similarity index 72% rename from manuscript/recipies/autopirate/jackett.md rename to manuscript/recipes/autopirate/jackett.md index e3ca5f7..e796a03 100644 --- a/manuscript/recipies/autopirate/jackett.md +++ b/manuscript/recipes/autopirate/jackett.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [autopirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Jackett @@ -11,7 +11,7 @@ This allows for getting recent uploads (like RSS) and performing searches. Jacke ## Inclusion into AutoPirate -To include Jackett in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Jackett in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` jackett: @@ -23,9 +23,8 @@ jackett: - internal jackett_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/jackett.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -52,20 +51,23 @@ jackett_proxy: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylarr/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylarr/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) * Jackett (this page) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipies/autopirate/lazylibrarian.md b/manuscript/recipes/autopirate/lazylibrarian.md similarity index 77% rename from manuscript/recipies/autopirate/lazylibrarian.md rename to manuscript/recipes/autopirate/lazylibrarian.md index be582b3..e590eb5 100644 --- a/manuscript/recipies/autopirate/lazylibrarian.md +++ b/manuscript/recipes/autopirate/lazylibrarian.md @@ -1,5 +1,5 @@ !!! 
warning - This is not a complete recipe - it's a component of the [autopirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # LazyLibrarian @@ -15,7 +15,7 @@ ## Inclusion into AutoPirate -To include LazyLibrarian in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include LazyLibrarian in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` lazylibrarian: @@ -28,9 +28,8 @@ lazylibrarian: - internal lazylibrarian_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/lazylibrarian.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -64,25 +63,28 @@ calibre-server: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) * [Mylar](https://github.com/evilhero/mylar) * Lazy Librarian (this page) -* [Headphones](https://github.com/rembo10/headphones) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Headphones](/recipes/autopirate/headphones) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 -1. The calibre-server container co-exists within the Lazy Librarian (LL) containers so that LL can automatically add a book to Calibre using the calibre-server interface. The calibre library can then be properly viewed using the [calibre-web](/recipies/calibre-web) recipe. +1. The calibre-server container co-exists within the Lazy Librarian (LL) containers so that LL can automatically add a book to Calibre using the calibre-server interface. The calibre library can then be properly viewed using the [calibre-web](/recipes/calibre-web) recipe. 2. 
In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. You can identify the tool-specific port by looking at the docker-compose service definition. ### Tip your waiter (donate) diff --git a/manuscript/recipes/autopirate/lidarr.md b/manuscript/recipes/autopirate/lidarr.md new file mode 100644 index 0000000..65e7a9f --- /dev/null +++ b/manuscript/recipes/autopirate/lidarr.md @@ -0,0 +1,83 @@ +hero: AutoPirate - A fully-featured recipe to automate finding, downloading, and organising your media 📺 🎥 🎵 📖 + +!!! warning + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + +# Lidarr + +[Lidarr](https://lidarr.audio/) is an automated music downloader for NZB and Torrent. It performs the same function as [Headphones](/recipes/autopirate/headphones), but is written using the same(ish) codebase as [Radarr](/recipes/autopirate/radarr/) and [Sonarr](/recipes/autopirate/sonarr). It's blazingly fast, and includes beautiful album/artist art. 
Lidarr supports [SABnzbd](/recipes/autopirate/sabnzbd/), [NZBGet](/recipes/autopirate/nzbget/), Transmission, µTorrent, Deluge and Blackhole (_just like Sonarr / Radarr_) + +![Lidarr Screenshot](../../images/lidarr.png) + +## Inclusion into AutoPirate + +To include Lidarr in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: + +```` +lidarr: + image: linuxserver/lidarr:latest + env_file : /var/data/config/autopirate/lidarr.env + volumes: + - /var/data/autopirate/lidarr:/config + - /var/data/media:/media + networks: + - internal + +lidarr_proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/autopirate/lidarr.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:lidarr.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/autopirate/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://lidarr:8181 + -redirect-url=https://lidarr.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt +```` + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + +## Assemble more tools.. 
+
+Continue through the list of tools below, adding whichever tools you want to use, and finishing with the **[end](/recipes/autopirate/end/)** section:
+
+* [SABnzbd](/recipes/autopirate/sabnzbd.md)
+* [NZBGet](/recipes/autopirate/nzbget.md)
+* [RTorrent](/recipes/autopirate/rtorrent/)
+* [Sonarr](/recipes/autopirate/sonarr/)
+* [Radarr](/recipes/autopirate/radarr/)
+* [Mylar](/recipes/autopirate/mylar/)
+* [Lazy Librarian](/recipes/autopirate/lazylibrarian/)
+* [Headphones](/recipes/autopirate/headphones/)
+* Lidarr (this page)
+* [NZBHydra](/recipes/autopirate/nzbhydra/)
+* [NZBHydra2](/recipes/autopirate/nzbhydra2/)
+* [Ombi](/recipes/autopirate/ombi/)
+* [Jackett](/recipes/autopirate/jackett/)
+* [Heimdall](/recipes/autopirate/heimdall/)
+* [End](/recipes/autopirate/end/) (launch the stack)
+
+
+## Chef's Notes 📓
+
+1. In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. You can identify the tool-specific port by looking at the docker-compose service definition.
+2. The addition of the Lidarr recipe was contributed by our very own @gpulido in Discord (http://chat.funkypenguin.co.nz) - Thanks Gabriel!
+
+### Tip your waiter (donate) 👏
+
+Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏
+
+### Your comments?
💬 diff --git a/manuscript/recipies/autopirate/mylar.md b/manuscript/recipes/autopirate/mylar.md similarity index 69% rename from manuscript/recipies/autopirate/mylar.md rename to manuscript/recipes/autopirate/mylar.md index 78bca45..19c4021 100644 --- a/manuscript/recipies/autopirate/mylar.md +++ b/manuscript/recipes/autopirate/mylar.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [autopirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [autopirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Mylar @@ -9,7 +9,7 @@ ## Inclusion into AutoPirate -To include Mylar in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Mylar in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` mylar: @@ -22,9 +22,8 @@ mylar: - internal mylar_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/mylar.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -50,20 +49,23 @@ mylar_proxy: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) * Mylar (this page) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipies/autopirate/nzbget.md b/manuscript/recipes/autopirate/nzbget.md similarity index 66% rename from manuscript/recipies/autopirate/nzbget.md rename to manuscript/recipes/autopirate/nzbget.md index d0d13fb..86e5ef5 100644 --- a/manuscript/recipies/autopirate/nzbget.md +++ b/manuscript/recipes/autopirate/nzbget.md @@ -1,18 +1,18 @@ !!! 
warning - This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # NZBGet ## Introduction -NZBGet performs the same function as [SABnzbd](/recipies/autopirate/sabnzbd.md) (_downloading content from Usenet servers_), but it's lightweight and fast(er), written in C++ (_as opposed to Python_). +NZBGet performs the same function as [SABnzbd](/recipes/autopirate/sabnzbd.md) (_downloading content from Usenet servers_), but it's lightweight and fast(er), written in C++ (_as opposed to Python_). ![NZBGet Screenshot](../../images/nzbget.jpg) ## Inclusion into AutoPirate -To include NZBGet in your [AutoPirate](/recipies/autopirate/) stack -(_The only reason you **wouldn't** use NZBGet, would be if you were using [SABnzbd](/recipies/autopirate/sabnzbd/) instead_), include the following in your autopirate.yml stack definition file: +To include NZBGet in your [AutoPirate](/recipes/autopirate/) stack +(_The only reason you **wouldn't** use NZBGet, would be if you were using [SABnzbd](/recipes/autopirate/sabnzbd/) instead_), include the following in your autopirate.yml stack definition file: !!! tip I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` @@ -28,9 +28,8 @@ nzbget: - internal nzbget_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/nzbget.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -57,20 +56,23 @@ nzbget_proxy: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) * NZBGet (this page) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipies/autopirate/nzbhydra.md b/manuscript/recipes/autopirate/nzbhydra.md similarity index 77% rename from manuscript/recipies/autopirate/nzbhydra.md rename to manuscript/recipes/autopirate/nzbhydra.md index 2ed0533..d11fb30 100644 --- a/manuscript/recipies/autopirate/nzbhydra.md +++ b/manuscript/recipes/autopirate/nzbhydra.md @@ -1,5 +1,5 @@ !!! 
warning - This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # NZBHydra @@ -16,7 +16,7 @@ ## Inclusion into AutoPirate -To include NZBHydra in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include NZBHydra in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` nzbhydra: @@ -28,9 +28,8 @@ nzbhydra: - internal nzbhydra_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/nzbhydra.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -56,20 +55,23 @@ nzbhydra_proxy: ## Assemble more tools.. -Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* 
[Headphones](/recipes/autopirate/headphones/)
+* [Lidarr](/recipes/autopirate/lidarr/)
* NZBHydra (this page)
-* [Ombi](/recipies/autopirate/ombi/)
-* [Jackett](/recipies/autopirate/jackett/)
-* [End](/recipies/autopirate/end/) (launch the stack)
+* [NZBHydra2](/recipes/autopirate/nzbhydra2/)
+* [Ombi](/recipes/autopirate/ombi/)
+* [Jackett](/recipes/autopirate/jackett/)
+* [Heimdall](/recipes/autopirate/heimdall/)
+* [End](/recipes/autopirate/end/) (launch the stack)


## Chef's Notes 📓
diff --git a/manuscript/recipes/autopirate/nzbhydra2.md b/manuscript/recipes/autopirate/nzbhydra2.md
new file mode 100644
index 0000000..40d687d
--- /dev/null
+++ b/manuscript/recipes/autopirate/nzbhydra2.md
@@ -0,0 +1,101 @@
+!!! warning
+    This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity.
+
+
+# NZBHydra 2
+
+[NZBHydra 2](https://github.com/theotherp/nzbhydra2) is a meta search for NZB indexers. It provides easy access to a number of raw and newznab based indexers. You can search all your indexers from one place and use it as an indexer source for tools like Sonarr, Radarr or CouchPotato.
+
+!!! note
+    NZBHydra 2 is a complete rewrite of [NZBHydra (1)](/recipes/autopirate/nzbhydra/). It's currently in Beta. It works mostly fine but some functions might not be completely done and incompatibilities with some tools might still exist. You might want to run both in parallel for migration / testing purposes, but ultimately you'll probably want to switch over to NZBHydra 2 exclusively.
+
+![NZBHydra Screenshot](../../images/nzbhydra2.png)
+
+Features include:
+
+* Searches Anizb, BinSearch, NZBIndex and any newznab compatible indexers.
Merges all results, filters them by a number of configurable restrictions, recognizes duplicates and returns them all in one place +* Add results to [NZBGet](/recipes/autopirate/nzbget/) or [SABnzbd](/recipes/autopirate/sabnzbd/) +* Support for all relevant media IDs (IMDB, TMDB, TVDB, TVRage, TVMaze) and conversion between them +* Query generation, meaning a query will be generated if only a media ID is provided in the search and the indexer doesn't support the ID or if no results were found +* Compatible with [Sonarr](/recipes/autopirate/sonarr/), [Radarr](/recipes/autopirate/radarr/), [NZBGet](/recipes/autopirate/nzbget.md), [SABnzbd](/recipes/autopirate/sabnzbd/), nzb360, CouchPotato, [Mylar](/recipes/autopirate/mylar/), [Lazy Librarian](/recipes/autopirate/lazylibrarian/), Sick Beard, [Jackett/Cardigann](/recipes/autopirate/jackett/), Watcher, etc. +* Search and download history and extensive stats. E.g. indexer response times, download shares, NZB age, etc. +* Authentication and multi-user support +* Automatic update of NZB download status by querying configured downloaders +* RSS support with configurable cache times +* Torrent support (_Although I prefer [Jackett](/recipes/autopirate/jackett/) for this_): + * For GUI searches, allowing you to download torrents to a blackhole folder + * A separate Torznab compatible endpoint for API requests, allowing you to merge multiple trackers +* Extensive configurability +* Migration of database and settings from v1 + + +## Inclusion into AutoPirate + +To include NZBHydra2 in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: + +```` +nzbhydra2: + image: linuxserver/hydra2:latest + env_file : /var/data/config/autopirate/nzbhydra2.env + volumes: + - /var/data/autopirate/nzbhydra2:/config + networks: + - internal + +nzbhydra2_proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/autopirate/nzbhydra2.env + networks: + - internal + - 
traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:nzbhydra2.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/autopirate/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://nzbhydra2:5076 + -redirect-url=https://nzbhydra2.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt +```` + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + +## Assemble more tools.. + +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: + +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* NZBHydra2 (this page) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) + + +## Chef's Notes 📓 + +1. In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra2, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. 
You can identify the tool-specific port by looking at the docker-compose service definition.
+2. Note that NZBHydra2 _can_ co-exist with NZBHydra (1), but if you want your tools (Sonarr, Radarr, etc) to use NZBHydra2, you'll need to change both the target hostname (_to "nzbhydra2"_) and the target port (_to 5076_).
+
+### Tip your waiter (donate) 👏
+
+Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏
+
+### Your comments? 💬
diff --git a/manuscript/recipies/autopirate/ombi.md b/manuscript/recipes/autopirate/ombi.md
similarity index 72%
rename from manuscript/recipies/autopirate/ombi.md
rename to manuscript/recipes/autopirate/ombi.md
index 28f7497..9d4cc47 100644
--- a/manuscript/recipies/autopirate/ombi.md
+++ b/manuscript/recipes/autopirate/ombi.md
@@ -1,9 +1,9 @@
 !!! warning
-    This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity.
+    This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity.

 # Ombi

-[Ombi](https://github.com/tidusjar/Ombi) is a useful addition to the [autopirate](/recipies/autopirate/) stack.
+[Ombi](https://github.com/tidusjar/Ombi) is a useful addition to the [autopirate](/recipes/autopirate/) stack.
Features include: * Lets users request Movies and TV Shows (_whether it being the entire series, an entire season, or even single episodes._) * Easily manage your requests @@ -17,7 +17,7 @@ Automatically updates the status of requests when they are available on Plex/Emb ## Inclusion into AutoPirate -To include Ombi in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Ombi in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` ombi: @@ -29,9 +29,8 @@ ombi: - internal ombi_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/ombi.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -57,20 +56,23 @@ ombi_proxy: ## Assemble more tools.. -Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* 
[NZBHydra](/recipes/autopirate/nzbhydra/)
+* [NZBHydra2](/recipes/autopirate/nzbhydra2/)
* Ombi (this page)
-* [Jackett](/recipies/autopirate/jackett/)
-* [End](/recipies/autopirate/end/) (launch the stack)
+* [Jackett](/recipes/autopirate/jackett/)
+* [Heimdall](/recipes/autopirate/heimdall/)
+* [End](/recipes/autopirate/end/) (launch the stack)


## Chef's Notes 📓
diff --git a/manuscript/recipies/autopirate/radarr.md b/manuscript/recipes/autopirate/radarr.md
similarity index 71%
rename from manuscript/recipies/autopirate/radarr.md
rename to manuscript/recipes/autopirate/radarr.md
index 9fcf705..17868fc 100644
--- a/manuscript/recipies/autopirate/radarr.md
+++ b/manuscript/recipes/autopirate/radarr.md
@@ -1,5 +1,5 @@
 !!! warning
-    This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity.
+    This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity.

 # Radarr

@@ -22,9 +22,12 @@
 ![Radarr Screenshot](../../images/radarr.png)

+!!! tip "Sponsored Project"
+    Radarr is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me.
I forget it's there until I (reliably) receive an email with new and exciting updates 😁 + ## Inclusion into AutoPirate -To include Radarr in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Radarr in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` radarr: @@ -37,9 +40,8 @@ radarr: - internal radarr_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/radarr.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -65,20 +67,23 @@ radarr_proxy: ## Assemble more tools.. -Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) -* [RTorrent](/recipies/autopirate/rtorrent/) -* [Sonarr](/recipies/autopirate/sonarr/) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) * Radarr (this page) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* 
[Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipies/autopirate/rtorrent.md b/manuscript/recipes/autopirate/rtorrent.md similarity index 74% rename from manuscript/recipies/autopirate/rtorrent.md rename to manuscript/recipes/autopirate/rtorrent.md index 8bc2d13..152727f 100644 --- a/manuscript/recipies/autopirate/rtorrent.md +++ b/manuscript/recipes/autopirate/rtorrent.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # RTorrent / ruTorrent @@ -13,7 +13,7 @@ When using a torrent client from behind NAT (_which swarm, by nature, is_), you ## Inclusion into AutoPirate -To include ruTorrent in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include ruTorrent in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` rtorrent: @@ -30,7 +30,6 @@ rtorrent: rtorrent_proxy: image: skippy/oauth2_proxy env_file : /var/data/config/autopirate/rtorrent.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -57,20 +56,23 @@ rtorrent_proxy: ## Assemble more tools.. 
-Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipies/autopirate/end/)** section: +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: -* [SABnzbd](/recipies/autopirate/sabnzbd.md) -* [NZBGet](/recipies/autopirate/nzbget.md) +* [SABnzbd](/recipes/autopirate/sabnzbd.md) +* [NZBGet](/recipes/autopirate/nzbget.md) * RTorrent (this page) -* [Sonarr](/recipies/autopirate/sonarr/) -* [Radarr](/recipies/autopirate/radarr/) -* [Mylar](/recipies/autopirate/mylar/) -* [Lazy Librarian](/recipies/autopirate/lazylibrarian/) -* [Headphones](/recipies/autopirate/headphones/) -* [NZBHydra](/recipies/autopirate/nzbhydra/) -* [Ombi](/recipies/autopirate/ombi/) -* [Jackett](/recipies/autopirate/jackett/) -* [End](/recipies/autopirate/end/) (launch the stack) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) ## Chef's Notes 📓 diff --git a/manuscript/recipes/autopirate/sabnzbd.md b/manuscript/recipes/autopirate/sabnzbd.md new file mode 100644 index 0000000..3aebfbb --- /dev/null +++ b/manuscript/recipes/autopirate/sabnzbd.md @@ -0,0 +1,93 @@ +!!! warning + This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + +# SABnzbd + +## Introduction + +SABnzbd is the workhorse of the stack. 
It takes .nzb files as input (_manually or from other [autopirate](/recipes/autopirate/) stack tools_), then connects to your chosen Usenet provider, downloads all the individual binaries referenced by the .nzb, and then tests/repairs/combines/uncompresses them all into the final result - media files. + +![SABNZBD Screenshot](../../images/sabnzbd.png) + +!!! tip "Sponsored Project" + SABnzbd is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. It's not sexy, but it's consistent and reliable, and I enjoy the fruits of its labor near-daily. + +## Inclusion into AutoPirate + +To include SABnzbd in your [AutoPirate](/recipes/autopirate/) stack +(_The only reason you **wouldn't** use SABnzbd, would be if you were using [NZBGet](/recipes/autopirate/nzbget.md) instead_), include the following in your autopirate.yml stack definition file: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + +```` +sabnzbd: + image: linuxserver/sabnzbd:latest + env_file : /var/data/config/autopirate/sabnzbd.env + volumes: + - /var/data/autopirate/sabnzbd:/config + - /var/data/media:/media + networks: + - internal + +sabnzbd_proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/autopirate/sabnzbd.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:sabnzbd.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/autopirate/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://sabnzbd:8080 + -redirect-url=https://sabnzbd.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt +```` + +!!! warning "Important Note re hostname validation" + + (**Updated 10 June 2018**) : In SABnzbd [2.3.3](https://sabnzbd.org/wiki/extra/hostname-check.html), hostname verification was added as a mandatory check. SABnzbd will refuse inbound connections which weren't addressed to its own (_initially, autodetected_) hostname. This presents a problem within Docker Swarm, where container hostnames are random and disposable. + + You'll need to edit sabnzbd.ini (_only created after your first launch_), and **replace** the value in ```host_whitelist``` configuration (_it's comma-separated_) with the name of your service within the swarm definition, as well as your FQDN as accessed via traefik. + + For example, mine simply reads ```host_whitelist = sabnzbd.funkypenguin.co.nz, sabnzbd``` + +## Assemble more tools.. 
+ +Continue through the list of tools below, adding whichever tools your want to use, and finishing with the **[end](/recipes/autopirate/end/)** section: + +* SABnzbd (this page) +* [NZBGet](/recipes/autopirate/nzbget.md) +* [RTorrent](/recipes/autopirate/rtorrent/) +* [Sonarr](/recipes/autopirate/sonarr/) +* [Radarr](/recipes/autopirate/radarr/) +* [Mylar](/recipes/autopirate/mylar/) +* [Lazy Librarian](/recipes/autopirate/lazylibrarian/) +* [Headphones](/recipes/autopirate/headphones/) +* [Lidarr](/recipes/autopirate/lidarr/) +* [NZBHydra](/recipes/autopirate/nzbhydra/) +* [NZBHydra2](/recipes/autopirate/nzbhydra2/) +* [Ombi](/recipes/autopirate/ombi/) +* [Jackett](/recipes/autopirate/jackett/) +* [Heimdall](/recipes/autopirate/heimdall/) +* [End](/recipes/autopirate/end/) (launch the stack) + + +## Chef's Notes 📓 + +1. In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. You can identify the tool-specific port by looking at the docker-compose service definition. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/autopirate/sonarr.md b/manuscript/recipes/autopirate/sonarr.md similarity index 63% rename from manuscript/recipies/autopirate/sonarr.md rename to manuscript/recipes/autopirate/sonarr.md index d31a2d2..eeb5bf5 100644 --- a/manuscript/recipies/autopirate/sonarr.md +++ b/manuscript/recipes/autopirate/sonarr.md @@ -1,5 +1,5 @@ !!! 
warning - This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [AutoPirate](/recipes/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Sonarr @@ -8,10 +8,12 @@ ![Sonarr Screenshot](../../images/sonarr.png) +!!! tip "Sponsored Project" + Sonarr is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. I forget it's there until I (reliably) receive an email with new and exciting updates 😁 ## Inclusion into AutoPirate -To include Sonarr in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: +To include Sonarr in your [AutoPirate](/recipes/autopirate/) stack, include the following in your autopirate.yml stack definition file: ``` sonarr: @@ -24,9 +26,8 @@ sonarr: - internal sonarr_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/autopirate/sonarr.env - dns_search: myswarm.example.com networks: - internal - traefik_public @@ -52,20 +53,23 @@ sonarr_proxy: ## Assemble more tools.. 
+Continue through the list of tools below, adding whichever tools you want to use, and finishing with the **[end](/recipes/autopirate/end/)** section:
+ +A friendly middle ground between heavyweights like MediaWiki or Confluence and [Gollum](/recipes/gollum/), BookStack relies on a database backend (so searching and versioning is easy), but limits itself to a pre-defined, 3-tier structure (book, chapter, page). The result is a lightweight, approachable personal documentation stack, which includes search and Markdown editing. + +![BookStack Screenshot](../images/bookstack.png) + +I like to protect my public-facing web UIs with an [oauth_proxy](/reference/oauth_proxy), ensuring that if an application bug (or a user misconfiguration) exposes the app to unplanned public scrutiny, I have a second layer of defense. + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need several directories to bind-mount into our container, so create them in /var/data/bookstack: + +``` +mkdir -p /var/data/bookstack/database-dump +mkdir -p /var/data/runtime/bookstack/db +``` + +### Prepare environment + +Create bookstack.env, and populate with the following variables. Set the [oauth_proxy](/reference/oauth_proxy) variables provided by your OAuth provider (if applicable.) + +``` +# For oauth-proxy (optional) +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= + +# For MariaDB/MySQL database +MYSQL_RANDOM_ROOT_PASSWORD=true +MYSQL_DATABASE=bookstack +MYSQL_USER=bookstack +MYSQL_PASSWORD=secret + +# Bookstack-specific variables +DB_HOST=bookstack_db:3306 +DB_DATABASE=bookstack +DB_USERNAME=bookstack +DB_PASSWORD=secret +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! 
tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + + db: + image: mariadb:10 + env_file: /var/data/config/bookstack/bookstack.env + networks: + - internal + volumes: + - /var/data/runtime/bookstack/db:/var/lib/mysql + + proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/bookstack/bookstack.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:bookstack.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/bookstack/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://app + -redirect-url=https://bookstack.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + app: + image: solidnerd/bookstack + env_file: /var/data/config/bookstack/bookstack.env + networks: + - internal + + db-backup: + image: mariadb:10 + env_file: /var/data/config/bookstack/bookstack.env + volumes: + - /var/data/bookstack/database-dump:/dump + - /etc/localtime:/etc/localtime:ro + entrypoint: | + bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.sql.gz + (ls -t /dump/dump*.sql.gz|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.sql.gz)|sort|uniq -u|xargs rm -- {} + sleep $$BACKUP_FREQUENCY + done + EOF' + networks: + - internal + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.33.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. 
See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Bookstack stack + +Launch the BookStack stack by running ```docker stack deploy bookstack -c ``` + +Log into your new instance at https://**YOUR-FQDN**, authenticate with oauth_proxy, and then login with username 'admin@admin.com' and password 'password'. + +## Chef's Notes + +1. If you wanted to expose the BookStack UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the bookstack container. You'd also need to add the traefik_public network to the bookstack container. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/calibre-web.md b/manuscript/recipes/calibre-web.md similarity index 89% rename from manuscript/recipies/calibre-web.md rename to manuscript/recipes/calibre-web.md index d335762..e3bf484 100644 --- a/manuscript/recipies/calibre-web.md +++ b/manuscript/recipes/calibre-web.md @@ -2,9 +2,9 @@ hero: Manage your ebook collection. Like a BOSS. # Calibre-Web -The [AutoPirate](/recipies/autopirate/) recipe includes [Lazy Librarian](https://github.com/itsmegb/LazyLibrarian), a tool for tracking, finding, and downloading eBooks. However, after the eBooks are downloaded, Lazy Librarian is not much use for organising, tracking, and actually **reading** them. +The [AutoPirate](/recipes/autopirate/) recipe includes [Lazy Librarian](https://github.com/itsmegb/LazyLibrarian), a tool for tracking, finding, and downloading eBooks. However, after the eBooks are downloaded, Lazy Librarian is not much use for organising, tracking, and actually **reading** them. 
-[Calibre-Web](https://github.com/janeczku/calibre-web) could be described as "_[Plex](/recipies/plex/) (or [Emby](/recipies/emby/)) for eBooks_" - it's a web-based interface to manage your eBook library, screenshot below: +[Calibre-Web](https://github.com/janeczku/calibre-web) could be described as "_[Plex](/recipes/plex/) (or [Emby](/recipes/emby/)) for eBooks_" - it's a web-based interface to manage your eBook library, screenshot below: ![Calibre-Web Screenshot](../images/calibre-web.png) @@ -77,7 +77,7 @@ services: - internal proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/calibre-web/calibre-web.env dns_search: hq.example.com networks: @@ -125,7 +125,7 @@ Log into your new instance at https://**YOUR-FQDN**. You'll be directed to the i ## Chef's Notes 1. Yes, Calibre does provide a server component. But it's not as fully-featured as Calibre-Web (_i.e., you can't use it to send ebooks directly to your Kindle_) -2. A future enhancement might be integrating this recipe with the filestore for [NextCloud](/recipies/nextcloud/), so that the desktop database (Calibre) can be kept synced with Calibre-Web. +2. A future enhancement might be integrating this recipe with the filestore for [NextCloud](/recipes/nextcloud/), so that the desktop database (Calibre) can be kept synced with Calibre-Web. ### Tip your waiter (donate) diff --git a/manuscript/recipes/collabora-online.md b/manuscript/recipes/collabora-online.md new file mode 100644 index 0000000..8e02660 --- /dev/null +++ b/manuscript/recipes/collabora-online.md @@ -0,0 +1,311 @@ +# Collabora Online + +!!! important + Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! 
+ + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + +Collabora Online Development Edition (or "[CODE](https://www.collaboraoffice.com/code/#what_is_code)"), is the lightweight, or "home" edition of the commercially-supported [Collabora Online](https://www.collaboraoffice.com/collabora-online/) platform. It + +It's basically the [LibreOffice](https://www.libreoffice.org/) interface in a web-browser. CODE is not a standalone app, it's a backend intended to be accessed via "WOPI" from an existing interface (_in our case, [NextCloud](/recipes/nextcloud/)_) + +![CODE Screenshot](../images/collabora-online.png) + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname (_i.e. "collabora.your-domain.com"_) you intend to use for LDAP Account Manager, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP +4. [NextCloud](/recipes/nextcloud/) installed and operational +5. [Docker-compose](https://docs.docker.com/compose/install/) installed on your node(s) - this is a special case which needs to run outside of Docker Swarm + +## Preparation + +### Explanation for complexity + +Due to the clever magic that Collabora does to present a "headless" LibreOffice UI to the browser, the CODE docker container requires system capabilities which cannot be granted under Docker Swarm (_specifically, MKNOD_). + +So we have to run Collabora itself in the next best thing to Docker swarm - a docker-compose stack. Using docker-compose will at least provide us with consistent and version-able configuration files. + +This presents another problem though - Docker Swarm with Traefik is superb at making all our stacks "just work" with ingress routing and LetsEncyrpt certificates. 
We don't want to have to do this manually (_like a cave-man_), so we engage in some trickery to allow us to still use our swarmed Traefik to terminate SSL. + +We run a single swarmed Nginx instance, which forwards all requests to an upstream, with the target IP of the docker0 interface, on port 9980 (_the port exposed by the CODE container_) + +We attach the necessary labels to the Nginx container to instruct Trafeik to setup a front/backend for collabora.. Now incoming requests to **https://collabora.** will hit Traefik, be forwarded to nginx (_wherever in the swarm it's running_), and then to port 9980 on the same node that nginx is running on. + +What if we're running multiple nodes in our swarm, and nginx ends up on a different node to the one running Collabora via docker-compose? Well, either constrain nginx to the same node as Collabora (_example below_), or just launch an instance of Collabora on _every_ node then. It's just a rendering / GUI engine after all, it doesn't hold any persistent data. + +Here's a (_highly technical_) diagram to illustrate: + +![CODE traffic flow](../images/collabora-traffic-flow.png) + +### Setup data locations + +We'll need a directory for holding config to bind-mount into our containers, so create ```/var/data/collabora```, and ```/var/data/config/collabora``` for holding the docker/swarm config + +``` +mkdir /var/data/collabora/ +mkdir /var/data/config/collabora/ +``` + +### Prepare environment + +Create /var/data/config/collabora/collabora.env, and populate with the following variables, customized for your installation. + +!!! warning + Note the following: + + 1. Variables are in lower-case, unlike our standard convention. This is to align with the CODE container + 2. Set domain to your [NextCloud](/recipes/nextcloud/) domain, and escape all the periods as per the example + 3. Set your server_name to collabora.. Escaping periods is unnecessary + 4. 
Your password cannot include triangular brackets - the entrypoint script will insert this password into an XML document, and triangular brackets will make bad(tm) things happen 🔥 + +``` +username=admin +password=ilovemypassword +domain=nextcloud\.batcave\.com +server_name=collabora.batcave.com +termination=true +``` + +### Create docker-compose.yml + +Create ```/var/data/config/collabora/docker-compose.yml``` as follows: + +``` +version: "3.0" + +services: + local-collabora: + image: funkypenguin/collabora + # the funkypenguin version has a patch to include "termination" behind SSL-terminating reverse proxy (traefik), see CODE PR #50. + # Once merged, the official container can be used again. + #image: collabora/code + env_file: /var/data/config/collabora/collabora.env + volumes: + - /var/data/collabora/loolwsd.xml:/etc/loolwsd/loolwsd.xml-new + cap_add: + - MKNOD + ports: + - 9980:9980 +``` + +### Create nginx.conf + +Create ```/var/data/config/collabora/nginx.conf``` as follows, changing the ```server_name``` value to match the environment variable you established above. + +``` +upstream collabora-upstream { + # Run collabora under docker-compose, since it needs MKNOD cap, which can't be provided by Docker Swarm. + # The IP here is the typical IP of docker0 - change if yours is different. 
+ server 172.17.0.1:9980; +} + +server { + listen 80; + server_name collabora.batcave.com; + + # static files + location ^~ /loleaflet { + proxy_pass http://collabora-upstream; + proxy_set_header Host $http_host; + } + + # WOPI discovery URL + location ^~ /hosting/discovery { + proxy_pass http://collabora-upstream; + proxy_set_header Host $http_host; + } + + # Main websocket + location ~ /lool/(.*)/ws$ { + proxy_pass http://collabora-upstream; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $http_host; + proxy_read_timeout 36000s; + } + + # Admin Console websocket + location ^~ /lool/adminws { + proxy_buffering off; + proxy_pass http://collabora-upstream; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $http_host; + proxy_read_timeout 36000s; + } + + # download, presentation and image upload + location ~ /lool { + proxy_pass https://collabora-upstream; + proxy_set_header Host $http_host; + } +} +``` +### Create loolwsd.xml + +[Until we understand](https://github.com/CollaboraOnline/Docker-CODE/pull/50) how to [pass trusted network parameters to the entrypoint script using environment variables](https://github.com/CollaboraOnline/Docker-CODE/issues/49), we have to maintain a manually edited version of ```loolwsd.xml```, and bind-mount it into our collabora container. + +The way we do this is we mount +```/var/data/collabora/loolwsd.xml``` as ```/etc/loolwsd/loolwsd.xml-new```, then allow the container to create its default ```/etc/loolwsd/loolwsd.xml```, copy this default **over** our ```/var/data/collabora/loolwsd.xml``` as ```/etc/loolwsd/loolwsd.xml-new```, and then update the container to use **our** ```/var/data/collabora/loolwsd.xml``` as ```/etc/loolwsd/loolwsd.xml``` instead (_confused yet?_) + +Create an empty ```/var/data/collabora/loolwsd.xml``` by running ```touch /var/data/collabora/loolwsd.xml```. 
We'll populate this in the next section... + +### Setup Docker Swarm + +Create ```/var/data/config/collabora/collabora.yml``` as follows, changing the traefik frontend_rule as necessary: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + +``` +version: "3.0" + +services: + + nginx: + image: nginx:latest + networks: + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:collabora.batcave.com + - traefik.docker.network=traefik_public + - traefik.port=80 + - traefik.frontend.passHostHeader=true + # uncomment this line if you want to force nginx to always run on one node (i.e., the one running collabora) + #placement: + # constraints: + # - node.hostname == ds1 + volumes: + - /var/data/collabora/nginx.conf:/etc/nginx/conf.d/default.conf:ro + +networks: + traefik_public: + external: true +``` + +## Serving + +### Generate loolwsd.xml + +Well. This is awkward. There's no documented way to make Collabora work with Docker Swarm, so we're doing a bit of a hack here, until I understand [how to pass these arguments](https://github.com/CollaboraOnline/Docker-CODE/issues/49) via environment variables. + +Launching Collabora is (_for now_) a 2-step process. First.. we launch collabora itself, by running: + +``` +cd /var/data/config/collabora/ +docker-compose -d up +``` + +Output looks something like this: + +``` +root@ds1:/var/data/config/collabora# docker-compose up -d +WARNING: The Docker Engine you're using is running in swarm mode. + +Compose does not use swarm mode to deploy services to multiple nodes in a swarm. All containers will be scheduled on the current node. + +To deploy your application across the swarm, use `docker stack deploy`. 
+ +Pulling local-collabora (funkypenguin/collabora:latest)... +latest: Pulling from funkypenguin/collabora +7b8b6451c85f: Pull complete +ab4d1096d9ba: Pull complete +e6797d1788ac: Pull complete +e25c5c290bde: Pull complete +4b8e1b074e06: Pull complete +f51a3d1fb75e: Pull complete +8b826e2ae5ad: Pull complete +Digest: sha256:6cd38cb5cbd170da0e3f0af85cecf07a6bc366e44555c236f81d5b433421a39d +Status: Downloaded newer image for funkypenguin/collabora:latest +Creating collabora_local-collabora_1 ... +Creating collabora_local-collabora_1 ... done +root@ds1:/var/data/config/collabora# +``` + +Now exec into the container (_from another shell session_), by running ```exec -it /bin/bash```. Make a copy of /etc/loolwsd/loolwsd, by running ```cp /etc/loolwsd/loolwsd.xml /etc/loolwsd/loolwsd.xml-new```, and then exit the container with ```exit```. + +Delete the collabora container by hitting CTRL-C in the docker-compose shell, running ```docker-compose rm```, and then altering this line in docker-compose.yml: + +``` + - /var/data/collabora/loolwsd.xml:/etc/loolwsd/loolwsd.xml-new +``` + +To this: + +``` + - /var/data/collabora/loolwsd.xml:/etc/loolwsd/loolwsd.xml +``` + +Edit /var/data/collabora/loolwsd.xml, find the **storage.filesystem.wopi** section, and add lines like this to the existing allow rules (_to allow IPv6-enabled hosts to still connect with their IPv4 addreses_): + +``` +::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3} +``` + +Find the **net.post_allow** section, and add a line like this: + +``` +::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3} +::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3} +``` + +Find these 2 lines: + +``` + + true +``` + +And change to: + +``` + + false 
+Now re-launch collabora (_with the correct loolwsd.xml_) under docker-compose, by running:
+
+```
+docker-compose up -d
+```
+
+Once collabora is up, we launch the swarm stack, by running:
+
+```
+docker stack deploy collabora -c /var/data/config/collabora/collabora.yml
+```
+
+Visit **https://collabora.your-domain.com/loleaflet/dist/admin/admin.html** and confirm you can login with the user/password you specified in collabora.env
💬 diff --git a/manuscript/recipies/cryptominer.md b/manuscript/recipes/cryptominer.md similarity index 63% rename from manuscript/recipies/cryptominer.md rename to manuscript/recipes/cryptominer.md index f741206..25ba6d9 100644 --- a/manuscript/recipies/cryptominer.md +++ b/manuscript/recipes/cryptominer.md @@ -6,23 +6,24 @@ This is a diversion from my usual recipes - recently I've become interested in c I honestly didn't expect to enjoy the mining process as much as I did. Part of the enjoyment was getting my hands dirty with hardware. -Since a [mining rig](/recipies/cryptominer/mining-rig/) relies on hardware, we can't really use a docker swarm for this one! +Since a [mining rig](/recipes/cryptominer/mining-rig/) relies on hardware, we can't really use a docker swarm for this one! -![NAME Screenshot](../images/cryptominer.png) +![CryptoMiner Screenshot](../images/cryptominer.png) This recipe isn't for everyone - if you just want to make some money from cryptocurrency, then you're better off learning to [invest](https://www.reddit.com/r/CryptoCurrency/) or [trade](https://www.reddit.com/r/CryptoMarkets/). However, if you want to (_ideally_) make money **and** you like tinkering, playing with hardware, optimising and monitoring, read on! ## Ingredients -1. Suitable system guts (_CPU, motherboard, RAM, PSU_) for your [mining rig](/recipies/cryptominer/mining-rig/) -2. [AMD](/recipies/cryptominer/amd-gpu/) / [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs (_yes, plural, since although you **can** start with just one, you'll soon get hooked!_) +1. Suitable system guts (_CPU, motherboard, RAM, PSU_) for your [mining rig](/recipes/cryptominer/mining-rig/) +2. [AMD](/recipes/cryptominer/amd-gpu/) / [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs (_yes, plural, since although you **can** start with just one, you'll soon get hooked!_) 3. 
A friendly operating system ([Ubuntu](https://www.ubuntu.com/)/[Debian](https://www.debian.org/)/[CentOS7](https://www.centos.org/download/)) are known to work 4. Patience and time ## Preparation -For readability, I've split this recipe into multiple sub-recipies, which can be found below, or in the navigation links on the right-hand side: +For readability, I've split this recipe into multiple sub-recipes, which can be found below, or in the navigation links on the right-hand side: +<<<<<<< HEAD:manuscript/recipies/cryptominer.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -30,6 +31,15 @@ For readability, I've split this recipe into multiple sub-recipies, which can be 5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 
💰 +>>>>>>> master:manuscript/recipes/cryptominer.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/amd-gpu.md b/manuscript/recipes/cryptominer/amd-gpu.md similarity index 89% rename from manuscript/recipies/cryptominer/amd-gpu.md rename to manuscript/recipes/cryptominer/amd-gpu.md index 3a45288..679eb89 100644 --- a/manuscript/recipies/cryptominer/amd-gpu.md +++ b/manuscript/recipes/cryptominer/amd-gpu.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. # AMD GPU @@ -149,6 +149,7 @@ If you want to tweak the BIOS yourself, download the [Polaris bios editor](https Now, continue to the next stage of your grand mining adventure: +<<<<<<< HEAD:manuscript/recipies/cryptominer/amd-gpu.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your AMD (_this page_) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -156,6 +157,15 @@ Now, continue to the next stage of your grand mining adventure: 4. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 5. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 6. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your AMD (_this page_) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +3. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +4. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 +5. 
[Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +6. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/amd-gpu.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/exchange.md b/manuscript/recipes/cryptominer/exchange.md similarity index 78% rename from manuscript/recipies/cryptominer/exchange.md rename to manuscript/recipes/cryptominer/exchange.md index 58f82d2..a99453b 100644 --- a/manuscript/recipies/cryptominer/exchange.md +++ b/manuscript/recipes/cryptominer/exchange.md @@ -1,9 +1,9 @@ !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Exchange -You may be mining a particular coin, and want to hold onto it, in the hopes of long-term growth. In that case, stick it in a [wallet](/recipies/cryptominer/wallet/) and be done with it. +You may be mining a particular coin, and want to hold onto it, in the hopes of long-term growth. In that case, stick it in a [wallet](/recipes/cryptominer/wallet/) and be done with it. You may also not care too much about the coin (you're mining for money, right?), in which case you want to "cash out" your coins into something you can spend. @@ -37,6 +37,7 @@ Once you have enough coins in your exchange wallet, you can "trade" them into th Now, continue to the next stage of your grand mining adventure: +<<<<<<< HEAD:manuscript/recipies/cryptominer/exchange.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. 
Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -44,6 +45,15 @@ Now, continue to the next stage of your grand mining adventure: 5. Send your coins to exchanges (_This page_) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to exchanges (_This page_) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/exchange.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/minerhotel.md b/manuscript/recipes/cryptominer/minerhotel.md similarity index 89% rename from manuscript/recipies/cryptominer/minerhotel.md rename to manuscript/recipes/cryptominer/minerhotel.md index 2e21b2f..fbb103a 100644 --- a/manuscript/recipies/cryptominer/minerhotel.md +++ b/manuscript/recipes/cryptominer/minerhotel.md @@ -1,12 +1,12 @@ # Minerhotel !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. So, you have GPUs. You can mine cryptocurrency. But **what** cryptocurrency should you mine? 1. You could manually keep track of [whattomine](http://whattomine.com/), and launch/stop miners based on profitability/convenience, as you see fit. -2. 
+2. You can automate the process of mining the most profitable coin based on your GPUs' capabilities and the current market prices, and do better things with your free time! (_[receiving alerts](/recipes/cryptominer/monitor/), of course, if anything stops working!_)
💰 +>>>>>>> master:manuscript/recipes/cryptominer/minerhotel.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/mining-pool.md b/manuscript/recipes/cryptominer/mining-pool.md similarity index 74% rename from manuscript/recipies/cryptominer/mining-pool.md rename to manuscript/recipes/cryptominer/mining-pool.md index 9f2c0a0..a8bb043 100644 --- a/manuscript/recipies/cryptominer/mining-pool.md +++ b/manuscript/recipes/cryptominer/mining-pool.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Mining Pools @@ -11,7 +11,7 @@ You and your puny GPUs don't have a snowball's chance of mining a block on your This'll save you some frustration later... Next time you're watching a movie or doing something mindless, visit http://whattomine.com/, and take note of the 10-15 most profitable coins for your GPU type(s). -On your [exchanges](/recipies/cryptominer/exchange/), identify the "_deposit address_" for each popular coin, and note them down for the next step. +On your [exchanges](/recipes/cryptominer/exchange/), identify the "_deposit address_" for each popular coin, and note them down for the next step. !!! note If you're wanting to mine directly to a wallet for long-term holding, then substitute your wallet public address for this deposit address. @@ -20,7 +20,7 @@ Now work your way through the following list of pools, creating an account on ea * [Mining Pool Hub](https://miningpoolhub.com/) (Lots of coins) * [NiceHash](https://nicehash.com) (Ethereum, Decred) -* [suprnova](https://suprnova.cc/) - Lots of coins, but, you generally need a separate login for each pool. 
You _also_ need to create a worker in each pool with a common username and password, for [Minerhotel](/recipies/crytominer/minerhotel/). +* [suprnova](https://suprnova.cc/) - Lots of coins, but, you generally need a separate login for each pool. You _also_ need to create a worker in each pool with a common username and password, for [Minerhotel](/recipes/cryptominer/minerhotel/). * [nanopool](https://nanopool.org/) (Ethereum, Ethereum Classic, SiaCoin, ZCash, Monero, Pascal and Electroneum) * [slushpool](https://slushpool.com/home/) (BTC and ZCash) @@ -40,6 +40,7 @@ As noted by IronicBadger [here](https://www.linuxserver.io/2018/01/20/how-to-bui Now, continue to the next stage of your grand mining adventure: +<<<<<<< HEAD:manuscript/recipies/cryptominer/mining-pool.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -47,6 +48,15 @@ Now, continue to the next stage of your grand mining adventure: 5. Send your coins to exchanges (_This page_) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to exchanges (_This page_) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 
💰 +>>>>>>> master:manuscript/recipes/cryptominer/mining-pool.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/mining-rig.md b/manuscript/recipes/cryptominer/mining-rig.md similarity index 76% rename from manuscript/recipies/cryptominer/mining-rig.md rename to manuscript/recipes/cryptominer/mining-rig.md index c78c225..c2985d4 100644 --- a/manuscript/recipies/cryptominer/mining-rig.md +++ b/manuscript/recipes/cryptominer/mining-rig.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Mining Rig @@ -22,7 +22,7 @@ You don't need anything fancy. Here's a photo of the rig my wife built me: I recommend this design (_with the board with little holes in it_) - it takes up more space, but I have more room to place extra components (_PSUs, hard drives, etc_), as illustrated below: !!! note - You'll note the hard drives in the picture - that's not part of the mining requirements, it's because my rig doubles as my [Plex](/recipies/plex/) server ;) + You'll note the hard drives in the picture - that's not part of the mining requirements, it's because my rig doubles as my [Plex](/recipes/plex/) server ;) ![My mining rig, populated](../../images/mining_rig_populated.jpg) @@ -31,12 +31,21 @@ I recommend this design (_with the board with little holes in it_) - it takes up Now, continue to the next stage of your grand mining adventure: 1. Build your mining rig 💻 (This page) +<<<<<<< HEAD:manuscript/recipies/cryptominer/mining-rig.md 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: 4. 
Setup your miners with [Miner Hotel](/recipies/cryptominer/minerhotel/) 🏨 5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/mining-rig.md diff --git a/manuscript/recipies/cryptominer/monitor.md b/manuscript/recipes/cryptominer/monitor.md similarity index 81% rename from manuscript/recipies/cryptominer/monitor.md rename to manuscript/recipes/cryptominer/monitor.md index 341d0db..12ecb05 100644 --- a/manuscript/recipies/cryptominer/monitor.md +++ b/manuscript/recipes/cryptominer/monitor.md @@ -1,7 +1,7 @@ # Monitor !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. So, you're a miner! But if you're not **actively** mining, are you still a miner? This page details how to **measure** your mining activity, and how to raise an alert when a profit-affecting issue affects your miners. @@ -18,7 +18,7 @@ So, you're a miner! 
But if you're not **actively** mining, are you still a miner ![Visualise mining performance](../../images/cryptominer_grafana.png) -Since [Minerhotel](/recipies/crytominer/minerhotel/) switches currency based on what's most profitable in the moment, it's hard to gauge the impact of changes (overclocking, tweaking, mining pools) over time. +Since [Minerhotel](/recipes/cryptominer/minerhotel/) switches currency based on what's most profitable in the moment, it's hard to gauge the impact of changes (overclocking, tweaking, mining pools) over time. I hacked up a bash script which grabs performance data from the output of the miners, and throws it into an InfluxDB database, which can then be visualized using Grafana. @@ -49,7 +49,7 @@ I've tried several iOS apps for monitoring my performance across various. The mo ### Track your portfolio -Now that you've got your coins happily cha-chinging into you [wallets](/recipies/cryptominer/wallet/) (_and potentially various [exchanges](/recipies/cryptominer/exchange/)_), you'll want to monitor the performance of your portfolio over time. +Now that you've got your coins happily cha-chinging into your [wallets](/recipes/cryptominer/wallet/) (_and potentially various [exchanges](/recipes/cryptominer/exchange/)_), you'll want to monitor the performance of your portfolio over time. #### Web Apps @@ -74,13 +74,17 @@ I've found the following iOS apps to be useful in tracking my portfolio (_really Now, continue to the next stage of your grand mining adventure: -1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 -2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 -3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: -4. Setup your miners with [Miner Hotel](/recipies/cryptominer/minerhotel/) 🏨 -5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 +1. 
Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 6. Monitor your empire :heartbeat: (_this page_) +<<<<<<< HEAD:manuscript/recipies/cryptominer/monitor.md 7. [Profit](/recipies/cryptominer/profit/)! +======= +7. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/monitor.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/nvidia-gpu.md b/manuscript/recipes/cryptominer/nvidia-gpu.md similarity index 87% rename from manuscript/recipies/cryptominer/nvidia-gpu.md rename to manuscript/recipes/cryptominer/nvidia-gpu.md index b3c0c23..70c9d51 100644 --- a/manuscript/recipies/cryptominer/nvidia-gpu.md +++ b/manuscript/recipes/cryptominer/nvidia-gpu.md @@ -1,7 +1,7 @@ # NVidia GPU !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. ## Ingredients @@ -146,6 +146,7 @@ Play with changing your settings.conf file until you break it, and then go back Now, continue to the next stage of your grand mining adventure: +<<<<<<< HEAD:manuscript/recipies/cryptominer/nvidia-gpu.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or Nvidia (_this page_) GPUs 🎨 3. 
Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -153,6 +154,15 @@ Now, continue to the next stage of your grand mining adventure: 5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or Nvidia (_this page_) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/nvidia-gpu.md ## Chef's Notes diff --git a/manuscript/recipies/cryptominer/profit.md b/manuscript/recipes/cryptominer/profit.md similarity index 64% rename from manuscript/recipies/cryptominer/profit.md rename to manuscript/recipes/cryptominer/profit.md index 17db314..41c46da 100644 --- a/manuscript/recipies/cryptominer/profit.md +++ b/manuscript/recipes/cryptominer/profit.md @@ -6,6 +6,7 @@ Well, that's it really. You're a cryptominer. Welcome to the party. To recap, you did all this: +<<<<<<< HEAD:manuscript/recipies/cryptominer/profit.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -13,6 +14,15 @@ To recap, you did all this: 5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or [wallets](/recipies/cryptominer/wallet/) 💹 6. [Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. Profit! 
(_This page_) +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or [wallets](/recipes/cryptominer/wallet/) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. Profit! (_This page_) 💰 +>>>>>>> master:manuscript/recipes/cryptominer/profit.md ## What next? diff --git a/manuscript/recipies/cryptominer/wallet.md b/manuscript/recipes/cryptominer/wallet.md similarity index 68% rename from manuscript/recipies/cryptominer/wallet.md rename to manuscript/recipes/cryptominer/wallet.md index 0eb2d91..40edaaf 100644 --- a/manuscript/recipies/cryptominer/wallet.md +++ b/manuscript/recipes/cryptominer/wallet.md @@ -1,5 +1,5 @@ !!! warning - This is not a complete recipe - it's a component of the [cryptominer](/recipies/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. + This is not a complete recipe - it's a component of the [cryptominer](/recipes/cryptominer/) "_uber-recipe_", but has been split into its own page to reduce complexity. # Wallet @@ -23,6 +23,7 @@ I mine most of my coins to Exchanges, but I do have the following wallets: Now, continue to the next stage of your grand mining adventure: +<<<<<<< HEAD:manuscript/recipies/cryptominer/wallet.md 1. Build your [mining rig](/recipies/cryptominer/mining-rig/) 💻 2. Setup your [AMD](/recipies/cryptominer/amd-gpu/) or [Nvidia](/recipies/cryptominer/nvidia-gpu/) GPUs 🎨 3. Sign up for [mining pools](/recipies/cryptominer/mining-pool/) :swimmer: @@ -30,6 +31,15 @@ Now, continue to the next stage of your grand mining adventure: 5. Send your coins to [exchanges](/recipies/cryptominer/exchange/) or wallets (_This page_) 💹 6. 
[Monitor](/recipies/cryptominer/monitor/) your empire :heartbeat: 7. [Profit](/recipies/cryptominer/profit/)! +======= +1. Build your [mining rig](/recipes/cryptominer/mining-rig/) 💻 +2. Setup your [AMD](/recipes/cryptominer/amd-gpu/) or [Nvidia](/recipes/cryptominer/nvidia-gpu/) GPUs 🎨 +3. Sign up for [mining pools](/recipes/cryptominer/mining-pool/) :swimmer: +4. Setup your miners with [Miner Hotel](/recipes/cryptominer/minerhotel/) 🏨 +5. Send your coins to [exchanges](/recipes/cryptominer/exchange/) or wallets (_This page_) 💹 +6. [Monitor](/recipes/cryptominer/monitor/) your empire :heartbeat: +7. [Profit](/recipes/cryptominer/profit/)! 💰 +>>>>>>> master:manuscript/recipes/cryptominer/wallet.md ## Chef's Notes diff --git a/manuscript/recipes/cryptonote-mining-pool.md b/manuscript/recipes/cryptonote-mining-pool.md new file mode 100644 index 0000000..135f6e7 --- /dev/null +++ b/manuscript/recipes/cryptonote-mining-pool.md @@ -0,0 +1,16 @@ +# CryptoNote Mining Pool + +[Cryptocurrency miners](/recipes/cryptominer) will "pool" their GPU resources ("_hashpower_") into aggregate "_mining pools_", so that by the combined effort of all the miners, the pool will receive a reward for the blocks "mined" into the blockchain, and this reward will be distributed among the miners. + +[CryptoNote](https://cryptonote.org/) is an open-source toolset designed to facilitate the creation of new privacy-focused [cryptocurrencies](https://cryptonote.org/coins) + +(_CryptoNote = 'Kryptonite'. In a pool. Get it?_) + +![CryptoNote Mining Pool Screenshot](/images/cryptonote-mining-pool.png) + +The fact that all these currencies share a common ancestry means that a common mining pool platform can be used for miners. 
The following recipes all use variations of [Dvandal's cryptonote-nodejs-pool ](https://github.com/dvandal/cryptonote-nodejs-pool) + +## Mining Pool Recipes + +* [TurtleCoin](/recipes/turtle-pool/), the no-BS, fun baby cryptocurrency +* [Athena](/recipes/cryptonote-mining-pool/athena/), TurtleCoin's newborn baby sister diff --git a/manuscript/recipies/duplicity.md b/manuscript/recipes/duplicity.md similarity index 100% rename from manuscript/recipies/duplicity.md rename to manuscript/recipes/duplicity.md diff --git a/manuscript/recipes/elkarbackup.md b/manuscript/recipes/elkarbackup.md new file mode 100644 index 0000000..563593d --- /dev/null +++ b/manuscript/recipes/elkarbackup.md @@ -0,0 +1,255 @@ +hero: Real heroes backup their 💾 + +# Elkar Backup + +Don't be like [Cameron](http://haltandcatchfire.wikia.com/wiki/Cameron_Howe). Backup your stuff. + + + +!!! important + Ongoing development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + +ElkarBackup is a free open-source backup solution based on RSync/RSnapshot. It's basically a web wrapper around rsync/rsnapshot, which means that your backups are just files on a filesystem, utilising hardlinks for tracking incremental changes. I find this result more reassuring than a blob of compressed, (encrypted?) data that [more sophisticated backup solutions](/recipes/duplicity/) would produce for you. + +![ElkarBackup Screenshot](../images/elkarbackup.png) + +## Details + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. 
DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need several directories to bind-mount into our container, so create them in /var/data/elkarbackup: + +``` +mkdir -p /var/data/elkarbackup/{backups,uploads,sshkeys,database-dump} +mkdir -p /var/data/runtime/elkarbackup/db +mkdir -p /var/data/config/elkarbackup +``` + +### Prepare environment + +Create /var/data/config/elkarbackup/elkarbackup.env, and populate with the following variables +``` +SYMFONY__DATABASE__PASSWORD=password +EB_CRON=enabled +TZ='Etc/UTC' + +#SMTP - Populate these if you want email notifications +#SYMFONY__MAILER__HOST= +#SYMFONY__MAILER__USER= +#SYMFONY__MAILER__PASSWORD= +#SYMFONY__MAILER__FROM= + +# For mysql +MYSQL_ROOT_PASSWORD=password + +#oauth2_proxy +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +``` + +Create ```/var/data/config/elkarbackup/elkarbackup-db-backup.env```, and populate with the following, to setup the nightly database dump. + +!!! note + Running a daily database dump might be considered overkill, since ElkarBackup can be configured to backup its own database. However, making my own backup keeps the operation of this stack consistent with **other** stacks which employ MariaDB. + + Also, did you ever hear about the guy who said "_I wish I had fewer backups"? + + No, me either :shrug: + +```` +# For database backup (keep 7 days daily backups) +MYSQL_PWD= +MYSQL_USER=root +BACKUP_NUM_KEEP=7 +BACKUP_FREQUENCY=1d +```` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: "3" + +services: + db: + image: mariadb:10.4 + env_file: /var/data/config/elkarbackup/elkarbackup.env + networks: + - internal + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/data/runtime/elkarbackup/db:/var/lib/mysql + + db-backup: + image: mariadb:10.4 + env_file: /var/data/config/elkarbackup/elkarbackup-db-backup.env + volumes: + - /var/data/elkarbackup/database-dump:/dump + - /etc/localtime:/etc/localtime:ro + entrypoint: | + bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.sql.gz + (ls -t /dump/dump*.sql.gz|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.sql.gz)|sort|uniq -u|xargs rm -- {} + sleep $$BACKUP_FREQUENCY + done + EOF' + networks: + - internal + + app: + image: elkarbackup/elkarbackup + env_file: /var/data/config/elkarbackup/elkarbackup.env + networks: + - internal + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/data/:/var/data + - /var/data/elkarbackup/backups:/app/backups + - /var/data/elkarbackup/uploads:/app/uploads + - /var/data/elkarbackup/sshkeys:/app/.ssh + + proxy: + image: funkypenguin/oauth2_proxy + env_file: /var/data/config/elkarbackup/elkarbackup.env + networks: + - traefik_public + - internal + deploy: + labels: + - traefik.frontend.rule=Host:elkarbackup.example.com + - traefik.port=4180 + volumes: + - /var/data/config/traefik/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://app:80 + -redirect-url=https://elkarbackup.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.36.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. 
This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch ElkarBackup stack + +Launch the ElkarBackup stack by running ```docker stack deploy elkarbackup -c ``` + +Log into your new instance at https://**YOUR-FQDN**, with user "root" and the password default password "root": + +![ElkarBackup Login Screen](/images/elkarbackup-setup-1.png) + +First thing you do, change your password, using the gear icon, and "Change Password" link: + +![ElkarBackup Login Screen](/images/elkarbackup-setup-2.png) + +Have a read of the [Elkarbackup Docs](https://docs.elkarbackup.org/docs/introduction.html) - they introduce the concept of **clients** (_hosts containing data to be backed up_), **jobs** (_what data gets backed up_), **policies** (_when is data backed up and how long is it kept_). + +At the very least, you want to setup a **client** called "_localhost_" with an empty path (_i.e., the job path will be accessed locally, without SSH_), and then add a job to this client to backup /var/data, **excluding** ```/var/data/runtime``` and ```/var/data/elkarbackup/backup``` (_unless you **like** "backup-ception"_) + +### Copying your backup data offsite + +From the WebUI, you can download a script intended to be executed on a remote host, to backup your backup data to an offsite location. This is a **Good Idea**(tm), but needs some massaging for a Docker swarm deployment. + +Here's a variation to the standard script, which I've employed: + +``` +#!/bin/bash + +REPOSITORY=/var/data/elkarbackup/backups +SERVER= +SERVER_USER=elkarbackup +UPLOADS=/var/data/elkarbackup/uploads +TARGET=/srv/backup/elkarbackup + +echo "Starting backup..." +echo "Date: " `date "+%Y-%m-%d (%H:%M)"` + +ssh "$SERVER_USER@$SERVER" "cd '$REPOSITORY'; find . 
-maxdepth 2 -mindepth 2" | sed s/^..// | while read jobId +do + echo Backing up job $jobId + mkdir -p $TARGET/$jobId 2>/dev/null + rsync -aH --delete "$SERVER_USER@$SERVER:$REPOSITORY/$jobId/" $TARGET/$jobId +done + +echo Backing up uploads +rsync -aH --delete "$SERVER_USER@$SERVER":"$UPLOADS/" $TARGET/uploads + +USED=`df -h . | awk 'NR==2 { print $3 }'` +USE=`df -h . | awk 'NR==2 { print $5 }'` +AVAILABLE=`df -h . | awk 'NR==2 { print $4 }'` + +echo "Backup finished successfully!" +echo "Date: " `date "+%Y-%m-%d (%H:%M)"` +echo "" +echo "**** INFO ****" +echo "Used disk space: $USED ($USE)" +echo "Available disk space: $AVAILABLE" +echo "" +``` + +!!! note + You'll note that I don't use the script to create a mysql dump (_since Elkar is running within a container anyway_), rather I just rely on the database dump which is made nightly into ```/var/data/elkarbackup/database-dump/``` + +### Restoring data + +Repeat after me : "**It's not a backup unless you've tested a restore**" + +!!! note + I had some difficulty making restoring work well in the webUI. My attempts to "Restore to client" failed with an SSH error about "localhost" not found. I **was** able to download the backup from my web browser, so I considered it a successful restore, since I can retrieve the backed-up data either from the webUI or from the filesystem directly. + +To restore files from a job, click on the "Restore" button in the WebUI, while on the **Jobs** tab: + +![ElkarBackup Login Screen](/images/elkarbackup-setup-3.png) + +This takes you to a list of backup names and file paths. You can choose to download the entire contents of the backup from your browser as a .tar.gz, or to restore the backup to the client. If you click on the **name** of the backup, you can also drill down into the file structure, choosing to restore a single file or directory. + +!!! important + Ongoing development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! 
+ + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + +## Chef's Notes + +1. If you wanted to expose the ElkarBackup UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the app service. You'd also need to add the traefik_public network to the app service. +2. The original inclusion of ElkarBackup was due to the efforts of @gpulido in our [Discord server](http://chat.funkypenguin.co.nz). Thanks Gabriel! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/emby.md b/manuscript/recipes/emby.md similarity index 89% rename from manuscript/recipies/emby.md rename to manuscript/recipes/emby.md index 6ed9577..d5586d8 100644 --- a/manuscript/recipies/emby.md +++ b/manuscript/recipes/emby.md @@ -1,6 +1,6 @@ # Emby -[Emby](https://emby.media/) (_think "M.B." or "Media Browser"_) is best described as "_like [Plex](/recipies/plex/) but different_" 😁 - It's a bit geekier and less polished than Plex, but it allows for more flexibility and customization. +[Emby](https://emby.media/) (_think "M.B." or "Media Browser"_) is best described as "_like [Plex](/recipes/plex/) but different_" 😁 - It's a bit geekier and less polished than Plex, but it allows for more flexibility and customization. ![Emby Screenshot](../images/emby.png) @@ -44,7 +44,7 @@ version: "3.0" services: emby: - image: emby/embyserver + image: emby/emby-server env_file: /var/data/config/emby/emby.env volumes: - /var/data/emby/emby:/config @@ -86,7 +86,7 @@ Log into your new instance at https://**YOUR-FQDN**, and complete the wizard-bas ## Chef's Notes 1. 
I didn't use an [oauth2_proxy](/reference/oauth_proxy/) for this stack, because it would interfere with mobile client support. -2. I used the LinuxServer docker container, even though still under "active development", to maintain consistency with the [Plex](/recipies/plex/) and [autopirate](/recipies/autopirate/) recipies. +2. Got an NVIDIA GPU? See [this blog post](https://www.funkypenguin.co.nz/note/gpu-transcoding-with-emby-plex-using-docker-nvidia/) re how to use your GPU to transcode your media! 3. We don't bother exposing the HTTPS port for Emby, since [Traefik](/ha-docker-swarm/traefik/) is doing the SSL termination for us already. ### Tip your waiter (donate) diff --git a/manuscript/recipes/flightairmap.md b/manuscript/recipes/flightairmap.md new file mode 100644 index 0000000..ea9f1d8 --- /dev/null +++ b/manuscript/recipes/flightairmap.md @@ -0,0 +1,60 @@ +version: '3' +services: + flightairmap: + image: richarvey/nginx-php-fpm + volumes: + - "/var/data/flightairmap/conf:/var/www/html/conf" + - "/var/data/flightairmap/scripts:/var/www/html/scripts" + - "/var/data/flightairmap/html:/var/www/flightairmap/" + env_file: + - "/var/data/config/flightairmap/flightairmap.env" + environment: + - PHP_MEM_LIMIT=256 + - RUN_SCRIPTS=1 + - MYSQL_HOST=${MYSQL_HOST} + - MYSQL_DATABASE=${MYSQL_DATABASE} + - MYSQL_USER=${MYSQL_USER} + - MYSQL_PASSWORD=${MYSQL_PASSWORD} + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:www.observe.global + - traefik.docker.network=traefik_public + - traefik.port=80 + + db: + image: mariadb:10 + env_file: /var/data/config/flightairmap/flightairmap.env + networks: + - internal + volumes: + - /var/data/runtime/flightairmap/db:/var/lib/mysql + + db-backup: + image: mariadb:10 + env_file: /var/data/config/flightairmap/flightairmap.env + volumes: + - /var/data/flightairmap/database-dump:/dump + entrypoint: | + bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.sql.gz + (ls -t 
/dump/dump*.sql.gz|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.sql.gz)|sort|uniq -u|xargs rm -- {} + sleep $$BACKUP_FREQUENCY + done + EOF' + networks: + - internal + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.44.0/24 diff --git a/manuscript/recipies/ghost.md b/manuscript/recipes/ghost.md similarity index 100% rename from manuscript/recipies/ghost.md rename to manuscript/recipes/ghost.md diff --git a/manuscript/recipies/gitlab-runner.md b/manuscript/recipes/gitlab-runner.md similarity index 100% rename from manuscript/recipies/gitlab-runner.md rename to manuscript/recipes/gitlab-runner.md diff --git a/manuscript/recipies/gitlab.md b/manuscript/recipes/gitlab.md similarity index 85% rename from manuscript/recipies/gitlab.md rename to manuscript/recipes/gitlab.md index 9c2f354..246d465 100644 --- a/manuscript/recipies/gitlab.md +++ b/manuscript/recipes/gitlab.md @@ -33,18 +33,24 @@ You'll need to know the following: 2. Create gitlab.env, and populate with **at least** the following variables (the full set is available at https://github.com/sameersbn/docker-gitlab#available-configuration-parameters): ``` DB_USER=gitlab -DB_PASS= +DB_PASS=gitlabdbpass +DB_NAME=gitlabhq_production +DB_EXTENSION=pg_trgm +DB_ADAPTER=postgresql +DB_HOST=postgresql TZ=Pacific/Auckland +REDIS_HOST=redis +REDIS_PORT=6379 GITLAB_TIMEZONE=Auckland GITLAB_HTTPS=true SSL_SELF_SIGNED=false -GITLAB_HOST -GITLAB_PORT -GITLAB_SSH_PORT -GITLAB_SECRETS_DB_KEY_BASE -GITLAB_SECRETS_SECRET_KEY_BASE -GITLAB_SECRETS_OTP_KEY_BASE -GITLAB_ROOT_PASSWORD +GITLAB_HOST=gitlab.example.com +GITLAB_PORT=443 +GITLAB_SSH_PORT=2222 +GITLAB_SECRETS_DB_KEY_BASE=CFf7sS3kV2nGXBtMHDsTcjkRX8PWLlKTPJMc3lRc6GCzJDdVljZ85NkkzJ8mZbM5 +GITLAB_SECRETS_SECRET_KEY_BASE=h2LBVffktDgb6BxM3B97mDSjhnSNwLc5VL2Hqzq9cdrvBtVw48WSp5wKj5HZrJM5 +GITLAB_SECRETS_OTP_KEY_BASE=t9LPjnLzbkJ7Nt6LZJj6hptdpgG58MPJPwnMMMDdx27KSwLWHDrz9bMWXQMjq5mp +GITLAB_ROOT_PASSWORD=changeme ``` ### 
Setup Docker Swarm @@ -69,21 +75,18 @@ services: postgresql: image: sameersbn/postgresql:9.6-2 + env_file: /var/data/config/gitlab/gitlab.env volumes: - /var/data/gitlab/postgresql:/var/lib/postgresql:Z networks: - internal - environment: - - DB_USER=gitlab - - DB_PASS= - - DB_NAME=gitlabhq_production - - DB_EXTENSION=pg_trgm gitlab: image: sameersbn/gitlab:latest + env_file: /var/data/config/gitlab/gitlab.env networks: - internal - - traefik + - traefik_public deploy: labels: - traefik.frontend.rule=Host:gitlab.example.com @@ -94,13 +97,12 @@ services: max_attempts: 10 window: 60s ports: - - "10022:22" + - "2222:22" volumes: - /var/data/gitlab/gitlab:/home/git/data:Z - env_file: gitlab.env networks: - traefik: + traefik_public: external: true internal: driver: overlay diff --git a/manuscript/recipies/gollum.md b/manuscript/recipes/gollum.md similarity index 99% rename from manuscript/recipies/gollum.md rename to manuscript/recipes/gollum.md index f8ab547..1471efb 100644 --- a/manuscript/recipies/gollum.md +++ b/manuscript/recipes/gollum.md @@ -81,7 +81,7 @@ services: --user-icons gravatar proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/gollum/gollum.env networks: - internal diff --git a/manuscript/recipies/homeassistant.md b/manuscript/recipes/homeassistant.md similarity index 99% rename from manuscript/recipies/homeassistant.md rename to manuscript/recipes/homeassistant.md index 80b37a8..0c49ac3 100644 --- a/manuscript/recipies/homeassistant.md +++ b/manuscript/recipes/homeassistant.md @@ -85,7 +85,7 @@ services: - internal grafana-proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/config/homeassistant/grafana.env dns_search: hq.example.com networks: diff --git a/manuscript/recipes/homeassistant/ibeacon.md b/manuscript/recipes/homeassistant/ibeacon.md new file mode 100644 index 0000000..875f202 --- /dev/null +++ b/manuscript/recipes/homeassistant/ibeacon.md @@ -0,0 +1,24 @@ +# 
iBeacons with Home Assistant + +!!! warning + This is not a complete recipe - it's an optional addition to the [HomeAssistant](/recipes/homeassistant/) "recipe", since it only applies to a subset of users + +One of the most useful features of Home Assistant is location awareness. I don't care if someone opens my office door when I'm home, but you bet I care about (_and want to be notified_) it if I'm away! + +## Ingredients + +1. [HomeAssistant](/recipes/homeassistant/) per recipe +2. iBeacon(s) - This recipe is for https://s.click.aliexpress.com/e/bzyLCnAp +3. [LightBlue Explorer](https://itunes.apple.com/nz/app/lightblue-explorer/id557428110?mt=8) + +## Preparation + +### Write UUID to iBeacon + +The iBeacons come with no UUID. We use the LightBlue Explorer app to pair with them (_code is "123456"_), and assign our own UUID. + +Generate your own UUID, or get a random one at https://www.uuidgenerator.net/ + +Plug in your iBeacon, launch LightBlue Explorer, and find your iBeacon. The first time you attempt to interrogate it, you'll be prompted to pair. Although it's not recorded anywhere in the documentation (_grr!_), the pairing code is **123456** + +Having paired, you'll be able to see the vital statistics of your iBeacon. diff --git a/manuscript/recipies/huginn.md b/manuscript/recipes/huginn.md similarity index 100% rename from manuscript/recipies/huginn.md rename to manuscript/recipes/huginn.md diff --git a/manuscript/recipies/instapy.md b/manuscript/recipes/instapy.md similarity index 100% rename from manuscript/recipies/instapy.md rename to manuscript/recipes/instapy.md diff --git a/manuscript/recipes/ipfs-cluster.md b/manuscript/recipes/ipfs-cluster.md new file mode 100644 index 0000000..e7fa4cf --- /dev/null +++ b/manuscript/recipes/ipfs-cluster.md @@ -0,0 +1,191 @@ +!!! danger "This recipe is a work in progress" + This recipe is **incomplete**, and remains a work in progress. + So... There may be errors and inaccuracies.
Jump into [Discord](http://chat.funkypenguin.co.nz) if you're encountering issues 😁 + +!!! important + Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/) + +# IPFS + +The intention of this recipe is to provide a local IPFS cluster for the purpose of providing persistent storage for the various components of the recipes + +![IPFS Screenshot](../images/ipfs.png) + +IPFS is a peer-to-peer distributed file system that seeks to connect all computing devices with the same system of files. In some ways, IPFS is similar to the World Wide Web, but IPFS could be seen as a single BitTorrent swarm, exchanging objects within one Git repository. + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) + +## Preparation + +### Setup data locations (per-node) + +Since IPFS may _replace_ ceph or glusterfs as a shared-storage provider for the swarm, we can't use sharded storage to store its persistent data. (🐔, meet :egg:) + +On _each_ node, therefore run the following, to create the persistent data storage for ipfs and ipfs-cluster: + +``` +mkdir -p {/var/ipfs/daemon,/var/ipfs/cluster} +``` + +### Setup environment + +ipfs-cluster nodes require a common secret, a 32-byte hex-encoded string, in order to "trust" each other, so generate one, and add it to ipfs.env on your first node, by running ```od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n'; echo``` + +Now on _each_ node, create ```/var/data/config/ipfs/ipfs.env```, including both the secret, *and* the IP of docker0 interface on your hosts (_on my hosts, this is always 172.17.0.1_). We do this (_the trick with docker0_) to allow ipfs-cluster to talk to the local ipfs daemon, per-node: + +``` +SECRET= + +# Use docker0 to access daemon +IPFS_API=/ip4/172.17.0.1/tcp/5001 +``` + +### Create docker-compose file + +Yes, I know. It's not as snazzy as docker swarm. Maybe we'll get there.
But this implementation uses docker-compose, so create the following (_identical_) docker-compose.yml on each node: + +``` +version: "3" + +services: + cluster: + image: ipfs/ipfs-cluster + volumes: + - /var/ipfs/cluster:/data/ipfs-cluster + env_file: /var/data/config/ipfs/ipfs.env + ports: + - 9095:9095 + - 9096:9096 + depends_on: + - daemon + + daemon: + image: ipfs/go-ipfs + ports: + - 4001:4001 + - 5001:5001 + - 8080:8080 + volumes: + - /var/ipfs/daemon:/data/ipfs +``` + +### Launch independent nodes + +Launch all nodes independently with ```docker-compose -f ipfs.yml up```. At this point, the nodes are each running independently, unaware of each other. But we do this to ensure that service.json is populated on each node, using the IPFS_API environment variable we specified in ipfs.env. (_it's only used on the first run_) + + +The output looks something like this: + +``` +cluster_1 | 11:03:33.272 INFO restapi: REST API (libp2p-http): ENABLED. Listening on: +cluster_1 | /ip4/127.0.0.1/tcp/9096/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx +cluster_1 | /ip4/172.18.0.3/tcp/9096/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx +cluster_1 | /p2p-circuit/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx +daemon_1 | Swarm listening on /ip4/127.0.0.1/tcp/4001 +daemon_1 | Swarm listening on /ip4/172.19.0.2/tcp/4001 +daemon_1 | Swarm listening on /p2p-circuit +daemon_1 | Swarm announcing /ip4/127.0.0.1/tcp/4001 +daemon_1 | Swarm announcing /ip4/172.19.0.2/tcp/4001 +daemon_1 | Swarm announcing /ip4/202.170.161.77/tcp/4001 +daemon_1 | API server listening on /ip4/0.0.0.0/tcp/5001 +daemon_1 | Gateway (readonly) server listening on /ip4/0.0.0.0/tcp/8080 +daemon_1 | Daemon is ready +cluster_1 | 10:49:19.720 INFO consensus: Current Raft Leader: QmaAiMDP7PY3CX1xqzgAoNQav5M29P5WPWVqqSBdNu1Nsp raft.go:293 +cluster_1 | 10:49:19.721 INFO cluster: Cluster Peers (without including ourselves): cluster.go:403 +cluster_1 | 10:49:19.721 INFO cluster: - No other peers 
cluster.go:405 +cluster_1 | 10:49:19.722 INFO cluster: ** IPFS Cluster is READY ** cluster.go:418 +``` + +### Pick a leader + +Pick a node to be your primary node, and CTRL-C the others. + +Look for a line like this in the output of the primary node: + +``` +/ip4/127.0.0.1/tcp/9096/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx +``` + +You'll note several addresses listed, all ending in the same hash. None of these addresses will be your docker node's actual IP address, however, since we exposed port 9096, we can substitute your docker node's IP. + +### Bootstrap the followers + +On each of the non-primary nodes, run the following, replacing **IP-OF-PRIMARY-NODE** with the actual IP of the primary node, and **HASHY-MC-HASHFACE** with your own hash from primary output above. + + +``` +docker run --rm -it -v /var/ipfs/cluster:/data/ipfs-cluster \ + --entrypoint ipfs-cluster-service ipfs/ipfs-cluster \ + daemon --bootstrap \ /ip4/IP-OF-PRIMARY-NODE/tcp/9096/ipfs/HASHY-MC-HASHFACE +``` + +You'll see output like this: + +``` +10:55:26.121 INFO service: Bootstrapping to /ip4/192.168.31.13/tcp/9096/ipfs/QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT daemon.go:153 +10:55:26.121 INFO ipfshttp: IPFS Proxy: /ip4/0.0.0.0/tcp/9095 -> /ip4/172.17.0.1/tcp/5001 ipfshttp.go:221 +10:55:26.304 ERROR ipfshttp: error posting to IPFS: Post http://172.17.0.1:5001/api/v0/id: dial tcp 172.17.0.1:5001: connect: connection refused ipfshttp.go:708 +10:55:26.622 INFO consensus: Current Raft Leader: QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT raft.go:293 +10:55:26.623 INFO cluster: Cluster Peers (without including ourselves): cluster.go:403 +10:55:26.623 INFO cluster: - QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT cluster.go:410 +10:55:26.624 INFO cluster: - QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx cluster.go:410 +10:55:26.625 INFO cluster: ** IPFS Cluster is READY ** cluster.go:418 +``` + +!!! 
note + You can ignore the warnings about port 5001 refused - this is because we weren't running the ipfs daemon while bootstrapping the cluster. Its harmless. + +I haven't worked out why yet, but running the bootstrap in docker-run format reset the permissions on /var/ipfs/cluster/, so look at /var/ipfs/daemon, and make the permissions of /var/ipfs/cluster the same. + +You can now run ```docker-compose -f ipfs.yml up``` on the "follower" nodes, to bring your cluster online. + +### Confirm cluster + +docker-exec into one of the cluster containers (_it doesn't matter which one_), and run ```ipfs-cluster-ctl peers ls``` + +You should see output from each node member, indicating it can see its other peers. Here's my output from a 3-node cluster: + +``` +/ # ipfs-cluster-ctl peers ls +QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT | ef68b1437c56 | Sees 2 other peers + > Addresses: + - /ip4/127.0.0.1/tcp/9096/ipfs/QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT + - /ip4/172.19.0.3/tcp/9096/ipfs/QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT + - /p2p-circuit/ipfs/QmPrmQvW5knXLBE94jzpxvdtLSwXZeFE5DSY3FuMxypDsT + > IPFS: QmU6buucy4FX9XqPoj4ZEiJiu7xUq2dnth5puU1rswtrGg + - /ip4/127.0.0.1/tcp/4001/ipfs/QmU6buucy4FX9XqPoj4ZEiJiu7xUq2dnth5puU1rswtrGg + - /ip4/172.19.0.2/tcp/4001/ipfs/QmU6buucy4FX9XqPoj4ZEiJiu7xUq2dnth5puU1rswtrGg + - /ip4/202.170.161.75/tcp/4001/ipfs/QmU6buucy4FX9XqPoj4ZEiJiu7xUq2dnth5puU1rswtrGg +QmaAiMDP7PY3CX1xqzgAoNQav5M29P5WPWVqqSBdNu1Nsp | 6558e1bf32e2 | Sees 2 other peers + > Addresses: + - /ip4/127.0.0.1/tcp/9096/ipfs/QmaAiMDP7PY3CX1xqzgAoNQav5M29P5WPWVqqSBdNu1Nsp + - /ip4/172.19.0.3/tcp/9096/ipfs/QmaAiMDP7PY3CX1xqzgAoNQav5M29P5WPWVqqSBdNu1Nsp + - /p2p-circuit/ipfs/QmaAiMDP7PY3CX1xqzgAoNQav5M29P5WPWVqqSBdNu1Nsp + > IPFS: QmYMUwHHsaeP2H8D2G3iXKhs1fHm2gQV6SKWiRWxbZfxX7 + - /ip4/127.0.0.1/tcp/4001/ipfs/QmYMUwHHsaeP2H8D2G3iXKhs1fHm2gQV6SKWiRWxbZfxX7 + - /ip4/172.19.0.2/tcp/4001/ipfs/QmYMUwHHsaeP2H8D2G3iXKhs1fHm2gQV6SKWiRWxbZfxX7 + - 
/ip4/202.170.161.77/tcp/4001/ipfs/QmYMUwHHsaeP2H8D2G3iXKhs1fHm2gQV6SKWiRWxbZfxX7 +QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx | 28c13ec68f33 | Sees 2 other peers + > Addresses: + - /ip4/127.0.0.1/tcp/9096/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx + - /ip4/172.18.0.3/tcp/9096/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx + - /p2p-circuit/ipfs/QmbqPBLJNXWpbXEX6bVhYLo2ruEBE7mh1tfT9s6VXUzYYx + > IPFS: QmazkAuAPpWw913HKiGsr1ief2N8cLa6xcqeAZxqDMsWmE + - /ip4/127.0.0.1/tcp/4001/ipfs/QmazkAuAPpWw913HKiGsr1ief2N8cLa6xcqeAZxqDMsWmE + - /ip4/172.18.0.2/tcp/4001/ipfs/QmazkAuAPpWw913HKiGsr1ief2N8cLa6xcqeAZxqDMsWmE + - /ip4/202.170.161.96/tcp/4001/ipfs/QmazkAuAPpWw913HKiGsr1ief2N8cLa6xcqeAZxqDMsWmE +/ # +``` + + +## Chef's Notes + +1. I'm still trying to work out how to _mount_ the ipfs data in my filesystem in a usable way. Which is why this is still a WIP :) + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/kanboard.md b/manuscript/recipes/kanboard.md similarity index 62% rename from manuscript/recipies/kanboard.md rename to manuscript/recipes/kanboard.md index 921e07e..cd536d0 100644 --- a/manuscript/recipies/kanboard.md +++ b/manuscript/recipes/kanboard.md @@ -2,7 +2,10 @@ hero: Kanboard - A recipe to get your personal kanban on # Kanboard -Kanboard is a Kanban tool, developed by [Frédéric Guillot](https://github.com/fguillot). (Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipies/miniflux/)) +Kanboard is a Kanban tool, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipes/miniflux/)_) + +!!! 
tip "Sponsored Project" + Kanboard is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. I use it both in my DayJob(tm), and to manage my overflowing, overly-optimistic personal commitments! 😓 Features include: @@ -36,6 +39,16 @@ Create the location for the bind-mount of the application data, so that it's per mkdir -p /var/data/kanboard ``` +### Setup Environment + +If you intend to use an [OAuth proxy](/reference/oauth_proxy/) to further secure public access to your instance, create a ```kanboard.env``` file to hold your environment variables, and populate with your OAuth provider's details (_the cookie secret you can just make up_): + +``` +# If you decide to protect kanboard with an oauth_proxy, complete these +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +``` ### Setup Docker Swarm @@ -55,32 +68,34 @@ services: - /var/data/kanboard/data:/var/www/app/data - /var/data/kanboard/plugins:/var/www/app/plugins networks: - - traefik_public + - internal deploy: labels: - traefik.frontend.rule=Host:kanboard.example.com - traefik.docker.network=traefik_public - traefik.port=80 - cron: - image: kanboard/kanboard - volumes: - - /var/data/kanboard/data:/var/www/app/data - user: nginx - networks: - - internal - entrypoint: | - bash -c 'bash -s <``` -Log into your new instance at https://**YOUR-FQDN**. Default credentials are admin/admin, after which you can change (under 'profile') and add more users. +Log into your new instance at https://**YOUR-FQDN**. Default credentials are admin/admin, after which you can change (_under 'profile'_) and add more users. 
## Chef's Notes diff --git a/manuscript/recipes/keycloak.md b/manuscript/recipes/keycloak.md new file mode 100644 index 0000000..35547ff --- /dev/null +++ b/manuscript/recipes/keycloak.md @@ -0,0 +1,185 @@ +# KeyCloak + +[KeyCloak](https://www.keycloak.org/) is "an open source identity and access management solution." Using a local database, or a variety of backends (_think [OpenLDAP](/recipes/openldap/)_), you can provide Single Sign-On (SSO) using OpenID, OAuth 2.0, and SAML. + +!!! important + Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + +![KeyCloak Screenshot](../images/keycloak.png) + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname (_i.e. "keycloak.your-domain.com"_) you intend to use for LDAP Account Manager, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need several directories to bind-mount into our container for both runtime and backup data, so create them as follows + +``` +mkdir /var/data/runtime/keycloak/database +mkdir /var/data/keycloak/database-dump +``` + +### Prepare environment + +Create ```/var/data/keycloak/keycloak.env```, and populate with the following variables, customized for your own domain structure. + +``` +# Technically, this could be auto-detected, but we prefer to be prescriptive +DB_VENDOR=postgres +DB_DATABASE=keycloak +DB_ADDR=keycloak-db +DB_USER=keycloak +DB_PASSWORD=myuberpassword +KEYCLOAK_USER=admin +KEYCLOAK_PASSWORD=ilovepasswords + +# This is required to run keycloak behind traefik +PROXY_ADDRESS_FORWARDING=true + +# What's our hostname? 
+KEYCLOAK_HOSTNAME=keycloak.batcave.com + +# Tell Postgress what user/password to create +POSTGRES_USER=keycloak +POSTGRES_PASSWORD=myuberpassword +``` + +Create /var/data/keycloak/keycloak-backup.env, and populate with the following, so that your database can be backed up to the filesystem, daily: + +``` +PGHOST=keycloak-db +PGUSER=keycloak +PGPASSWORD=myuberpassword +BACKUP_NUM_KEEP=7 +BACKUP_FREQUENCY=1d +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 +``` +version: '3' + +services: + keycloak: + image: jboss/keycloak + env_file: /var/data/config/keycloak/keycloak.env + volumes: + - /etc/localtime:/etc/localtime:ro + networks: + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:keycloak.batcave.com + - traefik.port=8080 + - traefik.docker.network=traefik_public + + keycloak-db: + env_file: /var/data/config/keycloak/keycloak.env + image: postgres:10.1 + volumes: + - /var/data/runtime/keycloak/database:/var/lib/postgresql/data + - /etc/localtime:/etc/localtime:ro + networks: + - traefik_public + + keycloak-db-backup: + image: postgres:10.1 + env_file: /var/data/config/keycloak/keycloak-backup.env + volumes: + - /var/data/keycloak/database-dump:/dump + - /etc/localtime:/etc/localtime:ro + entrypoint: | + bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.psql + (ls -t /dump/dump*.psql|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.psql)|sort|uniq -u|xargs rm -- {} + sleep $$BACKUP_FREQUENCY + done + EOF' + networks: + - traefik_public + +networks: + traefik_public: + external: true +``` + +!!! 
warning + **Normally**, we set unique static subnets for every stack you deploy, and put the non-public facing components (like databases) in a dedicated _internal_ network. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + However, KeyCloak's JBOSS startup script assumes a single interface, and will crash in a ball of 🔥 if you try to assign multiple interfaces to the container. This means that we can't use a "keycloak_internal" network for our supporting containers. This is why unlike our other recipes, all the supporting services are prefixed with "keycloak-". + + +## Serving + +### Launch KeyCloak stack + +Launch the KeyCloak stack by running ```docker stack deploy keycloak -c ``` + +Log into your new instance at https://**YOUR-FQDN**, and login with the user/password you defined in keycloak.env. + +### Integrating into OpenLDAP + +KeyCloak gets really sexy when you integrate it into your [OpenLDAP](/recipes/openldap/) stack (_also, it's great not to have to play with ugly LDAP tree UIs_). + +You'll need to have completed the [OpenLDAP](/recipes/openldap/) recipe + +You start in the "Master" realm - but mouseover the realm name to reveal a dropdown box allowing you to add a new realm: + +![KeyCloak Add Realm Screenshot](/images/sso-stack-keycloak-1.png) + +Enter a name for your new realm, and click "_Create_": + +![KeyCloak Add Realm Screenshot](/images/sso-stack-keycloak-2.png) + +Once in the desired realm, click on **User Federation**, and click **Add Provider**.
On the next page ("_Required Settings_"), set the following: + +* **Edit Mode** : Writeable +* **Vendor** : Other +* **Connection URL** : ldap://openldap +* **Users DN** : ou=People, +* **Authentication Type** : simple +* **Bind DN** : cn=admin, +* **Bind Credential** : + +Save your changes, and then navigate back to "User Federation" > Your LDAP name > Mappers: + +![KeyCloak Add Realm Screenshot](/images/sso-stack-keycloak-3.png) + +For each of the following mappers, click the name, and set the "_Read Only_" flag to "_Off_" (_this enables 2-way sync between KeyCloak and OpenLDAP_) + +* last name +* username +* email +* first name + +![KeyCloak Add Realm Screenshot](/images/sso-stack-keycloak-4.png) + +!!! important + Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + + +## Chef's Notes + +1. I wanted to be able to add multiple networks to KeyCloak (_i.e., a dedicated overlay network for LDAP authentication_), but the entrypoint used by the container produces an error when more than one network is configured. This could theoretically be corrected in future, with a PR, but the [GitHub repo](https://github.com/jboss-dockerfiles/keycloak) has no issues enabled, so I wasn't sure where to start. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/kubernetes/kanboard.md b/manuscript/recipes/kubernetes/kanboard.md new file mode 100644 index 0000000..05e49c0 --- /dev/null +++ b/manuscript/recipes/kubernetes/kanboard.md @@ -0,0 +1,271 @@ +#Kanboard + +Kanboard is a Kanban tool, developed by [Frédéric Guillot](https://github.com/fguillot). 
(_Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipes/miniflux/)_) + +![Kanboard Screenshot](/images/kanboard.png) + +!!! tip "Sponsored Project" + Kanboard is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. I use it both in my DayJob(tm), and to manage my overflowing, overly-optimistic personal commitments! 😓 + +Features include: + +* Visualize your work +* Limit your work in progress to be more efficient +* Customize your boards according to your business activities +* Multiple projects with the ability to drag and drop tasks +* Reports and analytics +* Fast and simple to use +* Access from anywhere with a modern browser +* Plugins and integrations with external services +* Free, open source and self-hosted +* Super simple installation + +## Ingredients + +1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/) +2. A DNS name for your kanboard instance (*kanboard.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress + +## Preparation + +### Prepare traefik for namespace + +When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. 
Update ```values.yml``` to include the *kanboard* namespace, as illustrated below: + +``` + +kubernetes: + namespaces: + - kube-system + - nextcloud + - kanboard + - miniflux + +``` + +If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods``` + +### Create data locations + +Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment. + +``` +mkdir /var/data/config/kanboard +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the kanboard stack with the following .yml: + +``` +cat < /var/data/config/kanboard/namespace.yml +apiVersion: v1 +kind: Namespace +metadata: + name: kanboard +EOF +kubectl create -f /var/data/config/kanboard/namespace.yml +``` + +### Create persistent volume claim + +Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the kanboard app and plugin data: + +``` +cat < /var/data/config/kanboard/persistent-volumeclaim.yml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: kanboard-volumeclaim + namespace: kanboard + annotations: + backup.kubernetes.io/deltas: P1D P7D +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +EOF +kubectl create -f /var/data/config/kanboard/persistent-volumeclaim.yml +``` + +!!! question "What's that annotation about?" + The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days.
+ +### Create ConfigMap + +Kanboard's configuration is all contained within ```config.php```, which needs to be presented to the container. We _could_ maintain ```config.php``` in the persistent volume we created above, but this would require manually accessing the pod every time we wanted to make a change. + +Instead, we'll create ```config.php``` as a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), meaning it "lives" within the Kubernetes cluster and can be **presented** to our pod. When we want to make changes, we simply update the ConfigMap (*delete and recreate, to be accurate*), and relaunch the pod. + +Grab a copy of [config.default.php](https://github.com/kanboard/kanboard/blob/master/config.default.php), save it to ```/var/data/config/kanboard/config.php```, and customize it per [the guide](https://docs.kanboard.org/en/latest/admin_guide/config_file.html). + +At the very least, I'd suggest making the following changes: +``` +define('PLUGIN_INSTALLER', true); // Yes, I want to install plugins using the UI +define('ENABLE_URL_REWRITE', false); // No, don't generate "pretty" URLs (these require webserver rewrite support) +``` + +Now create the configmap from config.php, by running ```kubectl create configmap -n kanboard kanboard-config --from-file=config.php``` + +## Serving + +Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and a [configmap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), we can create a [deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [service](https://kubernetes.io/docs/concepts/services-networking/service/), and [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the kanboard [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
+ +### Create deployment + +Create a deployment to tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Note below that we mount the persistent volume **twice**, to both ```/var/www/app/data``` and ```/var/www/app/plugins```, using the subPath value to differentiate them. This trick avoids us having to provision **two** persistent volumes just for data mounted in 2 separate locations. + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary .yml files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```kubectl create -f *.yml``` 👍 + +``` +cat < /var/data/kanboard/deployment.yml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + namespace: kanboard + name: app + labels: + app: app +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - image: kanboard/kanboard + name: app + volumeMounts: + - name: kanboard-config + mountPath: /var/www/app/config.php + subPath: config.php + - name: kanboard-app + mountPath: /var/www/app/data + subPath: data + - name: kanboard-app + mountPath: /var/www/app/plugins + subPath: plugins + volumes: + - name: kanboard-app + persistentVolumeClaim: + claimName: kanboard-app + - name: kanboard-config + configMap: + name: kanboard-config +EOF +kubectl create -f /var/data/kanboard/deployment.yml +``` + +Check that your deployment is running, with ```kubectl get pods -n kanboard```. 
After a minute or so, you should see a "Running" pod, as illustrated below: + +``` +[funkypenguin:~] % kubectl get pods -n kanboard +NAME READY STATUS RESTARTS AGE +app-79f97f7db6-hsmfg 1/1 Running 0 11d +[funkypenguin:~] % +``` + +### Create service + +The service resource "advertises" the availability of TCP port 80 in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements. + +``` +cat < /var/data/kanboard/service.yml +kind: Service +apiVersion: v1 +metadata: + name: app + namespace: kanboard +spec: + selector: + app: app + ports: + - protocol: TCP + port: 80 + clusterIP: None +EOF +kubectl create -f /var/data/kanboard/service.yml +``` + +Check that your service is deployed, with ```kubectl get services -n kanboard```. You should see something like this: + +``` +[funkypenguin:~] % kubectl get service -n kanboard +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +app ClusterIP None 80/TCP 38d +[funkypenguin:~] % +``` + +### Create ingress + +The ingress resource tells Traefik what to forward inbound requests for *kanboard.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain. + +``` +cat < /var/data/kanboard/ingress.yml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: app + namespace: kanboard + annotations: + kubernetes.io/ingress.class: traefik +spec: + rules: + - host: kanboard.example.com + http: + paths: + - backend: + serviceName: app + servicePort: 80 +EOF +kubectl create -f /var/data/kanboard/ingress.yml +``` + +Check that your service is deployed, with ```kubectl get ingress -n kanboard```. 
You should see something like this: + +``` +[funkypenguin:~] % kubectl get ingress -n kanboard +NAME HOSTS ADDRESS PORTS AGE +app kanboard.funkypenguin.co.nz 80 38d +[funkypenguin:~] % +``` + +### Access Kanboard + +At this point, you should be able to access your instance on your chosen DNS name (*i.e. https://kanboard.example.com*) + + +### Updating config.php + +Since ```config.php``` is a ConfigMap now, to update it, make your local changes, and then delete and recreate the ConfigMap, by running: + +``` +kubectl delete configmap -n kanboard kanboard-config +kubectl create configmap -n kanboard kanboard-config --from-file=config.php +``` + +Then, in the absence of any other changes to the deployment definition, force the pod to restart by issuing a "null patch", as follows: + +``` +kubectl patch -n kanboard deployment app -p "{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"date\":\"`date +'%s'`\"}}}}}" +``` + +### Troubleshooting + +To look at the Kanboard pod's logs, run ```kubectl logs -n kanboard -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/). + +## Chef's Notes + +1. The simplest deployment of Kanboard uses the default SQLite database backend, stored on the persistent volume. You can convert this to a "real" database running MySQL or PostgreSQL, and running an additional database pod and service. Contact me if you'd like further details ;) + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments?
💬 diff --git a/manuscript/recipes/kubernetes/miniflux.md b/manuscript/recipes/kubernetes/miniflux.md new file mode 100644 index 0000000..4e1bb76 --- /dev/null +++ b/manuscript/recipes/kubernetes/miniflux.md @@ -0,0 +1,328 @@ +#Miniflux + +Miniflux is a lightweight RSS reader, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_) + +![Miniflux Screenshot](/images/miniflux.png) + +!!! tip "Sponsored Project" + Miniflux is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. Although I get to process my RSS feeds less frequently than I'd like to! + +I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate: + +* Compatible with the Fever API, read your feeds through existing mobile and desktop clients (_This is the killer feature for me. I hardly ever read RSS on my desktop, I typically read on my iPhone or iPad, using [Fiery Feeds](http://cocoacake.net/apps/fiery/) or my new squeeze, [Unread](https://www.goldenhillsoftware.com/unread/)_) +* Send your bookmarks to Pinboard, Wallabag, Shaarli or Instapaper (_I use this to automatically pin my bookmarks for collection on my [blog](https://www.funkypenguin.co.nz/blog/)_) +* Feeds can be configured to download a "full" version of the content (_rather than an excerpt_) +* Use the Bookmarklet to subscribe to a website directly from any browsers + +!!! abstract "2.0+ is a bit different" + [Some things changed](https://docs.miniflux.net/en/latest/migration.html) when Miniflux 2.0 was released. For one thing, the only supported database is now postgresql (_no more SQLite_). External themes are gone, as is PHP (_in favor of golang_). 
It's been a controversial change, but I'm keen on minimal and single-purpose, so I'm still very happy with the direction of development. The developer has laid out his [opinions](https://docs.miniflux.net/en/latest/opinionated.html) re the decisions he's made in the course of development. + + +## Ingredients + +1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/) +2. A DNS name for your miniflux instance (*miniflux.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress + +## Preparation + +### Prepare traefik for namespace + +When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *miniflux* namespace, as illustrated below: + +``` + +kubernetes: + namespaces: + - kube-system + - nextcloud + - kanboard + - miniflux + +``` + +If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods``` + +### Create data locations + +Although we could simply bind-mount local volumes to a local Kubuernetes cluster, since we're targetting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment. 
+ +``` +mkdir /var/data/config/miniflux +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the miniflux stack with the following .yml: + +``` +cat <<EOF > /var/data/config/miniflux/namespace.yml +apiVersion: v1 +kind: Namespace +metadata: + name: miniflux +EOF +kubectl create -f /var/data/config/miniflux/namespace.yml +``` + +### Create persistent volume claim + +Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the miniflux postgres database: + +``` +cat <<EOF > /var/data/config/miniflux/db-persistent-volumeclaim.yml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: miniflux-db + namespace: miniflux + annotations: + backup.kubernetes.io/deltas: P1D P7D +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +EOF +kubectl create -f /var/data/config/miniflux/db-persistent-volumeclaim.yml +``` + +!!! question "What's that annotation about?" + The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days. + +### Create secrets + +It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents. 
Run the following, replacing ```imtoosexyformyadminpassword```, and the ```mydbpass``` value in both postgres-password.secret **and** database-url.secret: + +``` +echo -n "imtoosexyformyadminpassword" > admin-password.secret +echo -n "mydbpass" > postgres-password.secret +echo -n "postgres://miniflux:mydbpass@db/miniflux?sslmode=disable" > database-url.secret + +kubectl create secret -n miniflux generic miniflux-credentials \ + --from-file=admin-password.secret \ + --from-file=postgres-password.secret \ + --from-file=database-url.secret +``` + +!!! tip "Why use ```echo -n```?" + Because. See [my blog post here](https://www.funkypenguin.co.nz/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why! + + +## Serving + +Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), we can create [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [services](https://kubernetes.io/docs/concepts/services-networking/service/), and an [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the miniflux [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). + +### Create db deployment + +Deployments tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Create the db deployment by executing the following. Note that the deployment refers to the secrets created above. + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary .yml files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```kubectl create -f *.yml``` 👍 + +``` +cat <<EOF > /var/data/miniflux/db-deployment.yml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + namespace: miniflux + name: db + labels: + app: db +spec: + replicas: 1 + selector: + matchLabels: + app: db + template: + metadata: + labels: + app: db + spec: + containers: + - image: postgres:11 + name: db + volumeMounts: + - name: miniflux-db + mountPath: /var/lib/postgresql/data + env: + - name: POSTGRES_USER + value: "miniflux" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: miniflux-credentials + key: postgres-password.secret + volumes: + - name: miniflux-db + persistentVolumeClaim: + claimName: miniflux-db +EOF +kubectl create -f /var/data/miniflux/db-deployment.yml +``` + +### Create app deployment + +Create the app deployment by executing the following. Again, note that the deployment refers to the secrets created above. + +``` +cat <<EOF > /var/data/miniflux/app-deployment.yml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + namespace: miniflux + name: app + labels: + app: app +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - image: miniflux/miniflux + name: app + env: + # This is necessary for the miniflux to update the db schema, even on an empty DB + - name: CREATE_ADMIN + value: "1" + - name: RUN_MIGRATIONS + value: "1" + - name: ADMIN_USERNAME + value: "admin" + - name: ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: miniflux-credentials + key: admin-password.secret + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: miniflux-credentials + key: database-url.secret +EOF +kubectl create -f /var/data/miniflux/app-deployment.yml +``` + +### Check pods + +Check that your deployment is running, with ```kubectl get pods -n miniflux```. 
After a minute or so, you should see 2 "Running" pods, as illustrated below: + +``` +[funkypenguin:~] % kubectl get pods -n miniflux +NAME READY STATUS RESTARTS AGE +app-667c667b75-5jjm9 1/1 Running 0 4d +db-fcd47b88f-9vvqt 1/1 Running 0 4d +[funkypenguin:~] % +``` + +### Create db service + +The db service resource "advertises" the availability of PostgreSQL's port (TCP 5432) in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements. + +``` +cat <<EOF > /var/data/miniflux/db-service.yml +kind: Service +apiVersion: v1 +metadata: + name: db + namespace: miniflux +spec: + selector: + app: db + ports: + - protocol: TCP + port: 5432 + clusterIP: None +EOF +kubectl create -f /var/data/miniflux/db-service.yml +``` + +### Create app service + +The app service resource "advertises" the availability of miniflux's HTTP listener port (TCP 8080) in your pod. This is the service which will be referred to by the ingress (below), so that Traefik can route incoming traffic to the miniflux app. + + +``` +cat <<EOF > /var/data/miniflux/app-service.yml +kind: Service +apiVersion: v1 +metadata: + name: app + namespace: miniflux +spec: + selector: + app: app + ports: + - protocol: TCP + port: 8080 + clusterIP: None +EOF +kubectl create -f /var/data/miniflux/app-service.yml +``` + +### Check services + +Check that your services are deployed, with ```kubectl get services -n miniflux```. 
You should see something like this: + +``` +[funkypenguin:~] % kubectl get services -n miniflux +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +app ClusterIP None 8080/TCP 55d +db ClusterIP None 5432/TCP 55d +[funkypenguin:~] % +``` + +### Create ingress + +The ingress resource tells Traefik what to forward inbound requests for *miniflux.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain. + +``` +cat < /var/data/miniflux/ingress.yml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: app + namespace: miniflux + annotations: + kubernetes.io/ingress.class: traefik +spec: + rules: + - host: miniflux.example.com + http: + paths: + - backend: + serviceName: app + servicePort: 8080 +EOF +kubectl create -f /var/data/miniflux/ingress.yml +``` + +Check that your service is deployed, with ```kubectl get ingress -n miniflux```. You should see something like this: + +``` +[funkypenguin:~] 130 % kubectl get ingress -n miniflux +NAME HOSTS ADDRESS PORTS AGE +app miniflux.funkypenguin.co.nz 80 55d +[funkypenguin:~] % +``` + +### Access Miniflux + +At this point, you should be able to access your instance on your chosen DNS name (*i.e. https://miniflux.example.com*) + + +### Troubleshooting + +To look at the Miniflux pod's logs, run ```kubectl logs -n miniflux -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/). + +## Chef's Notes + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 
💬 diff --git a/manuscript/recipes/kubernetes/nextcloud.md b/manuscript/recipes/kubernetes/nextcloud.md new file mode 100644 index 0000000..649dd79 --- /dev/null +++ b/manuscript/recipes/kubernetes/nextcloud.md @@ -0,0 +1,133 @@ +hero: Not all heroes wear capes + +!!! danger "This recipe is a work in progress" + This recipe is **incomplete**, and is featured to align the [patrons](https://www.patreon.com/funkypenguin)'s "premix" repository with the cookbook. "_premix_" is a private git repository available to [all Patreon patrons](https://www.patreon.com/funkypenguin), which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + So... There may be errors and inaccuracies. Jump into [Discord](http://chat.funkypenguin.co.nz) if you're encountering issues 😁 + +# NAME + +Intro + +![NAME Screenshot](../../images/name.jpg) + +Details + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/digital-ocean/) + +## Preparation + +### Create data locations + +``` +mkdir /var/data/config/mqtt +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the mqtt stack by creating the following .yaml: + +``` +cat < /var/data/mqtt/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: mqtt +EOF +kubectl create -f /var/data/mqtt/namespace.yaml +``` + +### Prepare environment + +Create wekan.env, and populate with the following variables +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +MONGO_URL=mongodb://wekandb:27017/wekan +ROOT_URL=https://wekan.example.com +MAIL_URL=smtp://wekan@wekan.example.com:password@mail.example.com:587/ +MAIL_FROM="Wekan " +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! 
tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + + wekandb: + image: mongo:3.2.15 + command: mongod --smallfiles --oplogSize 128 + networks: + - internal + volumes: + - /var/data/wekan/wekan-db:/data/db + - /var/data/wekan/wekan-db-dump:/dump + + proxy: + image: a5huynh/oauth2_proxy + env_file: /var/data/wekan/wekan.env + networks: + - traefik_public + - internal + deploy: + labels: + - traefik_public.frontend.rule=Host:wekan.example.com + - traefik_public.docker.network=traefik_public + - traefik_public.port=4180 + command: | + -cookie-secure=false + -upstream=http://wekan:80 + -redirect-url=https://wekan.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + + wekan: + image: wekanteam/wekan:latest + networks: + - internal + env_file: /var/data/wekan/wekan.env + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.3.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Wekan stack + +Launch the Wekan stack by running ```docker stack deploy wekan -c ``` + +Log into your new instance at https://**YOUR-FQDN**, with user "root" and the password you specified in gitlab.env. + +## Chef's Notes + +1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the wekan container. You'd also need to add the traefik_public network to the wekan container. 
+ +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/template.md b/manuscript/recipes/kubernetes/phpipam.md similarity index 72% rename from manuscript/recipies/template.md rename to manuscript/recipes/kubernetes/phpipam.md index 79bdc4c..fe4f0e1 100644 --- a/manuscript/recipies/template.md +++ b/manuscript/recipes/kubernetes/phpipam.md @@ -1,29 +1,35 @@ -hero: Heroic Hero - -# NAME +# Kanboard Intro -![NAME Screenshot](../images/name.jpg) +![NAME Screenshot](../../images/name.jpg) Details ## Ingredients -1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) -2. [Traefik](/ha-docker-swarm/traefik) configured per design -3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP +1. 
[Kubernetes cluster](/kubernetes/digital-ocean/) ## Preparation -### Setup data locations - -We'll need several directories to bind-mount into our container, so create them in /var/data/wekan: +### Create data locations ``` -mkdir /var/data/wekan -cd /var/data/wekan -mkdir -p {wekan-db,wekan-db-dump} +mkdir /var/data/config/mqtt +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the mqtt stack by creating the following .yaml: + +``` +cat < /var/data/mqtt/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: mqtt +EOF +kubectl create -f /var/data/mqtt/namespace.yaml ``` ### Prepare environment @@ -62,16 +68,16 @@ services: - /var/data/wekan/wekan-db-dump:/dump proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file: /var/data/wekan/wekan.env networks: - - traefik + - traefik_public - internal deploy: labels: - - traefik.frontend.rule=Host:wekan.example.com - - traefik.docker.network=traefik - - traefik.port=4180 + - traefik_public.frontend.rule=Host:wekan.example.com + - traefik_public.docker.network=traefik_public + - traefik_public.port=4180 command: | -cookie-secure=false -upstream=http://wekan:80 @@ -87,7 +93,7 @@ services: env_file: /var/data/wekan/wekan.env networks: - traefik: + traefik_public: external: true internal: driver: overlay @@ -111,7 +117,7 @@ Log into your new instance at https://**YOUR-FQDN**, with user "root" and the pa ## Chef's Notes -1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik-related labels directly to the wekan container. You'd also need to add the traefik network to the wekan container. +1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the wekan container. You'd also need to add the traefik_public network to the wekan container. 
### Tip your waiter (donate) diff --git a/manuscript/recipes/kubernetes/privatebin.md b/manuscript/recipes/kubernetes/privatebin.md new file mode 100644 index 0000000..649dd79 --- /dev/null +++ b/manuscript/recipes/kubernetes/privatebin.md @@ -0,0 +1,133 @@ +hero: Not all heroes wear capes + +!!! danger "This recipe is a work in progress" + This recipe is **incomplete**, and is featured to align the [patrons](https://www.patreon.com/funkypenguin)'s "premix" repository with the cookbook. "_premix_" is a private git repository available to [all Patreon patrons](https://www.patreon.com/funkypenguin), which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + So... There may be errors and inaccuracies. Jump into [Discord](http://chat.funkypenguin.co.nz) if you're encountering issues 😁 + +# NAME + +Intro + +![NAME Screenshot](../../images/name.jpg) + +Details + +## Ingredients + +1. [Kubernetes cluster](/kubernetes/digital-ocean/) + +## Preparation + +### Create data locations + +``` +mkdir /var/data/config/mqtt +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the mqtt stack by creating the following .yaml: + +``` +cat < /var/data/mqtt/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: mqtt +EOF +kubectl create -f /var/data/mqtt/namespace.yaml +``` + +### Prepare environment + +Create wekan.env, and populate with the following variables +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +MONGO_URL=mongodb://wekandb:27017/wekan +ROOT_URL=https://wekan.example.com +MAIL_URL=smtp://wekan@wekan.example.com:password@mail.example.com:587/ +MAIL_FROM="Wekan " +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! 
tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + + wekandb: + image: mongo:3.2.15 + command: mongod --smallfiles --oplogSize 128 + networks: + - internal + volumes: + - /var/data/wekan/wekan-db:/data/db + - /var/data/wekan/wekan-db-dump:/dump + + proxy: + image: a5huynh/oauth2_proxy + env_file: /var/data/wekan/wekan.env + networks: + - traefik_public + - internal + deploy: + labels: + - traefik_public.frontend.rule=Host:wekan.example.com + - traefik_public.docker.network=traefik_public + - traefik_public.port=4180 + command: | + -cookie-secure=false + -upstream=http://wekan:80 + -redirect-url=https://wekan.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + + wekan: + image: wekanteam/wekan:latest + networks: + - internal + env_file: /var/data/wekan/wekan.env + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.3.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Wekan stack + +Launch the Wekan stack by running ```docker stack deploy wekan -c ``` + +Log into your new instance at https://**YOUR-FQDN**, with user "root" and the password you specified in gitlab.env. + +## Chef's Notes + +1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the wekan container. You'd also need to add the traefik_public network to the wekan container. 
+ +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/kubernetes/template-k8s.md b/manuscript/recipes/kubernetes/template-k8s.md new file mode 100644 index 0000000..05e49c0 --- /dev/null +++ b/manuscript/recipes/kubernetes/template-k8s.md @@ -0,0 +1,271 @@ +#Kanboard + +Kanboard is a Kanban tool, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipes/miniflux/)_) + +![Kanboard Screenshot](/images/kanboard.png) + +!!! tip "Sponsored Project" + Kanboard is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. I use it both in my DayJob(tm), and to manage my overflowing, overly-optimistic personal commitments! 😓 + +Features include: + +* Visualize your work +* Limit your work in progress to be more efficient +* Customize your boards according to your business activities +* Multiple projects with the ability to drag and drop tasks +* Reports and analytics +* Fast and simple to use +* Access from anywhere with a modern browser +* Plugins and integrations with external services +* Free, open source and self-hosted +* Super simple installation + +## Ingredients + +1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/) +2. A DNS name for your kanboard instance (*kanboard.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress + +## Preparation + +### Prepare traefik for namespace + +When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. 
In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *kanboard* namespace, as illustrated below: + +``` + +kubernetes: + namespaces: + - kube-system + - nextcloud + - kanboard + - miniflux + +``` + +If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods``` + +### Create data locations + +Although we could simply bind-mount local volumes to a local Kubuernetes cluster, since we're targetting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment. + +``` +mkdir /var/data/config/kanboard +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the kanboard stack with the following .yml: + +``` +cat < /var/data/config/kanboard/namespace.yml +apiVersion: v1 +kind: Namespace +metadata: + name: kanboard +EOF +kubectl create -f /var/data/config/kanboard/namespace.yaml +``` + +### Create persistent volume claim + +Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the kanboard app and plugin data: + +``` +cat < /var/data/config/kanboard/persistent-volumeclaim.yml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: kanboard-volumeclaim + namespace: kanboard + annotations: + backup.kubernetes.io/deltas: P1D P7D +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +EOF +kubectl create -f /var/data/config/kanboard/kanboard-volumeclaim.yaml +``` + +!!! question "What's that annotation about?" + The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days. 
+ +### Create ConfigMap + +Kanboard's configuration is all contained within ```config.php```, which needs to be presented to the container. We _could_ maintain ```config.php``` in the persistent volume we created above, but this would require manually accessing the pod every time we wanted to make a change. + +Instead, we'll create ```config.php``` as a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), meaning it "lives" within the Kuberetes cluster and can be **presented** to our pod. When we want to make changes, we simply update the ConfigMap (*delete and recreate, to be accurate*), and relaunch the pod. + +Grab a copy of [config.default.php](https://github.com/kanboard/kanboard/blob/master/config.default.php), save it to ```/var/data/config/kanboard/config.php```, and customize it per [the guide](https://docs.kanboard.org/en/latest/admin_guide/config_file.html). + +At the very least, I'd suggest making the following changes: +``` +define('PLUGIN_INSTALLER', true); // Yes, I want to install plugins using the UI +define('ENABLE_URL_REWRITE', false); // Yes, I want pretty URLs +``` + +Now create the configmap from config.php, by running ```kubectl create configmap -n kanboard kanboard-config --from-file=config.php``` + +## Serving + +Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and a [configmap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), we can create a [deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [service](https://kubernetes.io/docs/concepts/services-networking/service/), and [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the kanboard [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). 
+ +### Create deployment + +Create a deployment to tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Note below that we mount the persistent volume **twice**, to both ```/var/www/app/data``` and ```/var/www/app/plugins```, using the subPath value to differentiate them. This trick avoids us having to provision **two** persistent volumes just for data mounted in 2 separate locations. + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary .yml files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```kubectl create -f *.yml``` 👍 + +``` +cat < /var/data/kanboard/deployment.yml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + namespace: kanboard + name: app + labels: + app: app +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - image: kanboard/kanboard + name: app + volumeMounts: + - name: kanboard-config + mountPath: /var/www/app/config.php + subPath: config.php + - name: kanboard-app + mountPath: /var/www/app/data + subPath: data + - name: kanboard-app + mountPath: /var/www/app/plugins + subPath: plugins + volumes: + - name: kanboard-app + persistentVolumeClaim: + claimName: kanboard-app + - name: kanboard-config + configMap: + name: kanboard-config +EOF +kubectl create -f /var/data/kanboard/deployment.yml +``` + +Check that your deployment is running, with ```kubectl get pods -n kanboard```. 
After a minute or so, you should see a "Running" pod, as illustrated below: + +``` +[funkypenguin:~] % kubectl get pods -n kanboard +NAME READY STATUS RESTARTS AGE +app-79f97f7db6-hsmfg 1/1 Running 0 11d +[funkypenguin:~] % +``` + +### Create service + +The service resource "advertises" the availability of TCP port 80 in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements. + +``` +cat < /var/data/kanboard/service.yml +kind: Service +apiVersion: v1 +metadata: + name: app + namespace: kanboard +spec: + selector: + app: app + ports: + - protocol: TCP + port: 80 + clusterIP: None +EOF +kubectl create -f /var/data/kanboard/service.yml +``` + +Check that your service is deployed, with ```kubectl get services -n kanboard```. You should see something like this: + +``` +[funkypenguin:~] % kubectl get service -n kanboard +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +app ClusterIP None 80/TCP 38d +[funkypenguin:~] % +``` + +### Create ingress + +The ingress resource tells Traefik what to forward inbound requests for *kanboard.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain. + +``` +cat < /var/data/kanboard/ingress.yml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: app + namespace: kanboard + annotations: + kubernetes.io/ingress.class: traefik +spec: + rules: + - host: kanboard.example.com + http: + paths: + - backend: + serviceName: app + servicePort: 80 +EOF +kubectl create -f /var/data/kanboard/ingress.yml +``` + +Check that your service is deployed, with ```kubectl get ingress -n kanboard```. 
You should see something like this:
+
+```
+[funkypenguin:~] % kubectl get ingress -n kanboard
+NAME      HOSTS                         ADDRESS   PORTS     AGE
+app       kanboard.funkypenguin.co.nz             80        38d
+[funkypenguin:~] %
+```
+
+### Access Kanboard
+
+At this point, you should be able to access your instance on your chosen DNS name (*i.e. https://kanboard.example.com*)
+
+
+### Updating config.php
+
+Since ```config.php``` is a ConfigMap now, to update it, make your local changes, and then delete and recreate the ConfigMap, by running:
+
+```
+kubectl delete configmap -n kanboard kanboard-config
+kubectl create configmap -n kanboard kanboard-config --from-file=config.php
+```
+
+Then, in the absence of any other changes to the deployment definition, force the pod to restart by issuing a "null patch", as follows:
+
+```
+kubectl patch -n kanboard deployment app -p "{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"date\":\"`date +'%s'`\"}}}}}"
+```
+
+### Troubleshooting
+
+To look at the Kanboard pod's logs, run ```kubectl logs -n kanboard -f <pod name>```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/).
+
+## Chef's Notes
+
+1. The simplest deployment of Kanboard uses the default SQLite database backend, stored on the persistent volume. You can convert this to a "real" database running MySQL or PostgreSQL, and running an additional database pod and service. Contact me if you'd like further details ;)
+
+### Tip your waiter (donate) 👏
+
+Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏
+
+### Your comments?
💬 diff --git a/manuscript/recipies/mail.md b/manuscript/recipes/mail.md similarity index 89% rename from manuscript/recipies/mail.md rename to manuscript/recipes/mail.md index 7fc8934..550a77a 100644 --- a/manuscript/recipies/mail.md +++ b/manuscript/recipes/mail.md @@ -2,7 +2,7 @@ hero: Docker-mailserver - A recipe for a self-contained mailserver and friends # Mail Server -Many of the recipies that follow require email access of some kind. It's normally possible to use a hosted service such as SendGrid, or just a gmail account. If (like me) you'd like to self-host email for your stacks, then the following recipe provides a full-stack mail server running on the docker HA swarm. +Many of the recipes that follow require email access of some kind. It's normally possible to use a hosted service such as SendGrid, or just a gmail account. If (like me) you'd like to self-host email for your stacks, then the following recipe provides a full-stack mail server running on the docker HA swarm. Of value to me in choosing docker-mailserver were: @@ -92,21 +92,34 @@ Create the necessary DNS TXT entries for your domain(s). Note that although open ### Setup Docker Swarm -Create a docker swarm config file in docker-compose syntax (v3), something like this: +Create a docker swarm config file in docker-compose syntax (_v3.2 - because we need to expose mail ports in "host mode"_), something like this: !!! tip I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` ``` -version: '3' +version: '3.2' services: mail: image: tvial/docker-mailserver:latest ports: - - "25:25" - - "587:587" - - "993:993" + - target: 25 + published: 25 + protocol: tcp + mode: host + - target: 587 + published: 587 + protocol: tcp + mode: host + - target: 993 + published: 993 + protocol: tcp + mode: host + - target: 995 + published: 995 + protocol: tcp + mode: host volumes: - /var/data/docker-mailserver/maildata:/var/mail - /var/data/docker-mailserver/mailstate:/var/mail-state diff --git a/manuscript/recipes/mattermost.md b/manuscript/recipes/mattermost.md new file mode 100644 index 0000000..a10f490 --- /dev/null +++ b/manuscript/recipes/mattermost.md @@ -0,0 +1,127 @@ +# MatterMost + +Intro + +![MatterMost Screenshot](../images/mattermost.png) + +Details + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. 
DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need several directories to bind-mount into our container, so create them in /var/data/mattermost: + +``` +mkdir -p /var/data/mattermost/{cert,config,data,logs,plugins,database-dump} +mkdir -p /var/data/realtime/mattermost/database +``` + +### Prepare environment + +Create mattermost.env, and populate with the following variables +``` +POSTGRES_USER=mmuser +POSTGRES_PASSWORD=mmuser_password +POSTGRES_DB=mattermost +MM_USERNAME=mmuser +MM_PASSWORD=mmuser_password +MM_DBNAME=mattermost +``` + +Now create mattermost-backup.env, and populate with the following variables: +``` +PGHOST=db +PGUSER=mmuser +PGPASSWORD=mmuser_password +BACKUP_NUM_KEEP=7 +BACKUP_FREQUENCY=1d +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍
+
+
+```
+version: '3'
+
+services:
+
+  db:
+    image: mattermost/mattermost-prod-db
+    env_file: /var/data/config/mattermost/mattermost.env
+    volumes:
+      - /var/data/realtime/mattermost/database:/var/lib/postgresql/data
+    networks:
+      - internal
+
+  app:
+    image: mattermost/mattermost-team-edition
+    env_file: /var/data/config/mattermost/mattermost.env
+    volumes:
+      - /var/data/mattermost/config:/mattermost/config:rw
+      - /var/data/mattermost/data:/mattermost/data:rw
+      - /var/data/mattermost/logs:/mattermost/logs:rw
+      - /var/data/mattermost/plugins:/mattermost/plugins:rw
+
+  db-backup:
+    image: mattermost/mattermost-prod-db
+    env_file: /var/data/config/mattermost/mattermost-backup.env
+    volumes:
+      - /var/data/mattermost/database-dump:/dump
+    entrypoint: |
+      bash -c 'bash -s <<EOF
+      trap "break;exit" SIGHUP SIGINT SIGTERM
+      sleep 2m
+      while /bin/true; do
+        pg_dump -Fc > /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.psql
+        (ls -t /dump/dump*.psql|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.psql)|sort|uniq -u|xargs rm -- {}
+        sleep $$BACKUP_FREQUENCY
+      done
+      EOF'
+    networks:
+      - internal
+
+
+networks:
+  traefik_public:
+    external: true
+  internal:
+    driver: overlay
+    ipam:
+      config:
+        - subnet: 172.16.40.0/24
+```
+
+!!! note
+    Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here.
+
+
+
+## Serving
+
+### Launch MatterMost stack
+
+Launch the MatterMost stack by running ```docker stack deploy mattermost -c <path-to-docker-compose.yml>```
+
+Log into your new instance at https://**YOUR-FQDN**, with user "root" and the password you specified in mattermost.env.
+
+## Chef's Notes
+
+1. If you wanted to expose the MatterMost UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the mattermost container. You'd also need to add the traefik_public network to the mattermost container.
+ +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/miniflux.md b/manuscript/recipes/miniflux.md similarity index 94% rename from manuscript/recipies/miniflux.md rename to manuscript/recipes/miniflux.md index c8f199e..a5b1e90 100644 --- a/manuscript/recipies/miniflux.md +++ b/manuscript/recipes/miniflux.md @@ -2,10 +2,12 @@ hero: Miniflux - A recipe for a lightweight minimalist RSS reader # Miniflux -Miniflux is a lightweight RSS reader, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipies/kanboard/)_) +Miniflux is a lightweight RSS reader, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_) -![Miniflux Screenshot](../../images/miniflux.png) +![Miniflux Screenshot](../images/miniflux.png) +!!! tip "Sponsored Project" + Miniflux is one of my [sponsored projects](/sponsored-projects/) - a project I financially support on a regular basis because of its utility to me. Although I get to process my RSS feeds less frequently than I'd like to! I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate: diff --git a/manuscript/recipes/minio.md b/manuscript/recipes/minio.md new file mode 100644 index 0000000..87d0043 --- /dev/null +++ b/manuscript/recipes/minio.md @@ -0,0 +1,183 @@ +# Minio + +Minio is a high performance distributed object storage server, designed for +large-scale private cloud infrastructure. 
+ +However, at its simplest, Minio allows you to expose a local filestructure via the [Amazon S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html). You could, for example, use it to provide access to "buckets" (folders) of data on your filestore, secured by access/secret keys, just like AWS S3. You can further interact with your "buckets" with common tools, just as if they were hosted on S3. + +Under a more advanced configuration, Minio runs in distributed mode, with [features](https://www.minio.io/features.html) including high-availability, mirroring, erasure-coding, and "bitrot detection". + +![Minio Screenshot](../images/minio.png) + +Possible use-cases: + +1. Sharing files (_protected by user accounts with secrets_) via HTTPS, either as read-only or read-write, in such a way that the bucket could be mounted to a remote filesystem using common S3-compatible tools, like [goofys](https://github.com/kahing/goofys). Ever wanted to share a folder with friends, but didn't want to open additional firewall ports etc? +2. Simulating S3 in a dev environment +3. Mirroring an S3 bucket locally + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need a directory to hold our minio file store, as well as our minio client config, so create a structure at /var/data/minio: + +``` +mkdir /var/data/minio +cd /var/data/minio +mkdir -p {mc,data} +``` + +### Prepare environment + +Create minio.env, and populate with the following variables +``` +MINIO_ACCESS_KEY= +MINIO_SECRET_KEY= +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! 
tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3.1' + +services: + app: + image: minio/minio + env_file: /var/data/config/minio/minio.env + volumes: + - /var/data/minio/data:/data + networks: + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:minio.example.com + - traefik.port=9000 + command: minio server /data + +networks: + traefik_public: + external: true +``` + +## Serving + +### Launch Minio stack + +Launch the Minio stack by running ```docker stack deploy minio -c ``` + +Log into your new instance at https://**YOUR-FQDN**, with the access key and secret key you specified in minio.env. + +If you created ```/var/data/minio```, you'll see nothing. If you referenced existing data, you should see all subdirectories in your existing folder represented as buckets. + +If all you need is single-user access to your data, you're done! 🎉 + +If, however, you want to expose data to multiple users, at different privilege levels, you'll need the minio client to create some users and (_potentially_) policies... + +### Setup minio client + +To administer the Minio server, we need the Minio client. While it's possible to download the minio client and run it locally, it's just as easy to do it within a small (5Mb) container. + +I created an alias on my docker nodes, allowing me to run mc quickly: + +``` +alias mc='docker run -it -v /docker/minio/mc/:/root/.mc/ --network traefik_public minio/mc' +``` + +Now I use the alias to launch the client shell, and connect to my minio instance (_I could also use the external, traefik-provided URL_) + +``` +root@ds1:~# mc config host add minio http://app:9000 admin iambatman +mc: Configuration written to `/root/.mc/config.json`. 
Please update your access credentials. +mc: Successfully created `/root/.mc/share`. +mc: Initialized share uploads `/root/.mc/share/uploads.json` file. +mc: Initialized share downloads `/root/.mc/share/downloads.json` file. +Added `minio` successfully. +root@ds1:~# +``` + +### Add (readonly) user + +Use mc to add a (readonly or readwrite) user, by running ``` mc admin user add minio ``` + +Example: + +``` +root@ds1:~# mc admin user add minio spiderman peterparker readonly +Added user `spiderman` successfully. +root@ds1:~# +``` + +Confirm by listing your users (_admin is excluded from the list_): + +``` +root@node1:~# mc admin user list minio +enabled spiderman readonly +root@node1:~# +``` + +### Make a bucket accessible to users + +By default, all buckets have no "policies" attached to them, and so can only be accessed by the administrative user. Having created some readonly/read-write users above, you'll be wanting to grant them access to buckets. + +The simplest permission scheme is "on or off". Either a bucket has a policy, or it doesn't. (_I believe you can apply policies to subdirectories of buckets in a more advanced configuration_) + +After **no** policy, the most restrictive policy you can attach to a bucket is "download". This policy will allow authenticated users to download contents from the bucket. 
Apply the "download" policy to a bucket by running ```mc policy download minio/```, i.e.: + +``` +root@ds1:# mc policy download minio/comics +Access permission for `minio/comics` is set to `download` +root@ds1:# +``` + +### Advanced bucketing + +There are some clever complexities you can achieve with user/bucket policies, including: + +* A public bucket, which requires no authentication to read or even write (_for a public dropbox, for example_) +* A special bucket, hidden from most users, but available to VIP users by application of a custom "[canned policy](https://docs.minio.io/docs/minio-multi-user-quickstart-guide.html)" + +### Mount a minio share remotely + +Having setup your buckets, users, and policies - you can give out your minio external URL, and user access keys to your remote users, and they can S3-mount your buckets, interacting with them based on their user policy (_read-only or read/write_) + +I tested the S3 mount using [goofys](https://github.com/kahing/goofys), "a high-performance, POSIX-ish Amazon S3 file system written in Go". + +First, I created ~/.aws/credentials, as follows: + +``` +[default] +aws_access_key_id=spiderman +aws_secret_access_key=peterparker +``` + +And then I ran (_in the foreground, for debugging_), ```goofys --f -debug_s3 --debug_fuse --endpoint=https://traefik.example.com ``` + +To permanently mount an S3 bucket using goofys, I'd add something like this to /etc/fstab: + +``` +goofys#bucket /mnt/mountpoint fuse _netdev,allow_other,--file-mode=0666 0 0 +``` + +## Chef's Notes + +1. There are many S3-filesystem-mounting tools available, I just picked Goofys because it's simple. Google is your friend :) +2. Some applications (_like [NextCloud](/recipes/nextcloud/)_) can natively mount S3 buckets +3. Some backup tools (_like [Duplicity](/recipes/duplicity/)_) can backup directly to S3 buckets + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? 
(_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/mqtt.md b/manuscript/recipes/mqtt.md new file mode 100644 index 0000000..a528ba3 --- /dev/null +++ b/manuscript/recipes/mqtt.md @@ -0,0 +1,213 @@ +hero: Kubernetes. The hero we deserve. + +!!! danger "This recipe is a work in progress" + This recipe is **incomplete**, and is featured to align the [patrons](https://www.patreon.com/funkypenguin)'s "premix" repository with the cookbook. "_premix_" is a private git repository available to [all Patreon patrons](https://www.patreon.com/funkypenguin), which includes all the necessary .yml files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```kubectl create -f *.yml``` 👍 + + So... There may be errors and inaccuracies. Jump into [Discord](http://chat.funkypenguin.co.nz) if you're encountering issues 😁 + +# MQTT broker + +I use Elias Kotlyar's [excellent custom firmware](https://github.com/EliasKotlyar/Xiaomi-Dafang-Hacks) for Xiaomi DaFang/XiaoFang cameras, enabling RTSP, MQTT, motion tracking, and other features, integrating directly with [Home Assistant](/recipes/homeassistant/). + +There's currently a [mysterious bug](https://github.com/EliasKotlyar/Xiaomi-Dafang-Hacks/issues/638) though, which prevents TCP communication between Home Assistant and the camera, when MQTT services are enabled on the camera and the mqtt broker runs on the same Raspberry Pi as Home Assistant, using [Hass.io](https://www.home-assistant.io/hassio/). + +A workaround to this bug is to run an MQTT broker **external** to the raspberry pi, which makes the whole problem GoAway(tm). Since an MQTT broker is a single, self-contained container, I've written this recipe as an introduction to our Kubernetes cluster design. 
+
+![MQTT Screenshot](../images/mqtt.png)
+
+[MQTT](https://mqtt.org/faq) stands for MQ Telemetry Transport. It is a publish/subscribe, extremely simple and lightweight messaging protocol, designed for constrained devices and low-bandwidth, high-latency or unreliable networks. The design principles are to minimise network bandwidth and device resource requirements whilst also attempting to ensure reliability and some degree of assurance of delivery. These principles also turn out to make the protocol ideal of the emerging “machine-to-machine” (M2M) or “Internet of Things” world of connected devices, and for mobile applications where bandwidth and battery power are at a premium.
+
+## Ingredients
+
+1. A [Kubernetes cluster](/kubernetes/digital-ocean/)
+
+## Preparation
+
+### Create data locations
+
+Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment.
+
+```
+mkdir /var/data/mqtt
+```
+
+### Create namespace
+
+We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the mqtt stack by creating the following .yaml:
+
+```
+cat <<EOF > /var/data/mqtt/namespace.yml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: mqtt
+EOF
+kubectl create -f /var/data/mqtt/namespace.yml
+```
+
+### Create persistent volume claim
+
+Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod.
Create a claim for the certbot data:
+
+```
+cat <<EOF > /var/data/mqtt/persistent-volumeclaim.yml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: mqtt-volumeclaim
+  namespace: mqtt
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+kubectl create -f /var/data/mqtt/persistent-volumeclaim.yml
+```
+
+### Create nodeport service
+
+I like to expose my services using nodeport (_limited to ports 30000-32767_), and then use an external haproxy load balancer to make these available externally. (_This avoids having to pay per-port charges for a loadbalancer from the cloud provider_)
+
+```
+cat <<EOF > /var/data/mqtt/service-nodeport.yml
+kind: Service
+apiVersion: v1
+metadata:
+  name: mqtt-nodeport
+  namespace: mqtt
+spec:
+  selector:
+    app: mqtt
+  type: NodePort
+  ports:
+  - name: mqtts
+    port: 8883
+    protocol: TCP
+    nodePort: 30883
+EOF
+kubectl create -f /var/data/mqtt/service-nodeport.yml
+```
+### Create secrets
+
+It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents.
+
+```
+echo -n "myapikeyissosecret" > cloudflare-key.secret
+echo -n "myemailaddress" > cloudflare-email.secret
+echo -n "myemailaddress" > letsencrypt-email.secret
+
+kubectl create secret -n mqtt generic mqtt-credentials \
+  --from-file=cloudflare-key.secret \
+  --from-file=cloudflare-email.secret \
+  --from-file=letsencrypt-email.secret
+```
+
+!!! tip "Why use ```echo -n```?"
+    Because. See [my blog post here](https://www.funkypenguin.co.nz/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why!
+
+## Serving
+
+### Create deployment
+
+Now that we have a volume, a service, and a namespace, we can create a deployment for the mqtt pod.
Note below the use of volume mounts, environment variables, as well as the secrets. + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary .yml files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```kubectl create -f *.yml``` 👍 + +``` +cat < /var/data/mqtt/mqtt.yml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + namespace: mqtt + name: mqtt + labels: + app: mqtt +spec: + replicas: 1 + selector: + matchLabels: + app: mqtt + template: + metadata: + labels: + app: mqtt + spec: + containers: + - image: funkypenguin/mqtt-certbot-dns + imagePullPolicy: Always +# only uncomment these to get the container to run so that we can transfer files into the PV +# command: [ "/bin/sleep" ] +# args: [ "1h" ] + env: + - name: DOMAIN + value: "*.funkypenguin.co.nz" + - name: EMAIL + valueFrom: + secretKeyRef: + name: mqtt-credentials + key: letsencrypt-email.secret + - name: CLOUDFLARE_EMAIL + valueFrom: + secretKeyRef: + name: mqtt-credentials + key: cloudflare-email.secret + - name: CLOUDFLARE_KEY + valueFrom: + secretKeyRef: + name: mqtt-credentials + key: cloudflare-key.secret +# uncomment this to test LetsEncrypt validations +# - name: TESTCERT +# value: "true" + name: mqtt + resources: + requests: + memory: "50Mi" + cpu: "0.1" + volumeMounts: + # We need the LE certs to persist across reboots to avoid getting rate-limited (bad, bad) + - name: mqtt-volumeclaim + mountPath: /etc/letsencrypt + # A configmap for the mosquitto.conf file + - name: mosquitto-conf + mountPath: /mosquitto/conf/mosquitto.conf + subPath: mosquitto.conf + # A configmap for the mosquitto passwd file + - name: mosquitto-passwd + mountPath: /mosquitto/conf/passwd + subPath: passwd + volumes: + - name: mqtt-volumeclaim + persistentVolumeClaim: + claimName: mqtt-volumeclaim + - name: mosquitto-conf + configMap: + name: mosquitto.conf + - name: 
mosquitto-passwd + configMap: + name: passwd +EOF +kubectl create -f /var/data/mqtt/mqtt.yml +``` + +Check that your deployment is running, with ```kubectl get pods -n mqtt```. After a minute or so, you should see a "Running" pod, as illustrated below: + +``` +[davidy:~/Documents/Personal/Projects/mqtt-k8s] 130 % kubectl get pods -n mqtt +NAME READY STATUS RESTARTS AGE +mqtt-65f4d96945-bjj44 1/1 Running 0 5m +[davidy:~/Documents/Personal/Projects/mqtt-k8s] % +``` + +To actually **use** your new MQTT broker, you'll need to connect to any one of your nodes (```kubectl get nodes -o wide```) on port 30883 (_the nodeport service we created earlier_). More info on that, and a loadbalancer design, to follow shortly :) + +## Chef's Notes + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/munin.md b/manuscript/recipes/munin.md new file mode 100644 index 0000000..be55f96 --- /dev/null +++ b/manuscript/recipes/munin.md @@ -0,0 +1,145 @@ +# Munin + +Munin is a networked resource monitoring tool that can help analyze resource trends and "what just happened to kill our performance?" problems. It is designed to be very plug and play. A default installation provides a lot of graphs with almost no work. + +![Munin Screenshot](../images/munin.png) + +Using Munin you can easily monitor the performance of your computers, networks, SANs, applications, weather measurements and whatever comes to mind. It makes it easy to determine "what's different today" when a performance problem crops up. It makes it easy to see how you're doing capacity-wise on any resources. + +Munin uses the excellent ​RRDTool (written by Tobi Oetiker) and the framework is written in Perl, while plugins may be written in any language. 
Munin has a master/node architecture in which the master connects to all the nodes at regular intervals and asks them for data. It then stores the data in RRD files, and (if needed) updates the graphs. One of the main goals has been ease of creating new plugins (graphs). + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik) configured per design +3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Prepare target nodes + +Depending on what you want to monitor, you'll want to install munin-node. On Ubuntu/Debian, you'll use ```apt-get install munin-node```, and on RHEL/CentOS, run ```yum install munin-node```. Remember to edit ```/etc/munin/munin-node.conf```, and set your node to allow the server to poll it, by adding ```cidr_allow x.x.x.x/x```. + +On CentOS Atomic, of course, you can't install munin-node directly, but you can run it as a containerized instance. In this case, you can't use swarm since you need the container running in privileged mode, so launch a munin-node container on each atomic host using: + +``` +docker run -d --name munin-node --restart=always \ + --privileged --net=host \ + -v /:/rootfs:ro \ + -v /sys:/sys:ro \ + -e ALLOW="cidr_allow 0.0.0.0/0" \ + -p 4949:4949 \ + --restart=always \ + funkypenguin/munin-node +``` + + +### Setup data locations + +We'll need several directories to bind-mount into our container, so create them in /var/data/munin: + +``` +mkdir /var/data/munin +cd /var/data/munin +mkdir -p {log,lib,run,cache} +``` + +### Prepare environment + +Create /var/data/config/munin/munin.env, and populate with the following variables. 
Use the OAUTH2 variables if you plan to use an [oauth2_proxy](/reference/oauth_proxy/) to protect munin, and set at a **minimum** the ```MUNIN_USER```, ```MUNIN_PASSWORD```, and ```NODES``` values: + +``` +# Use these if you plan to protect the webUI with an oauth_proxy +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= + +MUNIN_USER=odin +MUNIN_PASSWORD=lokiisadopted +SMTP_HOST=smtp.example.com +SMTP_PORT=587 +SMTP_USERNAME=smtp-username +SMTP_PASSWORD=smtp-password +SMTP_USE_TLS=false +SMTP_ALWAYS_SEND=false +SMTP_MESSAGE='[${var:group};${var:host}] -> ${var:graph_title} -> warnings: ${loop<,>:wfields ${var:label}=${var:value}} / criticals: ${loop<,>:cfields ${var:label}=${var:value}}' +ALERT_RECIPIENT=monitoring@example.com +ALERT_SENDER=alerts@example.com +NODES="node1:192.168.1.1 node2:192.168.1.2 node3:192.168.1.3" +SNMP_NODES="router1:10.0.0.254:9999" +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + + munin: + image: funkypenguin/munin-server + env_file: /var/data/config/munin/munin.env + networks: + - internal + volumes: + - /var/data/munin/log:/var/log/munin + - /var/data/munin/lib:/var/lib/munin + - /var/data/munin/run:/var/run/munin + - /var/data/munin/cache:/var/cache/munin + + proxy: + image: funkypenguin/oauth2_proxy + env_file: /var/data/config/munin/munin.env + networks: + - traefik_public + - internal + deploy: + labels: + - traefik.frontend.rule=Host:munin.example.com + - traefik.docker.network=traefik + - traefik.port=4180 + command: | + -cookie-secure=false + -upstream=http://munin:8080 + -redirect-url=https://munin.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.24.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + +## Serving + +### Launch Munin stack + +Launch the Munin stack by running ```docker stack deploy munin -c ``` + +Log into your new instance at https://**YOUR-FQDN**, with user and password password you specified in munin.env above. + +## Chef's Notes + +1. If you wanted to expose the Munin UI directly, you could remove the oauth2_proxy from the design, and move the traefik-related labels directly to the munin container. You'd also need to add the traefik_public network to the munin container. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 
💬 diff --git a/manuscript/recipies/nextcloud.md b/manuscript/recipes/nextcloud.md similarity index 56% rename from manuscript/recipies/nextcloud.md rename to manuscript/recipes/nextcloud.md index 9483935..aba3956 100644 --- a/manuscript/recipies/nextcloud.md +++ b/manuscript/recipes/nextcloud.md @@ -2,6 +2,11 @@ hero: Backup all your stuff. Share it. Privately. # NextCloud +!!! important + Ongoing development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + [NextCloud](https://www.nextcloud.org/) (_a [fork of OwnCloud](https://owncloud.org/blog/owncloud-statement-concerning-the-formation-of-nextcloud-by-frank-karlitschek/), led by original developer Frank Karlitschek_) is a suite of client-server software for creating and using file hosting services. It is functionally similar to Dropbox, although Nextcloud is free and open-source, allowing anyone to install and operate it on a private server. 
- https://en.wikipedia.org/wiki/Nextcloud @@ -19,12 +24,12 @@ This recipe is based on the official NextCloud docker image, but includes seprat ### Setup data locations -We'll need several directories for [static data](/reference/data_layout/#static-data) to bind-mount into our container, so create them in /var/data/nextcloud (_so that they can be [backed up](/recipies/duplicity/)_) +We'll need several directories for [static data](/reference/data_layout/#static-data) to bind-mount into our container, so create them in /var/data/nextcloud (_so that they can be [backed up](/recipes/duplicity/)_) ``` mkdir /var/data/nextcloud cd /var/data/nextcloud -mkdir -p {apps,config,data,database-dump} +mkdir -p {html,apps,config,data,database-dump} ``` Now make **more** directories for [runtime data](/reference/data_layout/#runtime-data) (_so that they can be **not** backed-up_): @@ -32,7 +37,7 @@ Now make **more** directories for [runtime data](/reference/data_layout/#runtime ``` mkdir /var/data/runtime/nextcloud cd /var/data/runtime/nextcloud -mkdir -p {db,solr,redis} +mkdir -p {db,redis} ``` @@ -85,10 +90,10 @@ services: - traefik.docker.network=traefik_public - traefik.port=80 volumes: - - /var/data/nextcloud/:/var/www/html - - /var/data/nextcloud/apps:/var/www/html/custom_apps - - /var/data/nextcloud/config:/var/www/html/config - - /var/data/nextcloud/data:/var/www/html/data + - /var/data/nextcloud/html:/var/www/html + - /var/data/nextcloud/apps:/var/www/html/custom_apps + - /var/data/nextcloud/config:/var/www/html/config + - /var/data/nextcloud/data:/var/www/html/data db: image: mariadb:10 @@ -124,17 +129,6 @@ services: volumes: - /var/data/runtime/nextcloud/redis:/data - solr: - image: solr:6-alpine - networks: - - internal - volumes: - - /var/data/runtime/nextcloud/solr:/opt/solr/server/solr/mycores - entrypoint: - - docker-entrypoint.sh - - solr-precreate - - nextant - cron: image: nextcloud volumes: @@ -177,18 +171,68 @@ Launch the NextCloud stack by running ```docker 
stack deploy nextcloud -c /index.php/settings/apps, and install the "**nextant**" app for full-text search +To make NextCloud [a little snappier](https://docs.nextcloud.com/server/13/admin_manual/configuration_server/caching_configuration.html), edit ```/var/data/nextcloud/config/config.php``` (_now that it's been created on the first container launch_), and add the following: -Then navigate to https:///index.php/settings/admin/additional, scroll down to **Nextant (Full Text Search)**, and enter the following: +``` + 'redis' => array( + 'host' => 'redis', + 'port' => 6379, + ), +``` -* Address of your solr servlet : **http://solr:8983/solr/** -* Core: **nextant** +### Use service discovery + +Want to use Calendar/Contacts on your iOS device? Want to avoid dictating long, rambling URL strings to your users, like ```https://nextcloud.batcave.com/remote.php/dav/principals/users/USERNAME/``` ? + +Huzzah! NextCloud supports [service discovery for CalDAV/CardDAV](https://tools.ietf.org/html/rfc6764), allowing you to simply tell your device the primary URL of your server (_**nextcloud.batcave.org**, for example_), and have the device figure out the correct WebDAV path to use. + +We (_and anyone else using the [NextCloud Docker image](https://hub.docker.com/_/nextcloud/)_) are using an SSL-terminating reverse proxy ([Traefik](/ha-docker-swarm/traefik/)) in front of our NextCloud container. In fact, it's not **possible** to setup SSL **within** the NextCloud container. + +When using a reverse proxy, your device requests a URL from your proxy (https://nextcloud.batcave.com/.well-known/caldav), and the reverse proxy then passes that request **unencrypted** to the internal URL of the NextCloud instance (i.e., http://172.16.12.123/.well-known/caldav) + +The Apache webserver on the NextCloud container (_knowing it was spoken to via HTTP_), responds with a 301 redirect to http://nextcloud.batcave.com/remote.php/dav/. See the problem? 
You requested an **HTTPS** (_encrypted_) url, and in return, you received a redirect to an **HTTP** (_unencrypted_) URL. Any sensible client (_iOS included_) will refuse such schenanigans. + +To correct this, we need to tell NextCloud to always redirect the .well-known URLs to an HTTPS location. This can only be done **after** deploying NextCloud, since it's only on first launch of the container that the .htaccess file is created in the first place. + +To make NextCloud service discovery work with Traefik reverse proxy, edit ```/var/data/nextcloud/html/.htaccess```, and change this: + +``` +RewriteRule ^\.well-known/carddav /remote.php/dav/ [R=301,L] +RewriteRule ^\.well-known/caldav /remote.php/dav/ [R=301,L] +``` + +To this: + +``` +RewriteRule ^\.well-known/carddav https://%{SERVER_NAME}/remote.php/dav/ [R=301,L] +RewriteRule ^\.well-known/caldav https://%{SERVER_NAME}/remote.php/dav/ [R=301,L] +``` + +Then restart your container with ```docker service update nextcloud_nextcloud --force``` to restart apache. + +Your can test for success by running ```curl -i https://nextcloud.batcave.org/.well-known/carddav```. You should get a 301 redirect to your equivalent of https://nextcloud.batcave.org/remote.php/dav/, as below: + +``` +[davidy:~] % curl -i https://nextcloud.batcave.org/.well-known/carddav +HTTP/2 301 +content-type: text/html; charset=iso-8859-1 +date: Wed, 12 Dec 2018 08:30:11 GMT +location: https://nextcloud.batcave.org/remote.php/dav/ +``` + +Note that this .htaccess can be overwritten by NextCloud, and you may have to reapply the change in future. I've created an [issue requesting a permanent fix](https://github.com/nextcloud/docker/issues/577). + +!!! important + Ongoing development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) ## Chef's Notes -1. 
Since many of my other recipies use PostgreSQL, I'd have preferred to use Postgres over MariaDB, but MariaDB seems to be the [preferred database type](https://github.com/nextcloud/server/issues/5912). +1. Since many of my other recipes use PostgreSQL, I'd have preferred to use Postgres over MariaDB, but MariaDB seems to be the [preferred database type](https://github.com/nextcloud/server/issues/5912). +2. I'm [not the first user](https://github.com/nextcloud/docker/issues/528) to stumble across the service discovery bug with reverse proxies. ### Tip your waiter (donate) diff --git a/manuscript/recipes/openldap.md b/manuscript/recipes/openldap.md new file mode 100644 index 0000000..7315d22 --- /dev/null +++ b/manuscript/recipes/openldap.md @@ -0,0 +1,448 @@ +# OpenLDAP + +!!! important + Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys! + + [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/) + +LDAP is probably the most ubiquitous authentication backend, before the current era of "[stupid social sign-ons](https://www.usatoday.com/story/tech/columnist/2018/10/23/how-separate-your-social-networks-your-regular-sites/1687763002/)". Many of the recipes featured in the cookbook (_[NextCloud](/recipe/nextcloud/), [Kanboard](/recipe/kanboard/), [Gitlab](/recipe/gitlab/), etc_) offer LDAP integration. + +## Big deal, who cares? + +If you're the only user of your tools, it probably doesn't bother you _too_ much to setup new user accounts for every tool. As soon as you start sharing tools with collaborators (_think 10 staff using NextCloud_), you suddenly feel the pain of managing a growing collection of local user accounts per-service. 
+
+Enter OpenLDAP - the most crusty, PITA, fiddly platform to setup (_yes, I'm a little bitter, [dynamic configuration backend](https://linux.die.net/man/5/slapd-config)!_), but hugely useful for one job - a Lightweight Protocol for managing a Directory used for Access (_see what I did [there](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)?_)
+
+The nice thing about OpenLDAP is, like MySQL, once you've set up the server, you probably never have to interact directly with it. There are many tools which will let you interact with your LDAP database via a(n ugly) UI.
+
+This recipe combines the raw power of OpenLDAP with the flexibility and featureset of LDAP Account Manager.
+
+![OpenLDAP Screenshot](../images/openldap.jpeg)
+
+## What's the takeaway?
+
+What you'll end up with is a directory structure which will allow integration with popular tools (_[NextCloud](/recipes/nextcloud/), [Kanboard](/recipes/kanboard/), [Gitlab](/recipes/gitlab/), etc_), as well as with KeyCloak (_an upcoming recipe_), for **true** SSO.
+
+## Ingredients
+
+1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md)
+2. [Traefik](/ha-docker-swarm/traefik_public) configured per design
+3. DNS entry for the hostname (_i.e. "lam.your-domain.com"_) you intend to use for LDAP Account Manager, pointed to your [keepalived](/ha-docker-swarm/keepalived/) IP
+
+## Preparation
+
+### Setup data locations
+
+We'll need several directories to bind-mount into our container, so create them in /var/data/openldap:
+
+```
+mkdir /var/data/openldap/openldap
+mkdir /var/data/runtime/openldap/
+```
+
+!!! note "Why 2 directories?"
+    For rationale, see my [data layout explanation](/reference/data_layout/)
+
+### Prepare environment
+
+Create /var/data/openldap/openldap.env, and populate with the following variables, customized for your own domain structure. 
Take care with LDAP_DOMAIN, this is core to your directory structure, and can't easily be changed later. + +``` +LDAP_DOMAIN=batcave.gotham +LDAP_ORGANISATION=BatCave Inc +LDAP_ADMIN_PASSWORD=supermansucks +LDAP_TLS=false + +# Use these if you plan to protect the LDAP Account Manager webUI with an oauth_proxy +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +``` + +!!! note + I use an [OAuth proxy](/reference/oauth_proxy/) to protect access to the web UI, when the sensitivity of the protected data (i.e. my authentication store) warrants it, or if I don't necessarily trust the security of the webUI. + +Create ```authenticated-emails.txt```, and populate with the email addresses (_matched to GitHub user accounts, in my case_) to which you want grant access, using OAuth2. + +### Create config.cfg + +The Dockerized version of LDAP Account Manager is a little fiddly. In order to maintain a config file which persists across container restarts, we need to present the container with a copy of /var/www/html/config/lam.conf, tweaked for our own requirements. + +Create ```/var/data/openldap/lam/config/config.cfg``` as follows: + +???+ note "Much scroll, very text. 
Click here to collapse it for better readability" + + ``` + # password to add/delete/rename configuration profiles (default: lam) + password: {SSHA}54haBZN/kfgNVJ+W3YJrI2dCic4= iCXkNA== + + # default profile, without ".conf" + default: batcave + + # log level + logLevel: 4 + + # log destination + logDestination: SYSLOG + + # session timeout in minutes + sessionTimeout: 30 + + # list of hosts which may access LAM + allowedHosts: + + # list of hosts which may access LAM Pro self service + allowedHostsSelfService: + + # encrypt session data + encryptSession: true + + # Password: minimum password length + passwordMinLength: 0 + + # Password: minimum uppercase characters + passwordMinUpper: 0 + + # Password: minimum lowercase characters + passwordMinLower: 0 + + # Password: minimum numeric characters + passwordMinNumeric: 0 + + # Password: minimum symbolic characters + passwordMinSymbol: 0 + + # Password: minimum character classes (0-4) + passwordMinClasses: 0 + + # Password: checked rules + checkedRulesCount: -1 + + # Password: must not contain part of user name + passwordMustNotContain3Chars: false + + # Password: must not contain user name + passwordMustNotContainUser: false + + # Email format (default/unix) + mailEOL: default + + # PHP error reporting (default/system) + errorReporting: default + + # License + license: + ``` + +### Create .cfg + +While config.cfg (_above_) defines application-level configuration, .cfg is used to configure "domain-specific" configuration. You probably only need a single profile, but LAM could theoretically be used to administer several totally unrelated LDAP servers, ergo the concept of "profiles". + +Create yours profile (_you chose a default profile in config.cfg above, remember?_) by creating ```/var/data/openldap/lam/config/.conf```, as follows: + +???+ note "Much scroll, very text. Click here to collapse it for better readability" + + ``` + # LDAP Account Manager configuration + # + # Please do not modify this file manually. 
The configuration can be done completely by the LAM GUI. + # + ################################################################################################### + + # server address (e.g. ldap://localhost:389 or ldaps://localhost:636) + ServerURL: ldap://openldap:389 + + # list of users who are allowed to use LDAP Account Manager + # names have to be separated by semicolons + # e.g. admins: cn=admin,dc=yourdomain,dc=org;cn=root,dc=yourdomain,dc=org + Admins: cn=admin,dc=batcave,dc=gotham + + # password to change these preferences via webfrontend (default: lam) + Passwd: {SSHA}h39N9+gg/Qf1K/986VkKrjWlkcI= S/IAUQ== + + # suffix of tree view + # e.g. dc=yourdomain,dc=org + treesuffix: dc=batcave,dc=gotham + + # default language (a line from config/language) + defaultLanguage: en_GB.utf8 + + # Path to external Script + scriptPath: + + # Server of external Script + scriptServer: + + # Access rights for home directories + scriptRights: 750 + + # Number of minutes LAM caches LDAP searches. + cachetimeout: 5 + + # LDAP search limit. + searchLimit: 0 + + # Module settings + + modules: posixAccount_user_minUID: 10000 + modules: posixAccount_user_maxUID: 30000 + modules: posixAccount_host_minMachine: 50000 + modules: posixAccount_host_maxMachine: 60000 + modules: posixGroup_group_minGID: 10000 + modules: posixGroup_group_maxGID: 20000 + modules: posixGroup_pwdHash: SSHA + modules: posixAccount_pwdHash: SSHA + + # List of active account types. 
+ activeTypes: user,group + + + types: suffix_user: ou=People,dc=batcave,dc=gotham + types: attr_user: #uid;#givenName;#sn;#uidNumber;#gidNumber + types: modules_user: inetOrgPerson,posixAccount,shadowAccount + + types: suffix_group: ou=Groups,dc=batcave,dc=gotham + types: attr_group: #cn;#gidNumber;#memberUID;#description + types: modules_group: posixGroup + + # Password mail subject + lamProMailSubject: Your password was reset + + # Password mail text + lamProMailText: Dear @@givenName@@ @@sn@@,+::++::+your password was reset to: @@newPassword@@+::++::++::+Best regards+::++::+deskside support+::+ + + + + serverDisplayName: + + + # enable TLS encryption + useTLS: no + + + # follow referrals + followReferrals: false + + + # paged results + pagedResults: false + + referentialIntegrityOverlay: false + + + # time zone + timeZone: Europe/London + + scriptUserName: + + scriptSSHKey: + + scriptSSHKeyPassword: + + + # Access level for this profile. + accessLevel: 100 + + + # Login method. + loginMethod: list + + + # Search suffix for LAM login. + loginSearchSuffix: dc=batcave,dc=gotham + + + # Search filter for LAM login. + loginSearchFilter: uid=%USER% + + + # Bind DN for login search. + loginSearchDN: + + + # Bind password for login search. + loginSearchPassword: + + + # HTTP authentication for LAM login. 
+ httpAuthentication: false + + + # Password mail from + lamProMailFrom: + + + # Password mail reply-to + lamProMailReplyTo: + + + # Password mail is HTML + lamProMailIsHTML: false + + + # Allow alternate address + lamProMailAllowAlternateAddress: true + + jobsBindPassword: + + jobsBindUser: + + jobsDatabase: + + jobsDBHost: + + jobsDBPort: + + jobsDBUser: + + jobsDBPassword: + + jobsDBName: + + jobToken: 190339140545 + + pwdResetAllowSpecificPassword: true + + pwdResetAllowScreenPassword: true + + pwdResetForcePasswordChange: true + + pwdResetDefaultPasswordOutput: 2 + + twoFactorAuthentication: none + + twoFactorAuthenticationURL: https://localhost + + twoFactorAuthenticationInsecure: + + twoFactorAuthenticationLabel: + + twoFactorAuthenticationOptional: + + twoFactorAuthenticationCaption: + tools: tool_hide_toolOUEditor: false + tools: tool_hide_toolProfileEditor: false + tools: tool_hide_toolSchemaBrowser: false + tools: tool_hide_toolServerInformation: false + tools: tool_hide_toolTests: false + tools: tool_hide_toolPDFEditor: false + tools: tool_hide_toolFileUpload: false + tools: tool_hide_toolMultiEdit: false + ``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this, at (```/var/data/config/openldap/openldap.yml```) + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 +``` +version: '3' + +services: + openldap: + image: osixia/openldap + env_file: /var/data/config/openldap/openldap.env + networks: + - traefik_public + - auth_internal + volumes: + - /var/data/runtime/openldap/:/var/lib/ldap + - /var/data/openldap/openldap/:/etc/ldap/slapd.d + + lam: + image: jacksgt/ldap-account-manager + networks: + - auth_internal + volumes: + - /var/data/openldap/lam/config/config.cfg:/var/www/html/config/config.cfg + - /var/data/openldap/lam/config/batcave.conf:/var/www/html/config/batcave.conf + + lam-proxy: + image: funkypenguin/oauth2_proxy + env_file: /var/data/config/openldap/openldap.env + networks: + - traefik_public + - auth_internal + deploy: + labels: + - traefik.frontend.rule=Host:lam.batcave.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + command: | + -cookie-secure=false + -upstream=http://lam:80 + -redirect-url=https://lam.batcave.com + -http-address=http://0.0.0.0:4180 + -email-domain=batcave.com + -provider=github + + +networks: + # Used to expose lam-proxy to external access, and openldap to keycloak + traefik_public: + external: true + + # Used to expose openldap to other apps which want to talk to LDAP, including LAM + auth_internal: + external: true +``` + +!!! warning + **Normally**, we set unique static subnets for every stack you deploy, and put the non-public facing components (like databases) in an dedicated _internal network. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + However, you're likely to want to use OpenLdap with KeyCloak, whose JBOSS startup script assumes a single interface, and will crash in a ball of 🔥 if you try to assign multiple interfaces to the container. 
+
+    Since we're going to want KeyCloak to be able to talk to OpenLDAP, we have no choice but to leave the OpenLDAP container on the "traefik_public" network. We can, however, create **another** overlay network (_auth_internal, see below_), add it to the openldap container, and use it to provide OpenLDAP access to our other stacks.
+
+Create **another** stack config file (```/var/data/config/openldap/auth.yml```) containing just the auth_internal network, and a dummy container:
+
+```
+version: '3'
+
+services:
+  helloworld:
+    image: hello-world
+    networks:
+      - internal
+
+networks:
+  internal:
+    driver: overlay
+    ipam:
+      config:
+        - subnet: 172.16.39.0/24
+```
+
+
+
+## Serving
+
+### Launch OpenLDAP stack
+
+Create the auth_internal overlay network, by running ```docker stack deploy auth -c /var/data/config/openldap/auth.yml```, then launch the OpenLDAP stack by running ```docker stack deploy openldap -c /var/data/config/openldap/openldap.yml```
+
+Log into your new LAM instance at https://**YOUR-FQDN**.
+
+On first login, you'll be prompted to create the "_ou=People_" and "_ou=Group_" elements. Proceed to create these.
+
+You've now set up your OpenLDAP directory structure, and your administration interface, and hopefully won't have to interact with the "special" LDAP Account Manager interface much again!
+
+Create your users using the "**New User**" button.
+
+
+!!! important
+    Development of this recipe is sponsored by [The Common Observatory](https://www.observe.global/). Thanks guys!
+
+    [![Common Observatory](../images/common_observatory.png)](https://www.observe.global/)
+
+## Chef's Notes
+
+1. The [KeyCloak](/recipes/keycloak/) recipe illustrates how to integrate KeyCloak with your LDAP directory, giving you a cleaner interface to manage users, and a raft of SSO / OAuth features.
+
+### Tip your waiter (donate) 👏
+
+Did you receive excellent service? Want to make your waiter happy? 
(_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/owntracks.md b/manuscript/recipes/owntracks.md new file mode 100644 index 0000000..caf6c91 --- /dev/null +++ b/manuscript/recipes/owntracks.md @@ -0,0 +1,122 @@ +# OwnTracks + +[OwnTracks](https://owntracks.org/) allows you to keep track of your own location. You can build your private location diary or share it with your family and friends. OwnTracks is open-source and uses open protocols for communication so you can be sure your data stays secure and private. + +![OwnTracks Screenshot](../images/owntracks.png) + +Using a smartphone app, OwnTracks allows you to collect and analyse your own location data **without** sharing this data with a cloud provider (_i.e. Apple, Google_). Potential use cases are: + +* Sharing family locations without relying on Apple Find-My-friends +* Performing automated actions in [HomeAssistant](/recipes/homeassistant/) when you arrive/leave home + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik) configured per design +3. 
DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need a directory so store OwnTracks' data , so create ```/var/data/owntracks```: + +``` +mkdir /var/data/owntracks +``` + +### Prepare environment + +Create owntracks.env, and populate with the following variables + +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= + +OTR_USER=recorder +OTR_PASSWD=yourpassword +MQTTHOSTNAME=owntracks.example.com +HOSTLIST=owntracks.example.com +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: "3.0" + +services: + owntracks-app: + image: funkypenguin/owntracks + env_file : /var/data/config/owntracks/owntracks.env + volumes: + - /var/data/owntracks:/owntracks + networks: + - internal + ports: + - 1883:1883 + - 8883:8883 + - 8083:8083 + + owntracks-proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/owntracks/owntracks.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:owntracks.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/owntracks/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://owntracks-app:8083 + -redirect-url=https://owntracks.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: 
+      config:
+        - subnet: 172.16.15.0/24
+```
+
+!!! note
+    Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here.
+
+
+
+## Serving
+
+### Launch OwnTracks stack
+
+Launch the OwnTracks stack by running ```docker stack deploy owntracks -c /var/data/config/owntracks/owntracks.yml```
+
+Log into your new instance at https://**YOUR-FQDN**, with the user and password you specified in owntracks.env above.
+
+## Chef's Notes
+
+1. If you wanted to expose the OwnTracks Web UI directly, you could remove the oauth2_proxy from the design, and move the traefik-related labels directly to the owntracks container. You'd also need to add the traefik network to the owntracks container.
+2. I'm using my own image rather than owntracks/recorderd, because of a [potentially swarm-breaking bug](https://github.com/owntracks/recorderd/issues/14) I found in the official container. If this gets resolved (_or if I was mistaken_) I'll update the recipe accordingly.
+3. By default, you'll get a fully accessible, unprotected MQTT broker. This may not be suitable for public exposure, so you'll want to look into securing mosquitto with TLS and ACLs.
+
+### Tip your waiter (donate) 👏
+
+Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏
+
+### Your comments? 💬
diff --git a/manuscript/recipes/phpipam.md b/manuscript/recipes/phpipam.md
new file mode 100644
index 0000000..53c4172
--- /dev/null
+++ b/manuscript/recipes/phpipam.md
@@ -0,0 +1,216 @@
+# phpIPAM
+
+phpIPAM is an open-source web IP address management application (_IPAM_). Its goal is to provide light, modern and useful IP address management. It is a PHP-based application with a MySQL database backend, using jQuery libraries, ajax and HTML5/CSS3 features. 
+
+![phpIPAM Screenshot](../images/phpipam.png)
+
+phpIPAM fulfils a non-sexy, but important role - It helps you manage your IP address allocation.
+
+## Why should you care about this?
+
+You probably have a home network, with 20-30 IP addresses, for your family devices, your [IoT devices](/recipes/homeassistant/), your smart TV, etc. If you want to (a) monitor them, and (b) audit who does what, you care about what IPs they're assigned by your DHCP server.
+
+You could simply keep track of all devices with leases in your DHCP server, but what happens if your (_hypothetical?_) Ubiquiti EdgeRouter X crashes and burns due to lack of disk space, and you lose track of all your leases? Well, you have to start from scratch, is what!
+
+And that [HomeAssistant](/recipes/homeassistant/) config, which you so carefully compiled, refers to each device by IP/DNS name, so you'd better make sure you recreate it consistently!
+
+Enter phpIPAM. A tool designed to help home users as well as large organisations keep track of their IP (_and VLAN, VRF, and AS number_) allocations.
+
+## Ingredients
+
+1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md)
+2. [Traefik](/ha-docker-swarm/traefik_public) configured per design
+3. DNS entry for the hostname (_i.e. 
"phpipam.your-domain.com"_) you intend to use for phpIPAM, pointed to your [keepalived](/ha-docker-swarm/keepalived/) IP
+
+## Preparation
+
+### Setup data locations
+
+We'll need several directories to bind-mount into our container, so create them in /var/data/phpipam:
+
+```
+mkdir /var/data/phpipam/database-dump -p
+mkdir /var/data/runtime/phpipam -p
+```
+
+### Prepare environment
+
+Create phpipam.env, and populate with the following variables:
+```
+# Setup for github, phpipam application
+OAUTH2_PROXY_CLIENT_ID=
+OAUTH2_PROXY_CLIENT_SECRET=
+OAUTH2_PROXY_COOKIE_SECRET=
+
+# For MariaDB/MySQL database
+MYSQL_ROOT_PASSWORD=imtoosecretformyshorts
+MYSQL_DATABASE=phpipam
+MYSQL_USER=phpipam
+MYSQL_PASSWORD=secret
+
+# phpIPAM-specific variables
+MYSQL_ENV_MYSQL_USER=phpipam
+MYSQL_ENV_MYSQL_PASSWORD=secret
+MYSQL_ENV_MYSQL_DB=phpipam
+MYSQL_ENV_MYSQL_HOST=db
+
+# For backup
+BACKUP_NUM_KEEP=7
+BACKUP_FREQUENCY=1d
+```
+
+Additionally, create phpipam-backup.env, and populate with the following variables:
+
+```
+# For MariaDB/MySQL database
+MYSQL_ROOT_PASSWORD=imtoosecretformyshorts
+MYSQL_DATABASE=phpipam
+MYSQL_USER=phpipam
+MYSQL_PASSWORD=secret
+
+# For backup
+BACKUP_NUM_KEEP=7
+BACKUP_FREQUENCY=1d
+```
+
+### Create nginx.conf
+
+I usually protect my stacks using an [oauth proxy](/reference/oauth_proxy/) container in front of the app. This protects me from either accidentally exposing a platform to the world, or having an insecure platform accessed and abused.
+
+In the case of phpIPAM, the oauth_proxy creates an additional complexity, since it passes the "Authorization" HTTP header to the phpIPAM container. phpIPAM then examines the header, determines that the provided username (_my email address associated with my oauth provider_) doesn't match a local user account, and denies me access without the opportunity to retry. 
+ +The (_dirty_) solution I've come up with is to insert an Nginx instance in the path between the oauth_proxy and the phpIPAM container itself. Nginx can remove the authorization header, so that phpIPAM can prompt me to login with a web-based form. + +Create /var/data/phpipam/nginx.conf as follows: + + +``` +upstream app-upstream { + server app:80; +} + +server { + listen 80; + server_name ~.; + + # Just redirect everything to the upstream + # Yes, it's embarassing. We are just a mechanism to strip an AUTH header :( + location ^~ / { + proxy_pass http://app-upstream; + proxy_set_header Authorization ""; + } + +} +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + + db: + image: mariadb:10 + env_file: /var/data/config/phpipam/phpipam.env + networks: + - internal + volumes: + - /var/data/runtime/phpipam/db:/var/lib/mysql + + proxy: + image: funkypenguin/oauth2_proxy + env_file: /var/data/config/phpipam/phpipam.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:phpipam.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/phpipam/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://nginx + -redirect-url=https://phpipam.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + # Wait, what? Why do we have an oauth_proxy _and_ an nginx frontend for a simple webapp? + # Well, it's a long story. 
Basically, the phpipam container sees the "auth" headers passed by the + # oauth_proxy, and decides to use these exclusively to authenticate users. So no web-based login form, just "access denied" + # To work around this, we add nginx reverse proxy to the mix. A PITA, but an easy way to solve without altering the PHPIPAM code + nginx: + image: nginx:latest + networks: + - internal + volumes: + - /var/data/phpipam/nginx.conf:/etc/nginx/conf.d/default.conf:ro + + app: + image: pierrecdn/phpipam + env_file: /var/data/config/phpipam/phpipam.env + networks: + - internal + + db-backup: + image: mariadb:10 + env_file: /var/data/config/phpipam/phpipam.env + volumes: + - /var/data/phpipam/database-dump:/dump + - /etc/localtime:/etc/localtime:ro + entrypoint: | + bash -c 'bash -s < /dump/dump_\`date +%d-%m-%Y"_"%H_%M_%S\`.sql.gz + (ls -t /dump/dump*.sql.gz|head -n $$BACKUP_NUM_KEEP;ls /dump/dump*.sql.gz)|sort|uniq -u|xargs rm -- {} + sleep $$BACKUP_FREQUENCY + done + EOF' + networks: + - internal + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.47.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch phpIPAM stack + +Launch the phpIPAM stack by running ```docker stack deploy phpipam -c ``` + +Log into your new instance at https://**YOUR-FQDN**, and follow the on-screen prompts to set your first user/password. + +## Chef's Notes + +1. If you wanted to expose the phpIPAM UI directly, you could remove the oauth2_proxy and the nginx services from the design, and move the traefik_public-related labels directly to the phpipam container. You'd also need to add the traefik_public network to the phpipam container. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? 
(_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid_) ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/piwik.md b/manuscript/recipes/piwik.md similarity index 100% rename from manuscript/recipies/piwik.md rename to manuscript/recipes/piwik.md diff --git a/manuscript/recipies/plex.md b/manuscript/recipes/plex.md similarity index 94% rename from manuscript/recipies/plex.md rename to manuscript/recipes/plex.md index 34ec668..f225b9c 100644 --- a/manuscript/recipies/plex.md +++ b/manuscript/recipes/plex.md @@ -97,6 +97,7 @@ Log into your new instance at https://**YOUR-FQDN** (You'll need to setup a plex ## Chef's Notes 1. Plex uses port 32400 for remote access, using your plex.tv user/password to authenticate you. The inclusion of the traefik proxy in this recipe is simply to allow you to use the web client (as opposed to a client app) by connecting directly to your instance, as opposed to browsing your media via https://plex.tv/web +2. Got an NVIDIA GPU? See [this blog post](https://www.funkypenguin.co.nz/note/gpu-transcoding-with-emby-plex-using-docker-nvidia/) re how to use your GPU to transcode your media! ### Tip your waiter (donate) diff --git a/manuscript/recipies/portainer.md b/manuscript/recipes/portainer.md similarity index 100% rename from manuscript/recipies/portainer.md rename to manuscript/recipes/portainer.md diff --git a/manuscript/recipes/privatebin.md b/manuscript/recipes/privatebin.md new file mode 100644 index 0000000..baa36f4 --- /dev/null +++ b/manuscript/recipes/privatebin.md @@ -0,0 +1,70 @@ +# PrivateBin + +PrivateBin is a minimalist, open source online pastebin where the server can have zero knowledge of pasted data. We all need to paste data / log files somewhere when it doesn't make sense to paste it inline. With PrivateBin, you can own the hosting, access, and eventual deletion of this data. 
+ +![PrivateBin Screenshot](../images/privatebin.png) + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +We'll need a single location to bind-mount into our container, so create /var/data/privatebin, and make it world-writable (_there might be a more secure way to do this!_) + +``` +mkdir /var/data/privatebin +chmod 777 /var/data/privatebin/ +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: '3' + +services: + app: + image: privatebin/nginx-fpm-alpine + volumes: + - /var/data/privatebin:/srv/data + networks: + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:privatebin.example.com + - traefik.docker.network=traefik_public + - traefik.port=80 + +networks: + traefik_public: + external: true +``` + +## Serving + +### Launch PrivateBin stack + +Launch the PrivateBin stack by running ```docker stack deploy privatebin -c ``` + +Browse to your new instance at https://**YOUR-FQDN**, and start sharing pastes! (_PrivateBin requires no login_) + +## Chef's Notes + +1. The [PrivateBin repo](https://github.com/PrivateBin/PrivateBin/blob/master/INSTALL.md) explains how to tweak configuration options, or to use a database instead of file storage, if your volume justifies it :) +2. 
The inclusion of PrivateBin was due to the efforts of @gkoerk in our [Discord server](http://chat.funkypenguin.co.nz). Thanks Jerry!! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/realms.md b/manuscript/recipes/realms.md new file mode 100644 index 0000000..56c0c85 --- /dev/null +++ b/manuscript/recipes/realms.md @@ -0,0 +1,120 @@ +# Realms + +Realms is a git-based wiki (_like [Gollum](/recipes/gollum/), but with basic authentication and registration_) + +![Realms Screenshot](../images/realms.png) + +Features include: + +* Built with Bootstrap 3. +* Markdown (w/ HTML Support). +* Syntax highlighting (Ace Editor). +* Live preview. +* Collaboration (TogetherJS / Firepad). +* Drafts saved to local storage. +* Handlebars for templates and logic. + +!!! warning "Project likely abandoned" + + In my limited trial, Realms seems _less_ useful than [Gollum](/recipes/gollum/) for my particular use-case (_i.e., you're limited to markdown syntax only_), but other users may enjoy the basic user authentication and registration features, which Gollum lacks. + + Also of note is that the docker image is 1.17GB in size, and the handful of commits to the [source GitHub repo](https://github.com/scragg0x/realms-wiki/commits/master) in the past year has listed TravisCI build failures. This has many of the hallmarks of an abandoned project, to my mind. + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. 
DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Setup data locations + +Since we'll start with a basic Realms install, let's just create a single directory to hold the realms (SQLite) data: + +``` +mkdir /var/data/realms/ +``` + +Create realms.env, and populate with the following variables (_if you intend to use an [oauth_proxy](/reference/oauth_proxy) to double-secure your installation, which I recommend_) +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: "3" + +services: + realms: + image: realms/realms-wiki:latest + env_file: /var/data/config/realms/realms.env + volumes: + - /var/data/realms:/home/wiki/data + networks: + - internal + + realms_proxy: + image: funkypenguin/oauth2_proxy:latest + env_file : /var/data/config/realms/realms.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:realms.funkypenguin.co.nz + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/realms/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://realms:5000 + -redirect-url=https://realms.funkypenguin.co.nz + -http-address=http://0.0.0.0:4180 + -email-domain=funkypenguin.co.nz + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.35.0/24 +``` + +!!! 
note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Realms stack + +Launch the Realms stack by running ```docker stack deploy realms -c ``` + +Log into your new instance at https://**YOUR-FQDN**, authenticate against oauth_proxy, and you're immediately presented with Realms wiki, waiting for a fresh edit ;) + +## Chef's Notes + +1. If you wanted to expose the Realms UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the realms container. You'd also need to add the traefik_public network to the realms container. +2. The inclusion of Realms was due to the efforts of @gkoerk in our [Discord server](http://chat.funkypenguin.co.nz). Thanks gkoerk! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid_) ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipes/swarmprom.md b/manuscript/recipes/swarmprom.md new file mode 100644 index 0000000..26c1f33 --- /dev/null +++ b/manuscript/recipes/swarmprom.md @@ -0,0 +1,402 @@ +# Swarmprom + +[Swarmprom](https://github.com/stefanprodan/swarmprom) is a starter kit for Docker Swarm monitoring with [Prometheus](https://prometheus.io/), [Grafana](http://grafana.org/), [cAdvisor](https://github.com/google/cadvisor), [Node Exporter](https://github.com/prometheus/node_exporter), [Alert Manager](https://github.com/prometheus/alertmanager) and [Unsee](https://github.com/cloudflare/unsee). And it's **damn** sexy. See for yourself: + +![Swarmprom Screenshot](../images/swarmprom.png) + +So what do all these components do? 
+ +* [Prometheus](https://prometheus.io/docs/introduction/overview/) is an open-source systems monitoring and alerting toolkit originally built at SoundCloud. +* [Grafana](http://grafana.org/) is a tool to make data beautiful. +* [cAdvisor](https://github.com/google/cadvisor) +cAdvisor (Container Advisor) provides container users an understanding of the resource usage and performance characteristics of their running containers. It is a running daemon that collects, aggregates, processes, and exports information about running containers. +* [Node Exporter](https://github.com/prometheus/node_exporter) is a Prometheus exporter for hardware and OS metrics +* [Alert Manager](https://github.com/prometheus/alertmanager) Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping, and routing them to the correct receiver integrations such as email, Slack, etc. +* [Unsee](https://github.com/cloudflare/unsee) is an alert dashboard for Alert Manager + + +## How does this magic work? + +I'd encourage you to spend some time reading https://github.com/stefanprodan/swarmprom. Stefan has included detailed explanations about which elements perform which functions, as well as how to customize your stack. (_This is only a starting point, after all_) + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) on **17.09.0 or newer** (_doesn't work with CentOS Atomic, unfortunately_) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostnames you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +This is basically a rehash of stefanprodan's [instructions](https://github.com/stefanprodan/swarmprom) to match the way I've configured other recipes. 
+ +### Setup oauth provider + +Grafana includes decent login protections, but from what I can see, Prometheus, AlertManager, and Unsee do no authentication. In order to expose these publicly for your own consumption (my assumption for the rest of this recipe), you'll want to prepare to run [oauth_proxy](/reference/oauth_proxy/) containers in front of each of the 4 web UIs in this recipe. + +### Setup metrics + +Edit (_or create, depending on your OS_) /etc/docker/daemon.json, and add the following, to enable the experimental export of metrics to Prometheus: + +``` +{ + "metrics-addr" : "0.0.0.0:9323", + "experimental" : true +} +``` + +Restart docker with ```systemctl restart docker``` + + +### Setup and populate data locations + +We'll need several files to bind-mount into our containers, so create directories for them and get the latest copies: + +``` +mkdir -p /var/data/swarmprom/dockerd-exporter/ +cd /var/data/swarmprom/dockerd-exporter/ +wget https://raw.githubusercontent.com/stefanprodan/swarmprom/master/dockerd-exporter/Caddyfile + +mkdir -p /var/data/swarmprom/prometheus/rules/ +cd /var/data/swarmprom/prometheus/rules/ +wget https://raw.githubusercontent.com/stefanprodan/swarmprom/master/prometheus/rules/swarm_task.rules.yml +wget https://raw.githubusercontent.com/stefanprodan/swarmprom/master/prometheus/rules/swarm_node.rules.yml + +# Directories for holding runtime data +mkdir /var/data/runtime/swarmprom/grafana/ +mkdir /var/data/runtime/swarmprom/alertmanager/ +mkdir /var/data/runtime/prometheus + +chown nobody:nogroup /var/data/runtime/prometheus +``` + +### Prepare Grafana + +Grafana will make all the data we collect from our swarm beautiful. 
+ +Create /var/data/swarmprom/grafana.env, and populate with the following variables +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= + +# Disable basic auth (it conflicts with oauth_proxy) +GF_AUTH_BASIC_ENABLED=false + +# Set this to the real-world URL to your grafana install (else you get screwy CSS thanks to oauth_proxy) +GF_SERVER_ROOT_URL=https://grafana.example.com +GF_SERVER_DOMAIN=grafana.example.com + +# Set your default admin/pass here +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=ilovemybatmanunderpants +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), based on the original swarmprom [docker-compose.yml](https://github.com/stefanprodan/swarmprom/blob/master/docker-compose.yml) file + + +???+ note "This example is 274 lines long. Click here to collapse it for better readability" + + !!! tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + ``` + version: "3.3" + + networks: + net: + driver: overlay + attachable: true + + volumes: + prometheus: {} + grafana: {} + alertmanager: {} + + configs: + dockerd_config: + file: /var/data/swarmprom/dockerd-exporter/Caddyfile + node_rules: + file: /var/data/swarmprom/prometheus/rules/swarm_node.rules.yml + task_rules: + file: /var/data/swarmprom/prometheus/rules/swarm_task.rules.yml + + services: + dockerd-exporter: + image: stefanprodan/caddy + networks: + - internal + environment: + - DOCKER_GWBRIDGE_IP=172.18.0.1 + configs: + - source: dockerd_config + target: /etc/caddy/Caddyfile + deploy: + mode: global + resources: + limits: + memory: 128M + reservations: + memory: 64M + + cadvisor: + image: google/cadvisor + networks: + - internal + command: -logtostderr -docker_only + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - /:/rootfs:ro + - /var/run:/var/run + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + deploy: + mode: global + resources: + limits: + memory: 128M + reservations: + memory: 64M + + grafana: + image: stefanprodan/swarmprom-grafana:5.0.1 + networks: + - internal + env_file: /var/data/config/swarmprom/grafana.env + environment: + - GF_USERS_ALLOW_SIGN_UP=false + - GF_SMTP_ENABLED=${GF_SMTP_ENABLED:-false} + - GF_SMTP_FROM_ADDRESS=${GF_SMTP_FROM_ADDRESS:-grafana@test.com} + - GF_SMTP_FROM_NAME=${GF_SMTP_FROM_NAME:-Grafana} + - GF_SMTP_HOST=${GF_SMTP_HOST:-smtp:25} + - GF_SMTP_USER=${GF_SMTP_USER} + - GF_SMTP_PASSWORD=${GF_SMTP_PASSWORD} + volumes: + - /var/data/runtime/swarmprom/grafana:/var/lib/grafana + deploy: + mode: replicated + replicas: 1 + placement: + constraints: + - node.role == manager + resources: + limits: + memory: 128M + reservations: + memory: 64M + + grafana-proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/swarmprom/grafana.env + networks: + - internal + - traefik_public + deploy: + 
labels: + - traefik.frontend.rule=Host:grafana.swarmprom.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/swarmprom/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://grafana:3000 + -redirect-url=https://grafana.swarmprom.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + alertmanager: + image: stefanprodan/swarmprom-alertmanager:v0.14.0 + networks: + - internal + environment: + - SLACK_URL=${SLACK_URL:-https://hooks.slack.com/services/TOKEN} + - SLACK_CHANNEL=${SLACK_CHANNEL:-general} + - SLACK_USER=${SLACK_USER:-alertmanager} + command: + - '--config.file=/etc/alertmanager/alertmanager.yml' + - '--storage.path=/alertmanager' + volumes: + - /var/data/runtime/swarmprom/alertmanager:/alertmanager + deploy: + mode: replicated + replicas: 1 + placement: + constraints: + - node.role == manager + resources: + limits: + memory: 128M + reservations: + memory: 64M + + alertmanager-proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/swarmprom/alertmanager.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:alertmanager.swarmprom.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/swarmprom/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://alertmanager:9093 + -redirect-url=https://alertmanager.swarmprom.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + unsee: + image: cloudflare/unsee:v0.8.0 + networks: + - internal + environment: + - "ALERTMANAGER_URIS=default:http://alertmanager:9093" + deploy: + mode: replicated + replicas: 1 + + unsee-proxy: + image: a5huynh/oauth2_proxy + env_file : 
/var/data/config/swarmprom/unsee.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:unsee.swarmprom.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/swarmprom/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://unsee:8080 + -redirect-url=https://unsee.swarmprom.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + + node-exporter: + image: stefanprodan/swarmprom-node-exporter:v0.15.2 + networks: + - internal + environment: + - NODE_ID={{.Node.ID}} + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + - /etc/hostname:/etc/nodename + command: + - '--path.sysfs=/host/sys' + - '--path.procfs=/host/proc' + - '--collector.textfile.directory=/etc/node-exporter/' + - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)' + # no collectors are explicitely enabled here, because the defaults are just fine, + # see https://github.com/prometheus/node_exporter + # disable ipvs collector because it barfs the node-exporter logs full with errors on my centos 7 vm's + - '--no-collector.ipvs' + deploy: + mode: global + resources: + limits: + memory: 128M + reservations: + memory: 64M + + prometheus: + image: stefanprodan/swarmprom-prometheus:v2.2.0-rc.0 + networks: + - internal + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention=24h' + volumes: + - /var/data/runtime/swarmprom/prometheus:/prometheus + configs: + - source: node_rules + target: /etc/prometheus/swarm_node.rules.yml + - source: task_rules + target: /etc/prometheus/swarm_task.rules.yml + deploy: + mode: replicated + replicas: 1 + 
placement: + constraints: + - node.role == manager + resources: + limits: + memory: 2048M + reservations: + memory: 128M + + prometheus-proxy: + image: a5huynh/oauth2_proxy + env_file : /var/data/config/swarmprom/prometheus.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:prometheus.swarmprom.example.com + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /var/data/config/swarmprom/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://prometheus:9090 + -redirect-url=https://prometheus.swarmprom.example.com + -http-address=http://0.0.0.0:4180 + -email-domain=example.com + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + + + networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.29.0/24 + ``` + + !!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Swarmprom stack + +Launch the Swarm stack by running ```docker stack deploy swarmprom -c ``` + +Log into your new grafana instance, check out your beautiful graphs. Move onto drooling over Prometheus, AlertManager, and Unsee. + +## Chef's Notes + +1. Pay close attention to the ```grafana.env``` config. If you encounter errors about ```basic auth failed```, or failed CSS, it's likely due to misconfiguration of one of the grafana environment variables. + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 
💬 diff --git a/manuscript/recipies/owntracks.md b/manuscript/recipes/template.md similarity index 69% rename from manuscript/recipies/owntracks.md rename to manuscript/recipes/template.md index 0909dcd..498f0c6 100644 --- a/manuscript/recipies/owntracks.md +++ b/manuscript/recipes/template.md @@ -1,3 +1,10 @@ +hero: Not all heroes wear capes + +!!! danger "This recipe is a work in progress" + This recipe is **incomplete**, and is featured to align the [patrons](https://www.patreon.com/funkypenguin)'s "premix" repository with the cookbook. "_premix_" is a private git repository available to [all Patreon patrons](https://www.patreon.com/funkypenguin), which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + So... There may be errors and inaccuracies. Jump into [Discord](http://chat.funkypenguin.co.nz) if you're encountering issues 😁 + # NAME Intro @@ -9,8 +16,8 @@ Details ## Ingredients 1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) -2. [Traefik](/ha-docker-swarm/traefik) configured per design -3. 3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. 
DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP ## Preparation @@ -24,9 +31,6 @@ cd /var/data/wekan mkdir -p {wekan-db,wekan-db-dump} ``` -Note about mosquitto and chosen image: -https://github.com/owntracks/recorderd/issues/14 - ### Prepare environment Create wekan.env, and populate with the following variables @@ -63,16 +67,16 @@ services: - /var/data/wekan/wekan-db-dump:/dump proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file: /var/data/wekan/wekan.env networks: - - traefik + - traefik_public - internal deploy: labels: - - traefik.frontend.rule=Host:wekan.example.com - - traefik.docker.network=traefik - - traefik.port=4180 + - traefik_public.frontend.rule=Host:wekan.example.com + - traefik_public.docker.network=traefik_public + - traefik_public.port=4180 command: | -cookie-secure=false -upstream=http://wekan:80 @@ -88,7 +92,7 @@ services: env_file: /var/data/wekan/wekan.env networks: - traefik: + traefik_public: external: true internal: driver: overlay @@ -112,7 +116,7 @@ Log into your new instance at https://**YOUR-FQDN**, with user "root" and the pa ## Chef's Notes -1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik-related labels directly to the wekan container. You'd also need to add the traefik network to the wekan container. +1. If you wanted to expose the Wekan UI directly, you could remove the oauth2_proxy from the design, and move the traefik_public-related labels directly to the wekan container. You'd also need to add the traefik_public network to the wekan container. 
### Tip your waiter (donate) diff --git a/manuscript/recipies/tiny-tiny-rss.md b/manuscript/recipes/tiny-tiny-rss.md similarity index 100% rename from manuscript/recipies/tiny-tiny-rss.md rename to manuscript/recipes/tiny-tiny-rss.md diff --git a/manuscript/recipies/turtle-pool.md b/manuscript/recipes/turtle-pool.md similarity index 98% rename from manuscript/recipies/turtle-pool.md rename to manuscript/recipes/turtle-pool.md index a9dcee6..884c31e 100644 --- a/manuscript/recipies/turtle-pool.md +++ b/manuscript/recipes/turtle-pool.md @@ -2,7 +2,7 @@ hero: How to setup a TurtleCoin Mining Pool # TurtleCoin Mining Pool -[Cryptocurrency miners](/recipies/cryptominer) will "pool" their GPU resources ("_hashpower_") into aggregate "_mining pools_", so that by the combined effort of all the miners, the pool will receive a reward for the blocks "mined" into the blockchain, and this reward will be distributed among the miners. +[Cryptocurrency miners](/recipes/cryptominer) will "pool" their GPU resources ("_hashpower_") into aggregate "_mining pools_", so that by the combined effort of all the miners, the pool will receive a reward for the blocks "mined" into the blockchain, and this reward will be distributed among the miners. 
![Turtle Pool Screenshot](../images/turtle-pool.png) diff --git a/manuscript/recipies/wallabag.md b/manuscript/recipes/wallabag.md similarity index 98% rename from manuscript/recipies/wallabag.md rename to manuscript/recipes/wallabag.md index dfbba8f..a1bf3cf 100644 --- a/manuscript/recipies/wallabag.md +++ b/manuscript/recipes/wallabag.md @@ -8,7 +8,7 @@ All saved data (_pages, annotations, images, tags, etc_) are stored on your own ![Wallabag Screenshot](../images/wallabag.png) -There are plugins for [Chrome](https://chrome.google.com/webstore/detail/wallabagger/gbmgphmejlcoihgedabhgjdkcahacjlj) and [Firefox](https://addons.mozilla.org/firefox/addon/wallabagger/), as well as apps for [iOS](https://appsto.re/fr/YeqYfb.i), [Android](https://play.google.com/store/apps/details?id=fr.gaulupeau.apps.InThePoche), etc. Wallabag will also integrate nicely with my favorite RSS reader, [Miniflux](https://miniflux.net/) (_for which there is an [existing recipe](/recipies/miniflux)_). +There are plugins for [Chrome](https://chrome.google.com/webstore/detail/wallabagger/gbmgphmejlcoihgedabhgjdkcahacjlj) and [Firefox](https://addons.mozilla.org/firefox/addon/wallabagger/), as well as apps for [iOS](https://appsto.re/fr/YeqYfb.i), [Android](https://play.google.com/store/apps/details?id=fr.gaulupeau.apps.InThePoche), etc. Wallabag will also integrate nicely with my favorite RSS reader, [Miniflux](https://miniflux.net/) (_for which there is an [existing recipe](/recipes/miniflux)_). [Here's a video](https://player.vimeo.com/video/167435064) which shows off the UI a bit more. 
@@ -22,7 +22,7 @@ There are plugins for [Chrome](https://chrome.google.com/webstore/detail/wallaba ### Setup data locations -We need a filesystem location to store images that Wallabag downloads from the original sources, to re-display when you read your articles, as well as nightly database dumps (_which you **should [backup](/recipies/duplicity/)**_), so create something like this: +We need a filesystem location to store images that Wallabag downloads from the original sources, to re-display when you read your articles, as well as nightly database dumps (_which you **should [backup](/recipes/duplicity/)**_), so create something like this: ``` mkdir -p /var/data/wallabag @@ -93,7 +93,7 @@ services: - /var/data/wallabag/images:/var/www/wallabag/web/assets/images wallabag_proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file: /var/data/config/wallabag/wallabag.env networks: - internal diff --git a/manuscript/recipies/wekan.md b/manuscript/recipes/wekan.md similarity index 99% rename from manuscript/recipies/wekan.md rename to manuscript/recipes/wekan.md index 1edf316..5a17d1f 100644 --- a/manuscript/recipies/wekan.md +++ b/manuscript/recipes/wekan.md @@ -34,6 +34,7 @@ You'll need to know the following: 1. Choose an oauth provider, and obtain a client ID and secret 2. Create wekan.env, and populate with the following variables + ``` OAUTH2_PROXY_CLIENT_ID= OAUTH2_PROXY_CLIENT_SECRET= @@ -70,7 +71,7 @@ services: - /var/data/wekan/database-dump:/dump proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file: /var/data/config/wekan/wekan.env networks: - traefik diff --git a/manuscript/recipes/wetty.md b/manuscript/recipes/wetty.md new file mode 100644 index 0000000..e743c87 --- /dev/null +++ b/manuscript/recipes/wetty.md @@ -0,0 +1,110 @@ +hero: Terminal in a browser, baby! 💻 + +# Wetty + +[Wetty](https://github.com/krishnasrinivas/wetty) is a responsive, modern terminal, in your web browser. Yes, your browser. 
When combined with secure authentication and SSL encryption, it becomes a useful tool for quick and easy remote access. + +![Wetty Screenshot](../images/wetty.png) + +## Why would you need SSH in a browser window? + +Need shell access to a node with no external access? Deploy Wetty behind an [oauth_proxy](/reference/oauth_proxy/) with a SSL-terminating reverse proxy ([traefik](/ha-docker-swarm/traefik/)), and suddenly you have the means to SSH to your private host from any web browser (_protected by your [oauth_proxy](/reference/oauth_proxy/) of course, and your OAuth provider's 2FA_) + +Here are some other possible use cases: + +1. Access to SSH / CLI from an environment where outgoing SSH is locked down, or SSH client isn't / can't be installed. (_i.e., a corporate network_) +2. Access to long-running processes inside a tmux session (_like [irrsi](https://irssi.org/)_) +3. Remote access to a VM / [container running Kali linux](https://github.com/offensive-security/kali-linux-docker), for penetration testing + +## Ingredients + +1. [Docker swarm cluster](/ha-docker-swarm/design/) with [persistent shared storage](/ha-docker-swarm/shared-storage-ceph.md) +2. [Traefik](/ha-docker-swarm/traefik_public) configured per design +3. DNS entry for the hostname you intend to use, pointed to your [keepalived](ha-docker-swarm/keepalived/) IP + +## Preparation + +### Prepare environment + +Create wetty.env, and populate with the following variables per the [oauth_proxy](/reference/oauth_proxy/) instructions: +``` +OAUTH2_PROXY_CLIENT_ID= +OAUTH2_PROXY_CLIENT_SECRET= +OAUTH2_PROXY_COOKIE_SECRET= + +# To use WeTTY to SSH to a host besides the (mostly useless) alpine container it comes with +SSHHOST=batcomputer.batcave.com +SSHUSER=batman +``` + +### Setup Docker Swarm + +Create a docker swarm config file in docker-compose syntax (v3), something like this: + +!!! 
tip + I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` 👍 + + +``` +version: "3" +services: + wetty: + image: krishnasrinivas/wetty + env_file : /var/data/config/wetty/wetty.env + networks: + - internal + proxy: + image: funkypenguin/oauth2_proxy:latest + env_file: /var/data/config/wetty/wetty.env + networks: + - internal + - traefik_public + deploy: + labels: + - traefik.frontend.rule=Host:wetty.funkypenguin.co.nz + - traefik.docker.network=traefik_public + - traefik.port=4180 + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/data/config/wetty/authenticated-emails.txt:/authenticated-emails.txt + command: | + -cookie-secure=false + -upstream=http://wetty:3000 + -redirect-url=https://wetty.funkypenguin.co.nz + -http-address=http://0.0.0.0:4180 + -provider=github + -authenticated-emails-file=/authenticated-emails.txt + +networks: + traefik_public: + external: true + internal: + driver: overlay + ipam: + config: + - subnet: 172.16.45.0/24 +``` + +!!! note + Setup unique static subnets for every stack you deploy. This avoids IP/gateway conflicts which can otherwise occur when you're creating/removing stacks a lot. See [my list](/reference/networks/) here. + + + +## Serving + +### Launch Wetty stack + +Launch the Wetty stack by running ```docker stack deploy wetty -c ``` + +Browse to your new browser-cli-terminal at https://**YOUR-FQDN**. Authenticate with your OAuth provider, and then proceed to login, either to the remote host you specified (_batcomputer.batcave.com, in the example above_), or using user and password "term" to log directly into the Wetty alpine container (_from which you can establish egress SSH_) + +## Chef's Notes + +1. 
You could set SSHHOST to the IP of the "docker0" interface on your host, which is normally 172.17.0.1. (_Or run ```/sbin/ip route|awk '/default/ { print $3 }'``` in the container_) This would then provide you the ability to remote-manage your swarm with only web access to Wetty. +2. The inclusion of Wetty was due to the efforts of @gpulido in our [Discord server](http://chat.funkypenguin.co.nz). Thanks Gabriel! + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/recipies/workflow.md b/manuscript/recipes/workflow.md similarity index 100% rename from manuscript/recipies/workflow.md rename to manuscript/recipes/workflow.md diff --git a/manuscript/recipies/autopirate/plexpy.md b/manuscript/recipies/autopirate/plexpy.md deleted file mode 100644 index 0629d7a..0000000 --- a/manuscript/recipies/autopirate/plexpy.md +++ /dev/null @@ -1,67 +0,0 @@ -!!! warning - This is not a complete recipe - it's a component of the [AutoPirate](/recipies/autopirate/) "_uber-recipe_", but has been split into its own page to reduce complexity. - -# NAME - -Intro - -![Plexpy Screenshot](../../images/plexpy.png) - -Details - - - - - - -#### Plexpy - -## Inclusion into AutoPirate - -To include NZBGet in your [AutoPirate](/recipies/autopirate/) stack, include the following in your autopirate.yml stack definition file: - -!!! tip - I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. 
This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` - -``` -plexpy: - image: linuxserver/plexpy:latest - env_file : /var/data/config/autopirate/plexpy.env - volumes: - - /var/data/autopirate/plexpy:/config - networks: - - traefik_public - -plexpy_proxy: - image: zappi/oauth2_proxy - env_file : /var/data/config/autopirate/plexpy.env - dns_search: myswarm.example.com - networks: - - internal - - traefik_public - deploy: - labels: - - traefik.frontend.rule=Host:plexpy.example.com - - traefik.docker.network=traefik_public - - traefik.port=4180 - volumes: - - /var/data/config/autopirate/authenticated-emails.txt:/authenticated-emails.txt - command: | - -cookie-secure=false - -upstream=http://plexpy:8181 - -redirect-url=https://plexpy.example.com - -http-address=http://0.0.0.0:4180 - -email-domain=example.com - -provider=github - -authenticated-emails-file=/authenticated-emails.txt -``` - -!!! tip - I share (_with my [patreon patrons](https://www.patreon.com/funkypenguin)_) a private "_premix_" git repository, which includes necessary docker-compose and env files for all published recipes. This means that patrons can launch any recipe with just a ```git pull``` and a ```docker stack deploy``` - - -## Chef's Notes - -1. In many cases, tools will integrate with each other. I.e., Radarr needs to talk to SABnzbd and NZBHydra, Ombi needs to talk to Radarr, etc. Since each tool runs within the stack under its own name, just refer to each tool by name (i.e. "radarr"), and docker swarm will resolve the name to the appropriate container. You can identify the tool-specific port by looking at the docker-compose service definition. - -## Your comments? 
diff --git a/manuscript/reference/containers.md b/manuscript/reference/containers.md new file mode 100644 index 0000000..9bf644f --- /dev/null +++ b/manuscript/reference/containers.md @@ -0,0 +1,51 @@ +# Containers + +In the course of creating these recipes, I've often ended up creating containers with my own tweaks or changes. Below is a list of all the containers I've built. All of them are automatic builds, and the Dockerfiles and build logs are publicly available: + +Name | Description | Badges +--|--|-- +[funkypenguin/athena](https://hub.docker.com/r/funkypenguin/athena/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/athena.svg)](https://hub.docker.com/r/funkypenguin/athena/)| Athena cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/athena.svg)](https://hub.docker.com/r/funkypenguin/athena/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/athena.svg)](https://hub.docker.com/r/funkypenguin/athena/) +[funkypenguin/alertmanager-discord](https://hub.docker.com/r/funkypenguin/alertmanager-discord/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/alertmanager-discord.svg)](https://hub.docker.com/r/funkypenguin/alertmanager-discord/)| AlertManager-compatible webhook to send Prometheus alerts to a Discord channel |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/alertmanager-discord.svg)](https://hub.docker.com/r/funkypenguin/alertmanager-discord/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/alertmanager-discord.svg)](https://hub.docker.com/r/funkypenguin/alertmanager-discord/) +[funkypenguin/aeon](https://hub.docker.com/r/funkypenguin/aeon/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/aeon.svg)](https://hub.docker.com/r/funkypenguin/aeon/)| Aeon cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/aeon.svg)](https://hub.docker.com/r/funkypenguin/aeon/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/aeon.svg)](https://hub.docker.com/r/funkypenguin/aeon/) +[funkypenguin/bittube](https://hub.docker.com/r/funkypenguin/bittube/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/bittube.svg)](https://hub.docker.com/r/funkypenguin/bittube/)| BitTube cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/bittube.svg)](https://hub.docker.com/r/funkypenguin/bittube/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/bittube.svg)](https://hub.docker.com/r/funkypenguin/bittube/) +[funkypenguin/cryptonote-nodejs-pool](https://hub.docker.com/r/funkypenguin/cryptonote-nodejs-pool/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/cryptonote-nodejs-pool.svg)](https://hub.docker.com/r/funkypenguin/cryptonote-nodejs-pool/)| nodeJS-based mining pool for cryptonote-based mining pools, supporting advanced features like email/telegram notifications |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/cryptonote-nodejs-pool.svg)](https://hub.docker.com/r/funkypenguin/cryptonote-nodejs-pool/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/cryptonote-nodejs-pool.svg)](https://hub.docker.com/r/funkypenguin/cryptonote-nodejs-pool/) +[funkypenguin/conceal-core](https://hub.docker.com/r/funkypenguin/conceald/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/conceald.svg)](https://hub.docker.com/r/funkypenguin/conceald/)| Conceal cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/conceald.svg)](https://hub.docker.com/r/funkypenguin/conceald/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/conceald.svg)](https://hub.docker.com/r/funkypenguin/conceald/) +[funkypenguin/git-docker](https://hub.docker.com/r/funkypenguin/git-docker/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/git-docker.svg)](https://hub.docker.com/r/funkypenguin/git-docker/)| Git client in a docker container, for use on immutable OS (Atomic) hosts|[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/git-docker.svg)](https://hub.docker.com/r/funkypenguin/git-docker/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/git-docker.svg)](https://hub.docker.com/r/funkypenguin/git-docker/) +[funkypenguin/home-assistant](https://hub.docker.com/r/funkypenguin/home-assistant/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/home-assistant.svg)](https://hub.docker.com/r/funkypenguin/home-assistant/)| home-assistant |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/home-assistant.svg)](https://hub.docker.com/r/funkypenguin/home-assistant/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/home-assistant.svg)](https://hub.docker.com/r/funkypenguin/home-assistant/) +[funkypenguin/htpc-cron](https://hub.docker.com/r/funkypenguin/htpc-cron/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/htpc-cron.svg)](https://hub.docker.com/r/funkypenguin/htpc-cron/)| htpc-cron |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/htpc-cron.svg)](https://hub.docker.com/r/funkypenguin/htpc-cron/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/htpc-cron.svg)](https://hub.docker.com/r/funkypenguin/htpc-cron/) +[funkypenguin/kepl](https://hub.docker.com/r/funkypenguin/kepl/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/kepl.svg)](https://hub.docker.com/r/funkypenguin/kepl/)| KEPL cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/kepl.svg)](https://hub.docker.com/r/funkypenguin/kepl/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/kepl.svg)](https://hub.docker.com/r/funkypenguin/kepl/) +[funkypenguin/koson](https://hub.docker.com/r/funkypenguin/koson/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/koson.svg)](https://hub.docker.com/r/funkypenguin/koson/)| koson |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/koson.svg)](https://hub.docker.com/r/funkypenguin/koson/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/koson.svg)](https://hub.docker.com/r/funkypenguin/koson/) +[funkypenguin/loki](https://hub.docker.com/r/funkypenguin/loki/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/loki.svg)](https://hub.docker.com/r/funkypenguin/loki/)| loki |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/loki.svg)](https://hub.docker.com/r/funkypenguin/loki/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/loki.svg)](https://hub.docker.com/r/funkypenguin/loki/) +[funkypenguin/masari](https://hub.docker.com/r/funkypenguin/masari/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/masari.svg)](https://hub.docker.com/r/funkypenguin/masari/)| Masari cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/masari.svg)](https://hub.docker.com/r/funkypenguin/masari/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/masari.svg)](https://hub.docker.com/r/funkypenguin/masari/) +[funkypenguin/monero](https://hub.docker.com/r/funkypenguin/monero/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/monero.svg)](https://hub.docker.com/r/funkypenguin/monero/)| Monero cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/monero.svg)](https://hub.docker.com/r/funkypenguin/monero/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/monero.svg)](https://hub.docker.com/r/funkypenguin/monero/) +[funkypenguin/monkeytips](https://hub.docker.com/r/funkypenguin/monkeytips/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/monkeytips.svg)](https://hub.docker.com/r/funkypenguin/monkeytips/)| MonkeyTips cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/monkeytips.svg)](https://hub.docker.com/r/funkypenguin/monkeytips/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/monkeytips.svg)](https://hub.docker.com/r/funkypenguin/monkeytips/) +[funkypenguin/minio](https://hub.docker.com/r/funkypenguin/minio/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/minio.svg)](https://hub.docker.com/r/funkypenguin/minio/)| minio |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/minio.svg)](https://hub.docker.com/r/funkypenguin/minio/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/minio.svg)](https://hub.docker.com/r/funkypenguin/minio/) +[funkypenguin/mqtt-certbot-dns](https://hub.docker.com/r/funkypenguin/mqtt-certbot-dns/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/mqtt-certbot-dns.svg)](https://hub.docker.com/r/funkypenguin/mqtt-certbot-dns/)| mqtt-certbot-dns |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/mqtt-certbot-dns.svg)](https://hub.docker.com/r/funkypenguin/mqtt-certbot-dns/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/mqtt-certbot-dns.svg)](https://hub.docker.com/r/funkypenguin/mqtt-certbot-dns/) +[funkypenguin/munin-server](https://hub.docker.com/r/funkypenguin/munin-server/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/munin-server.svg)](https://hub.docker.com/r/funkypenguin/munin-server/)| munin-server |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/munin-server.svg)](https://hub.docker.com/r/funkypenguin/munin-server/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/munin-server.svg)](https://hub.docker.com/r/funkypenguin/munin-server/) +[funkypenguin/munin-node](https://hub.docker.com/r/funkypenguin/munin-node/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/munin-node.svg)](https://hub.docker.com/r/funkypenguin/munin-node/)| munin-node |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/munin-node.svg)](https://hub.docker.com/r/funkypenguin/munin-node/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/munin-node.svg)](https://hub.docker.com/r/funkypenguin/munin-node/) +[funkypenguin/mwlib](https://hub.docker.com/r/funkypenguin/mwlib/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/mwlib.svg)](https://hub.docker.com/r/funkypenguin/mwlib/)| mwlib |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/mwlib.svg)](https://hub.docker.com/r/funkypenguin/mwlib/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/mwlib.svg)](https://hub.docker.com/r/funkypenguin/mwlib/) +[funkypenguin/mqttwarn](https://hub.docker.com/r/funkypenguin/mqttwarn/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/mqttwarn.svg)](https://hub.docker.com/r/funkypenguin/mqttwarn/)| mqttwarn |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/mqttwarn.svg)](https://hub.docker.com/r/funkypenguin/mqttwarn/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/mqttwarn.svg)](https://hub.docker.com/r/funkypenguin/mqttwarn/) +[funkypenguin/nginx-proxy-letsencrypt](https://hub.docker.com/r/funkypenguin/nginx-proxy-letsencrypt/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/nginx-proxy-letsencrypt.svg)](https://hub.docker.com/r/funkypenguin/nginx-proxy-letsencrypt/)| nginx-proxy-letsencrypt |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/nginx-proxy-letsencrypt.svg)](https://hub.docker.com/r/funkypenguin/nginx-proxy-letsencrypt/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/nginx-proxy-letsencrypt.svg)](https://hub.docker.com/r/funkypenguin/nginx-proxy-letsencrypt/) +[funkypenguin/nzbdrone](https://hub.docker.com/r/funkypenguin/nzbdrone/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/nzbdrone.svg)](https://hub.docker.com/r/funkypenguin/nzbdrone/)| nzbdrone |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/nzbdrone.svg)](https://hub.docker.com/r/funkypenguin/nzbdrone/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/nzbdrone.svg)](https://hub.docker.com/r/funkypenguin/nzbdrone/) +[funkypenguin/owntracks](https://hub.docker.com/r/funkypenguin/owntracks/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/owntracks.svg)](https://hub.docker.com/r/funkypenguin/owntracks/)| Owntracks |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/owntracks.svg)](https://hub.docker.com/r/funkypenguin/owntracks/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/owntracks.svg)](https://hub.docker.com/r/funkypenguin/owntracks/) +[funkypenguin/oauth2_proxy](https://hub.docker.com/r/funkypenguin/oauth2_proxy/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/oauth2_proxy.svg)](https://hub.docker.com/r/funkypenguin/oauth2_proxy/)| OAuth2 proxy supporting self-signed upstream certs |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/oauth2_proxy.svg)](https://hub.docker.com/r/funkypenguin/oauth2_proxy/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/oauth2_proxy.svg)](https://hub.docker.com/r/funkypenguin/oauth2_proxy/) +[funkypenguin/plex](https://hub.docker.com/r/funkypenguin/plex/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/plex.svg)](https://hub.docker.com/r/funkypenguin/plex/)| plex |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/plex.svg)](https://hub.docker.com/r/funkypenguin/plex/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/plex.svg)](https://hub.docker.com/r/funkypenguin/plex/) +[funkypenguin/radarrsync](https://hub.docker.com/r/funkypenguin/radarrsync/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/radarrsync.svg)](https://hub.docker.com/r/funkypenguin/radarrsync/)| Python script to sync multiple Radarr instances |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/radarrsync.svg)](https://hub.docker.com/r/funkypenguin/radarrsync/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/radarrsync.svg)](https://hub.docker.com/r/funkypenguin/radarrsync/) +[funkypenguin/ryo-currency](https://hub.docker.com/r/funkypenguin/ryo-currency/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/ryo-currency.svg)](https://hub.docker.com/r/funkypenguin/ryo-currency/)| RYO cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/ryo-currency.svg)](https://hub.docker.com/r/funkypenguin/ryo-currency/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/ryo-currency.svg)](https://hub.docker.com/r/funkypenguin/ryo-currency/) +[funkypenguin/rtorrent](https://hub.docker.com/r/funkypenguin/rtorrent/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/rtorrent.svg)](https://hub.docker.com/r/funkypenguin/rtorrent/)| rtorrent |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/rtorrent.svg)](https://hub.docker.com/r/funkypenguin/rtorrent/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/rtorrent.svg)](https://hub.docker.com/r/funkypenguin/rtorrent/) +[funkypenguin/sabnzbd](https://hub.docker.com/r/funkypenguin/sabnzbd/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/sabnzbd.svg)](https://hub.docker.com/r/funkypenguin/sabnzbd/)| sabnzbd |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/sabnzbd.svg)](https://hub.docker.com/r/funkypenguin/sabnzbd/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/sabnzbd.svg)](https://hub.docker.com/r/funkypenguin/sabnzbd/) +[funkypenguin/turtlecoind](https://hub.docker.com/r/funkypenguin/turtlecoind/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/turtlecoind.svg)](https://hub.docker.com/r/funkypenguin/turtlecoind/)| turtlecoin |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/turtlecoind.svg)](https://hub.docker.com/r/funkypenguin/turtlecoind/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/turtlecoind.svg)](https://hub.docker.com/r/funkypenguin/turtlecoind/) +[funkypenguin/temasek](https://hub.docker.com/r/funkypenguin/temasek/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/temasek.svg)](https://hub.docker.com/r/funkypenguin/temasek/)| temasek |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/temasek.svg)](https://hub.docker.com/r/funkypenguin/temasek/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/temasek.svg)](https://hub.docker.com/r/funkypenguin/temasek/) +[funkypenguin/turtle-pool](https://hub.docker.com/r/funkypenguin/turtle-pool/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/turtle-pool.svg)](https://hub.docker.com/r/funkypenguin/turtle-pool/)| turtle-pool |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/turtle-pool.svg)](https://hub.docker.com/r/funkypenguin/turtle-pool/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/turtle-pool.svg)](https://hub.docker.com/r/funkypenguin/turtle-pool/) +[funkypenguin/turtlecoin](https://hub.docker.com/r/funkypenguin/turtlecoin/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/turtlecoin.svg)](https://hub.docker.com/r/funkypenguin/turtlecoin/)| turtlecoin |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/turtlecoin.svg)](https://hub.docker.com/r/funkypenguin/turtlecoin/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/turtlecoin.svg)](https://hub.docker.com/r/funkypenguin/turtlecoin/) +[funkypenguin/x-cash](https://hub.docker.com/r/funkypenguin/x-cash/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/x-cash.svg)](https://hub.docker.com/r/funkypenguin/x-cash/)| X-CASH cryptocurrency daemon/services |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/x-cash.svg)](https://hub.docker.com/r/funkypenguin/x-cash/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/x-cash.svg)](https://hub.docker.com/r/funkypenguin/x-cash/) +[funkypenguin/xmrig-cpu](https://hub.docker.com/r/funkypenguin/xmrig-cpu/)
[![Size](https://images.microbadger.com/badges/image/funkypenguin/xmrig-cpu.svg)](https://hub.docker.com/r/funkypenguin/xmrig-cpu/)| xmrig-cpu |[![Docker Pulls](https://img.shields.io/docker/pulls/funkypenguin/xmrig-cpu.svg)](https://hub.docker.com/r/funkypenguin/xmrig-cpu/)
[![Docker Stars](https://img.shields.io/docker/stars/funkypenguin/xmrig-cpu.svg)](https://hub.docker.com/r/funkypenguin/xmrig-cpu/)| + + +## Chef's Notes + +### Tip your waiter (donate) 👏 + +Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) See the [support](/support/) page for (_free or paid)_ ways to say thank you! 👏 + +### Your comments? 💬 diff --git a/manuscript/reference/data_layout.md b/manuscript/reference/data_layout.md index 82ae3b7..6a1ca36 100644 --- a/manuscript/reference/data_layout.md +++ b/manuscript/reference/data_layout.md @@ -1,6 +1,6 @@ # Data layout -The applications deployed in the stack utilize a combination of data-at-rest (_static config, files, etc_) and runtime data (_live database files_). The realtime data can't be [backed up](/recipies/duplicity) with a simple copy-paste, so where we employ databases, we also include containers to perform a regular export of database data to a filesystem location. +The applications deployed in the stack utilize a combination of data-at-rest (_static config, files, etc_) and runtime data (_live database files_). The realtime data can't be [backed up](/recipes/duplicity) with a simple copy-paste, so where we employ databases, we also include containers to perform a regular export of database data to a filesystem location. So that we can confidently backup all our data, I've setup a data layout as follows: diff --git a/manuscript/reference/infrastructure.md b/manuscript/reference/infrastructure.md new file mode 100644 index 0000000..9d2e105 --- /dev/null +++ b/manuscript/reference/infrastructure.md @@ -0,0 +1,192 @@ +## Terraform + +We _could_ describe the manual gcloud/ssh steps required to deploy a Kubernetes cluster to Google Kubernetes Engine, but using Terraform allows us to abstract ourself from the provider, and focus on just the infrastructure we need built. 
+ +The terraform config we produce is theoretically reusabel across AWS, Azure, OpenStack, as well as GCE. + +Install terraform locally - on OSX, I used ```brew install terraform``` + +Confirm it's correctly installed by running ```terraform -v```. My output looks like this: + +``` +[davidy:~] % terraform -v +Terraform v0.11.8 + +[davidy:~] % +``` + +## Google Cloud SDK + +I can't remember how I installed gcloud, but I don't think I used homebrew. Run ```curl https://sdk.cloud.google.com | bash``` for a standard install, followed by ```gcloud init``` for the first-time setup. + +This works: + +``` +cat <<-"BREWFILE" > Brewfile +cask 'google-cloud-sdk' +brew 'kubectl' +brew 'terraform' +BREWFILE +brew bundle --verbose +``` + + +### Prepare for terraform + +I followed [this guide](https://cloud.google.com/community/tutorials/managing-gcp-projects-with-terraform) to setup the following in the "best" way: + +Run ```gcloud beta billing accounts list``` to get your billing account + +``` + +export TF_ADMIN=tf-admin-funkypenguin +export TF_CREDS=serviceaccount.json +export TF_VAR_org_id=250566349101 +export TF_VAR_billing_account=0156AE-7AE048-1DA888 +export TF_VAR_region=australia-southeast1 +export GOOGLE_APPLICATION_CREDENTIALS=${TF_CREDS} + +gcloud projects create ${TF_ADMIN} --set-as-default +gcloud beta billing projects link ${TF_ADMIN} \ + --billing-account ${TF_VAR_billing_account} + + gcloud iam service-accounts create terraform \ + --display-name "Terraform admin account" + Created service account [terraform]. 
+ + gcloud iam service-accounts keys create ${TF_CREDS} \ + --iam-account terraform@${TF_ADMIN}.iam.gserviceaccount.com + created key [c0a49832c94aa0e23278165e2d316ee3d5bad438] of type [json] as [serviceaccount.json] for [terraform@funkypenguin-terraform-admin.iam.gserviceaccount.com] + + gcloud projects add-iam-policy-binding ${TF_ADMIN} \ + > --member serviceAccount:terraform@${TF_ADMIN}.iam.gserviceaccount.com \ + > --role roles/viewer + bindings: + - members: + - user:googlecloud2018@funkypenguin.co.nz + role: roles/owner + - members: + - serviceAccount:terraform@funkypenguin-terraform-admin.iam.gserviceaccount.com + role: roles/viewer + etag: BwV0VGSzYSU= + version: 1gcloud projects add-iam-policy-binding ${TF_ADMIN} \ +> --member serviceAccount:terraform@${TF_ADMIN}.iam.gserviceaccount.com \ +> --role roles/viewer +bindings: +- members: + - user:googlecloud2018@funkypenguin.co.nz + role: roles/owner +- members: + - serviceAccount:terraform@funkypenguin-terraform-admin.iam.gserviceaccount.com + role: roles/viewer +etag: BwV0VGSzYSU= +version: 1 + +gcloud projects add-iam-policy-binding ${TF_ADMIN} \ +> --member serviceAccount:terraform@${TF_ADMIN}.iam.gserviceaccount.com \ +> --role roles/storage.admin +bindings: +- members: + - user:googlecloud2018@funkypenguin.co.nz + role: roles/owner +- members: + - serviceAccount:terraform@funkypenguin-terraform-admin.iam.gserviceaccount.com + role: roles/storage.admin +- members: + - serviceAccount:terraform@funkypenguin-terraform-admin.iam.gserviceaccount.com + role: roles/viewer +etag: BwV0VGZwXfM= +version: 1 + + +gcloud services enable cloudresourcemanager.googleapis.com +gcloud services enable cloudbilling.googleapis.com +gcloud services enable iam.googleapis.com +gcloud services enable compute.googleapis.com + +## FIXME +Enabled Kubernetes Engine API in the tf-admin project, so that terraform can actually compute versions of the engine available + +## FIXME + +I had to add compute admin, service admin, and 
kubernetes engine admin to my org-level account, in order to use gcloud get-cluster-credentials
+
+
+
+gsutil mb -p ${TF_ADMIN} gs://${TF_ADMIN}
+Creating gs://funkypenguin-terraform-admin/...
+[davidy:~/Documents … remix/kubernetes/terraform] master(+1/-0)* ±
+[davidy:~/Documents … remix/kubernetes/terraform] master(+1/-0)* ± cat > backend.tf < terraform {
+heredoc> backend "gcs" {
+heredoc> bucket = "${TF_ADMIN}"
+heredoc> path = "/terraform.tfstate"
+heredoc> project = "${TF_ADMIN}"
+heredoc> }
+heredoc> }
+heredoc> EOF
+[davidy:~/Documents … remix/kubernetes/terraform] master(+1/-0)* ± gsutil versioning set on gs://${TF_ADMIN}
+Enabling versioning for gs://funkypenguin-terraform-admin/...
+[davidy:~/Documents … remix/kubernetes/terraform] master(+1/-0)* ± export GOOGLE_APPLICATION_CREDENTIALS=${TF_CREDS}
+export GOOGLE_PROJECT=${TF_ADMIN}
+
+
+```
+
+### Create Service Account
+
+Since it's probably not a great idea to associate your own, master Google Cloud account with your automation process (after all, you can't easily revoke your own credentials if they leak), create a Service Account for terraform under GCE, and grant it the "Compute Admin" role.
+
+Download the resulting JSON, and save it wherever you're saving your code. 
Remember to protect this .json file like a password, so add it to .gitignore if you're checking your code into git (_and if you're not checking your code into git, what's wrong with you, just do it now!_)
+
+### Setup provider.tf
+
+I setup my provider like this, noting that the project name (which must already be created) came from the output of ```gcloud projects list```, and region/zone came from https://cloud.google.com/compute/docs/regions-zones/
+
+```
+# Specify the provider (GCP, AWS, Azure)
+provider "google" {
+credentials = "${file("serviceaccount.json")}"
+project = "funkypenguin-mining-pools"
+region = "australia-southeast1"
+}
+```
+
+### Setup compute.tf
+
+Just playing, I setup this:
+
+```
+# Create a new instance
+resource "google_compute_instance" "ubuntu-xenial" {
+ name = "ubuntu-xenial"
+ machine_type = "f1-micro"
+ zone = "us-west1-a"
+ boot_disk {
+ initialize_params {
+ image = "ubuntu-1604-lts"
+ }
+}
+network_interface {
+ network = "default"
+ access_config {}
+}
+service_account {
+ scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ }
+}
+```
+
+### Initialize and plan (it's free)
+
+Run ```terraform init``` to initialize Terraform
+
+Then run ```terraform plan``` to check that the plan looks good.
+
+### Apply (not necessarily free)
+
+Once your plan (above) is good, run ```terraform apply``` to put it into motion. This is the point where you may start incurring costs. 
+ +### Setup kubectl + +gcloud container clusters get-credentials $(terraform output cluster_name) --zone $(terraform output cluster_zone) --project $(terraform output project_id) diff --git a/manuscript/reference/networks.md b/manuscript/reference/networks.md index 8290069..61201d5 100644 --- a/manuscript/reference/networks.md +++ b/manuscript/reference/networks.md @@ -6,29 +6,49 @@ Network | Range --|-- [Traefik](https://geek-cookbook.funkypenguin.co.nz/ha-docker-swarm/traefik/) | _unspecified_ [Docker-cleanup](https://geek-cookbook.funkypenguin.co.nz/ha-docker-swarm/docker-swarm-mode/#setup-automated-cleanup) | 172.16.0.0/24 -[Mail Server](https://geek-cookbook.funkypenguin.co.nz/recipies/mail/) | 172.16.1.0/24 -[Gitlab](https://geek-cookbook.funkypenguin.co.nz/recipies/gitlab/) | 172.16.2.0/24 -[Wekan](https://geek-cookbook.funkypenguin.co.nz/recipies/wekan/) | 172.16.3.0/24 -[Piwik](https://geek-cookbook.funkypenguin.co.nz/recipies/piwki/) | 172.16.4.0/24 -[Tiny Tiny RSS](https://geek-cookbook.funkypenguin.co.nz/recipies/tiny-tiny-rss/) | 172.16.5.0/24 -[Huginn](https://geek-cookbook.funkypenguin.co.nz/recipies/huginn/) | 172.16.6.0/24 -[Unifi](https://geek-cookbook.funkypenguin.co.nz/recipies/unifi/) | 172.16.7.0/24 -[Kanboard](https://geek-cookbook.funkypenguin.co.nz/recipies/kanboard/) | 172.16.8.0/24 -[Gollum](https://geek-cookbook.funkypenguin.co.nz/recipies/gollum/) | 172.16.9.0/24 -[Duplicity](https://geek-cookbook.funkypenguin.co.nz/recipies/duplicity/) | 172.16.10.0/24 -[Autopirate](https://geek-cookbook.funkypenguin.co.nz/recipies/autopirate/) | 172.16.11.0/24 -[Nextcloud](https://geek-cookbook.funkypenguin.co.nz/recipies/nextcloud/) | 172.16.12.0/24 -[Portainer](https://geek-cookbook.funkypenguin.co.nz/recipies/portainer/) | 172.16.13.0/24 -[Home-Assistant](https://geek-cookbook.funkypenguin.co.nz/recipies/home-assistant/) | 172.16.14.0/24 -[OwnTracks](https://geek-cookbook.funkypenguin.co.nz/recipies/owntracks/) | 172.16.15.0/24 
-[Plex](https://geek-cookbook.funkypenguin.co.nz/recipies/plex/) | 172.16.16.0/24
-[Emby](https://geek-cookbook.funkypenguin.co.nz/recipies/emby/) | 172.16.17.0/24
-[Calibre-Web](https://geek-cookbook.funkypenguin.co.nz/recipies/calibre-web/) | 172.16.18.0/24
-[Wallabag](https://geek-cookbook.funkypenguin.co.nz/recipies/wallabag/) | 172.16.19.0/24
-[InstaPy](https://geek-cookbook.funkypenguin.co.nz/recipies/instapy/) | 172.16.20.0/24
-[Turtle Pool](https://geek-cookbook.funkypenguin.co.nz/recipies/turtle-pool/) | 172.16.21.0/24
-[MiniFlux](https://geek-cookbook.funkypenguin.co.nz/recipies/miniflux/) | 172.16.22.0/24
-[Gitlab Runner](https://geek-cookbook.funkypenguin.co.nz/recipies/gitlab-runner/) | 172.16.23.0/24
+[Mail Server](https://geek-cookbook.funkypenguin.co.nz/recipes/mail/) | 172.16.1.0/24
+[Gitlab](https://geek-cookbook.funkypenguin.co.nz/recipes/gitlab/) | 172.16.2.0/24
+[Wekan](https://geek-cookbook.funkypenguin.co.nz/recipes/wekan/) | 172.16.3.0/24
+[Piwik](https://geek-cookbook.funkypenguin.co.nz/recipes/piwik/) | 172.16.4.0/24
+[Tiny Tiny RSS](https://geek-cookbook.funkypenguin.co.nz/recipes/tiny-tiny-rss/) | 172.16.5.0/24
+[Huginn](https://geek-cookbook.funkypenguin.co.nz/recipes/huginn/) | 172.16.6.0/24
+[Unifi](https://geek-cookbook.funkypenguin.co.nz/recipes/unifi/) | 172.16.7.0/24
+[Kanboard](https://geek-cookbook.funkypenguin.co.nz/recipes/kanboard/) | 172.16.8.0/24
+[Gollum](https://geek-cookbook.funkypenguin.co.nz/recipes/gollum/) | 172.16.9.0/24
+[Duplicity](https://geek-cookbook.funkypenguin.co.nz/recipes/duplicity/) | 172.16.10.0/24
+[Autopirate](https://geek-cookbook.funkypenguin.co.nz/recipes/autopirate/) | 172.16.11.0/24
+[Nextcloud](https://geek-cookbook.funkypenguin.co.nz/recipes/nextcloud/) | 172.16.12.0/24
+[Portainer](https://geek-cookbook.funkypenguin.co.nz/recipes/portainer/) | 172.16.13.0/24
+[Home-Assistant](https://geek-cookbook.funkypenguin.co.nz/recipes/home-assistant/) | 172.16.14.0/24
+[OwnTracks](https://geek-cookbook.funkypenguin.co.nz/recipes/owntracks/) | 172.16.15.0/24
+[Plex](https://geek-cookbook.funkypenguin.co.nz/recipes/plex/) | 172.16.16.0/24
+[Emby](https://geek-cookbook.funkypenguin.co.nz/recipes/emby/) | 172.16.17.0/24
+[Calibre-Web](https://geek-cookbook.funkypenguin.co.nz/recipes/calibre-web/) | 172.16.18.0/24
+[Wallabag](https://geek-cookbook.funkypenguin.co.nz/recipes/wallabag/) | 172.16.19.0/24
+[InstaPy](https://geek-cookbook.funkypenguin.co.nz/recipes/instapy/) | 172.16.20.0/24
+[Turtle Pool](https://geek-cookbook.funkypenguin.co.nz/recipes/turtle-pool/) | 172.16.21.0/24
+[MiniFlux](https://geek-cookbook.funkypenguin.co.nz/recipes/miniflux/) | 172.16.22.0/24
+[Gitlab Runner](https://geek-cookbook.funkypenguin.co.nz/recipes/gitlab-runner/) | 172.16.23.0/24
+[Munin](https://geek-cookbook.funkypenguin.co.nz/recipes/munin/) | 172.16.24.0/24
+[Masari Mining Pool](https://geek-cookbook.funkypenguin.co.nz/recipes/cryptonote-mining-pool/masari/) | 172.16.25.0/24
+[Athena Mining Pool](https://geek-cookbook.funkypenguin.co.nz/recipes/cryptonote-mining-pool/athena/) | 172.16.26.0/24
+[Bookstack](https://geek-cookbook.funkypenguin.co.nz/recipes/bookstack/) | 172.16.33.0/24
+[Swarmprom](https://geek-cookbook.funkypenguin.co.nz/recipes/swarmprom/) | 172.16.34.0/24
+[Realms](https://geek-cookbook.funkypenguin.co.nz/recipes/realms/) | 172.16.35.0/24
+[ElkarBackup](https://geek-cookbook.funkypenguin.co.nz/recipes/elkarbackup/) | 172.16.36.0/24
+[Mayan EDMS](https://geek-cookbook.funkypenguin.co.nz/recipes/mayan-edms/) | 172.16.37.0/24
+[Shaarli](https://geek-cookbook.funkypenguin.co.nz/recipes/shaarli/) | 172.16.38.0/24
+[OpenLDAP](https://geek-cookbook.funkypenguin.co.nz/recipes/openldap/) | 172.16.39.0/24
+[MatterMost](https://geek-cookbook.funkypenguin.co.nz/recipes/mattermost/) | 172.16.40.0/24
+[PrivateBin](https://geek-cookbook.funkypenguin.co.nz/recipes/privatebin/) | 172.16.41.0/24
+[Mayan 
EDMS](https://geek-cookbook.funkypenguin.co.nz/recipes/mayan-edms/) | 172.16.42.0/24 +[Hack MD](https://geek-cookbook.funkypenguin.co.nz/recipes/hackmd/) | 172.16.43.0/24 +[FlightAirMap](https://geek-cookbook.funkypenguin.co.nz/recipes/flightairmap/) |172.16.44.0/24 +[Wetty](https://geek-cookbook.funkypenguin.co.nz/recipes/wetty/) | 172.16.45.0/24 +[FileBrowser](https://geek-cookbook.funkypenguin.co.nz/recipes/filebrowser/) | 172.16.46.0/24 +[phpIPAM](https://geek-cookbook.funkypenguin.co.nz/recipes/phpipam/) | 172.16.47.0/24 +[Dozzle](https://geek-cookbook.funkypenguin.co.nz/recipes/dozzle/) | 172.16.48.0/24 + ## Chef's Notes diff --git a/manuscript/reference/oauth_proxy.md b/manuscript/reference/oauth_proxy.md index ca7956a..e068564 100644 --- a/manuscript/reference/oauth_proxy.md +++ b/manuscript/reference/oauth_proxy.md @@ -47,11 +47,11 @@ I created **/var/data/oauth_proxy/authenticated-emails.txt**, and add my own ema ### Configure stack -You'll need to define a service for the oauth_proxy in every stack which you want to protect. Here's an example from the [Wekan](/recipies/wekan/) recipe: +You'll need to define a service for the oauth_proxy in every stack which you want to protect. Here's an example from the [Wekan](/recipes/wekan/) recipe: ``` proxy: - image: zappi/oauth2_proxy + image: a5huynh/oauth2_proxy env_file : /var/data/wekan/wekan.env networks: - traefik diff --git a/manuscript/reference/troubleshooting.md b/manuscript/reference/troubleshooting.md index 9dbff58..7351ee7 100644 --- a/manuscript/reference/troubleshooting.md +++ b/manuscript/reference/troubleshooting.md @@ -16,6 +16,14 @@ SSH to the host node, and attach to the container using ```docker exec -it _``` to watch a particular service. As the service dies and is recreated, the logs will continue to be displayed. 
+
+## Visually monitoring containers with ctop
+
+For a visual "top-like" display of your container's activity (_as well as a [detailed per-container view](https://github.com/bcicen/ctop/blob/master/_docs/single.md)_), try using [ctop](https://github.com/bcicen/ctop).
+
+To execute, simply run ```docker run --rm -ti --name ctop -v /var/run/docker.sock:/var/run/docker.sock quay.io/vektorlab/ctop:latest```
+
+Example:
+![](https://github.com/bcicen/ctop/raw/master/_docs/img/grid.gif)
 
 ## Chef's Notes
 
diff --git a/manuscript/sections/chefs-favorites-docker.md b/manuscript/sections/chefs-favorites-docker.md
new file mode 100644
index 0000000..9081880
--- /dev/null
+++ b/manuscript/sections/chefs-favorites-docker.md
@@ -0,0 +1,3 @@
+# Chef's Favorites (Docker) #
+
+The following recipes are the chef's current favorites - these are recipes actively in use and updated by @funkypenguin
\ No newline at end of file
diff --git a/manuscript/sections/chefs-favorites-kubernetes.md b/manuscript/sections/chefs-favorites-kubernetes.md
new file mode 100644
index 0000000..9081880
--- /dev/null
+++ b/manuscript/sections/chefs-favorites-kubernetes.md
@@ -0,0 +1,3 @@
+# Chef's Favorites (Kubernetes) #
+
+The following recipes are the chef's current favorites - these are recipes actively in use and updated by @funkypenguin
\ No newline at end of file
diff --git a/manuscript/sections/ha-docker-swarm.md b/manuscript/sections/ha-docker-swarm.md
index e130429..b5d7348 100644
--- a/manuscript/sections/ha-docker-swarm.md
+++ b/manuscript/sections/ha-docker-swarm.md
@@ -1,3 +1,3 @@
 # HA Docker Swarm #
 
-This section introduces the HA Docker Swarm, which will be the basis for all the recipies discussed.
+This section introduces the HA Docker Swarm, which will be the basis for all the recipes discussed.
diff --git a/manuscript/sections/kubernetes.md b/manuscript/sections/kubernetes.md
new file mode 100644
index 0000000..f1c2c38
--- /dev/null
+++ b/manuscript/sections/kubernetes.md
@@ -0,0 +1,3 @@
+# Kubernetes #
+
+This section introduces the Kubernetes design, which will be the basis for all the recipes discussed further.
diff --git a/manuscript/sections/menu-docker.md b/manuscript/sections/menu-docker.md
new file mode 100644
index 0000000..a674baa
--- /dev/null
+++ b/manuscript/sections/menu-docker.md
@@ -0,0 +1,3 @@
+# Recipes (Docker) #
+
+Now follows individual recipes.
diff --git a/manuscript/sections/menu-kubernetes.md b/manuscript/sections/menu-kubernetes.md
new file mode 100644
index 0000000..4149de0
--- /dev/null
+++ b/manuscript/sections/menu-kubernetes.md
@@ -0,0 +1,3 @@
+# Recipes (Kubernetes) #
+
+Now follows individual recipes.
diff --git a/manuscript/sections/recipies.md b/manuscript/sections/recipies.md
deleted file mode 100644
index 0e913fc..0000000
--- a/manuscript/sections/recipies.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Recipies #
-
-Now follows individual recipies.
diff --git a/manuscript/sections/reference.md b/manuscript/sections/reference.md
index 9198876..18fcf66 100644
--- a/manuscript/sections/reference.md
+++ b/manuscript/sections/reference.md
@@ -1,3 +1,3 @@
 # Reference #
 
-Now follows useful elements which are not full recipies.
+Now follows useful elements which are not full recipes.
diff --git a/manuscript/sponsored-projects.md b/manuscript/sponsored-projects.md
index d71ea5c..602f5c2 100644
--- a/manuscript/sponsored-projects.md
+++ b/manuscript/sponsored-projects.md
@@ -6,10 +6,13 @@ I regularly donate to / sponsor the following projects. **Join me** in supportin
 
 | Project | Donate via.. 
| ------------- |-------------| -| [Kanboard](/recipies/kanboard/) | [PayPal](https://kanboard.org/#donations) -| [Miniflux](/recipies/miniflux/) | [PayPal](https://miniflux.net/#donations) -| [SABnzbd](/recipies/autopirate/sabnzbd/) | [Paypal / Credit Card / Crypto](https://sabnzbd.org/donate/) -| [Radarr](/recipies/autopirate/radarr/) | [OpenCollective](https://opencollective.com/radarr#budget) -| [Sonarr](/recipies/autopirate/sonarr/) | [BitCoin/CC](https://sonarr.tv/donate) -| [NZBHydra](/recipies/autopirate/nzbhydra/) | [Cryptocurrency](https://github.com/theotherp/nzbhydra2) +| [Kanboard](/recipes/kanboard/) | [PayPal](https://kanboard.org/#donations) +| [Miniflux](/recipes/miniflux/) | [PayPal](https://miniflux.net/#donations) +| [SABnzbd](/recipes/autopirate/sabnzbd/) | [Paypal / Credit Card / Crypto](https://sabnzbd.org/donate/) +| [Radarr](/recipes/autopirate/radarr/) | [OpenCollective](https://opencollective.com/radarr#budget) +| [Sonarr](/recipes/autopirate/sonarr/) | [BitCoin/CC](https://sonarr.tv/donate) +| [NZBHydra](/recipes/autopirate/nzbhydra/) | [Cryptocurrency](https://github.com/theotherp/nzbhydra2) | [Calibre](https://calibre-ebook.com/) | [Credit Card](https://calibre-ebook.com/donate) / [Patreon](https://www.patreon.com/kovidgoyal) / [LibrePay](https://liberapay.com/kovidgoyal/donate) +| [LinuxServer.io](https://www.linuxserver.io) | [PayPal](https://www.linuxserver.io/donate) +| [Pi-hole](https://pi-hole.net/) | [Patreon](https://www.patreon.com/pihole/posts) +| [Franck Nijhof's Hassio Addons](https://www.frenck.nl/about/franck-nijhof/) | [Patreon](https://www.patreon.com/frenck/overview) diff --git a/manuscript/support.md b/manuscript/support.md index f28cd01..3355150 100644 --- a/manuscript/support.md +++ b/manuscript/support.md @@ -2,9 +2,22 @@ hero: "Excuse me... waiter, there's a bug in this recipe!" 
# Support -## Welcome to the kitchen +## Discord: Where the cool kids are -For community support and engagement, I've setup a [Discourse forum](https://discourse.geek-kitchen.funkypenguin.co.nz/). Using this as the primary means of discussions makes it easy to share recipes / experiences with future geeks. +All the cool kids are hanging out in the [Discord server](http://chat.funkypenguin.co.nz). + +> "Eh? What's Discord? Get off my lawn, young whippersnappers!!" + +Yeah, I know. I also thought Discord was just for the gamer kids, but it turns out it's great for a geeky community. Why? [Let me elucidate ya.](https://www.youtube.com/watch?v=1qHoSWxVqtE).. + +1. Native markdown for code blocks +2. Drag-drop screenshots +3. Costs nothing, no ads +4. Mobile notifications are reliable, individual channels mutable, etc + +## Forums: Party like it's 1999 + +For community support and engagement, I've setup a [Discourse forum](https://discourse.geek-kitchen.funkypenguin.co.nz/). Using this as the primary means of topical discussions makes it easy to share recipes / experiences with future geeks. ## Discuss a recipe @@ -44,7 +57,7 @@ Impulsively **[click here (NOW quick do it!)](https://www.patreon.com/bePatron?u I also gratefully accept donations of most fine socialist/anarchist/hobbyist cryptocurrencies, including the list below (_let me know if I've left out the coin-of-the-week, and I'll happily add it_): -| ist-currency | Address +| -ist-currency | Address | ------------- |-------------| | Bitcoin | 1GBJfmqARmL66gQzUy9HtNWdmAEv74nfXj | Ethereum | 0x19e60ec49e1f053cfdfc193560ecfb3caed928f1 diff --git a/manuscript/whoami.md b/manuscript/whoami.md index 4abdd6d..0117a2c 100644 --- a/manuscript/whoami.md +++ b/manuscript/whoami.md @@ -18,7 +18,7 @@ One of our suppliers asked me to quote to do the same for their organization. 
Wi During the same "real" job above, I wanted to deploy [jabberd](https://en.wikipedia.org/wiki/Jabberd14), for internal instant messaging within the organization, and as a means to control the sprawl of ad-hoc instant-messaging among staff, using ICQ, MSN, and Yahoo Messenger. -To get management approval to deploy, I wrote a logger (with web UI) for jabber conversations ([Bandersnatch](https://www.funkypenguin.co.nz/project/bandersnatch/)), and a [75-page user manual](https://www.funkypenguin.co.nz/book/jajc-manual/) (in [Docbook XML](http://www.docbook.org/) for a spunky Russian WinXP jabber client, [JAJC](http://jajc.jrudevels.org/). +To get management approval to deploy, I wrote a logger (with web UI) for jabber conversations ([Bandersnatch](https://www.funkypenguin.co.nz/project/bandersnatch/)), and a [75-page user manual](https://www.funkypenguin.co.nz/book/jajc-manual/) (_in [Docbook XML](http://www.docbook.org/)_) for a spunky Russian WinXP jabber client, [JAJC](http://jajc.jrudevels.org/). Due to my contributions to [phpList](http://www.phplist.com), I was approached in 2011 by [Packt Publishing](http://www.packtpub.com), to [write a book](https://www.funkypenguin.co.nz/book/phplist-2-email-campaign-manager) about using PHPList. @@ -26,14 +26,13 @@ Due to my contributions to [phpList](http://www.phplist.com), I was approached i Contact me by: +* Jumping into our [Discord server](http://chat.funkypenguin.co.nz) * Email ([davidy@funkypenguin.co.nz](mailto:davidy@funkypenguin.co.nz)) +* Private, encrypted email with ProtonMail ([funkypenguin@pm.me](mailto:funkypenguin@pm.me)) * Twitter ([@funkypenguin](https://twitter.com/funkypenguin)) -* Mastodon ([@davidy@funkypenguin.co.nz](https://mastodon.funkypenguin.co.nz/@davidy)) - Or by using the form below:
- diff --git a/overrides/README.md b/mkdocs-material/README.md similarity index 100% rename from overrides/README.md rename to mkdocs-material/README.md diff --git a/mkdocs-material/partials/disqus.html b/mkdocs-material/partials/disqus.html new file mode 100644 index 0000000..12100ae --- /dev/null +++ b/mkdocs-material/partials/disqus.html @@ -0,0 +1,12 @@ +
+ + diff --git a/mkdocs.yml b/mkdocs.yml index b9fd68a..6ce9a81 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -15,17 +15,17 @@ repo_url: 'https://github.com/funkypenguin/geek-cookbook' docs_dir: 'manuscript' # Copyright -copyright: 'Copyright © 2016 - 2017 David Young' +copyright: 'Copyright © 2016 - 2019 David Young' #theme_dir: mkdocs-material -pages: - - Home : index.md +nav: + - Home: index.md - Introduction: - - README: README.md + - README: README-UI.md - CHANGELOG: CHANGELOG.md - whoami: whoami.md - - Essential: + - Docker Swarm: - Design: ha-docker-swarm/design.md - VMs: ha-docker-swarm/vms.md - Shared Storage (Ceph): ha-docker-swarm/shared-storage-ceph.md @@ -34,64 +34,114 @@ pages: - Docker Swarm Mode: ha-docker-swarm/docker-swarm-mode.md - Traefik: ha-docker-swarm/traefik.md - Registry: ha-docker-swarm/registry.md - - Mail Server: recipies/mail.md - - Duplicity: recipies/duplicity.md - - Chef's Favorites: - - Huginn: recipies/huginn.md - - Kanboard: recipies/kanboard.md - - Miniflux: recipies/miniflux.md + - Mail Server: recipes/mail.md + - Duplicity: recipes/duplicity.md + - Kubernetes Cluster: + - Start: kubernetes/start.md + - Design: kubernetes/design.md + - Cluster: kubernetes/cluster.md + - Load Balancer: kubernetes/loadbalancer.md + - Snapshots: kubernetes/snapshots.md + - Helm: kubernetes/helm.md + - Traefik: kubernetes/traefik.md + - Chef's Favorites (Docker): - Auto Pirate: - - Start: recipies/autopirate.md - - SABnzbd: recipies/autopirate/sabnzbd.md - - NZBGet: recipies/autopirate/nzbget.md - - Rtorrent: recipies/autopirate/rtorrent.md - - Sonarr: recipies/autopirate/sonarr.md - - Radarr: recipies/autopirate/radarr.md - - Mylar: recipies/autopirate/mylar.md - - Lazy Librarian: recipies/autopirate/lazylibrarian.md - - Headphones: recipies/autopirate/headphones.md - - NZBHydra: recipies/autopirate/nzbhydra.md - - Ombi: recipies/autopirate/ombi.md - - Jackett: recipies/autopirate/jackett.md - - End: recipies/autopirate/end.md - - NextCloud: 
recipies/nextcloud.md - - Plex: recipies/plex.md - - Emby: recipies/emby.md - - Home Assistant: recipies/homeassistant.md - - CryptoMiner: - - Start: recipies/cryptominer.md - - Mining Rig: recipies/cryptominer/mining-rig.md - - AMD GPU: recipies/cryptominer/amd-gpu.md - - NVidia GPU: recipies/cryptominer/nvidia-gpu.md - - Mining Pools : recipies/cryptominer/mining-pool.md - - Wallets : recipies/cryptominer/wallet.md - - Exchanges: recipies/cryptominer/exchange.md - - Minerhotel: recipies/cryptominer/minerhotel.md - - Monitoring: recipies/cryptominer/monitor.md - - Profit!: recipies/cryptominer/profit.md - - Wallabag: recipies/wallabag.md - - Turtle Pool: recipies/turtle-pool.md + - Start: recipes/autopirate.md + - SABnzbd: recipes/autopirate/sabnzbd.md + - NZBGet: recipes/autopirate/nzbget.md + - Rtorrent: recipes/autopirate/rtorrent.md + - Sonarr: recipes/autopirate/sonarr.md + - Radarr: recipes/autopirate/radarr.md + - Mylar: recipes/autopirate/mylar.md + - Lazy Librarian: recipes/autopirate/lazylibrarian.md + - Headphones: recipes/autopirate/headphones.md + - Lidarr: recipes/autopirate/lidarr.md + - NZBHydra: recipes/autopirate/nzbhydra.md + - NZBHydra 2: recipes/autopirate/nzbhydra2.md + - Ombi: recipes/autopirate/ombi.md + - Jackett: recipes/autopirate/jackett.md + - Heimdall: recipes/autopirate/heimdall.md + - End: recipes/autopirate/end.md + - ElkarBackup: recipes/elkarbackup.md + - Emby: recipes/emby.md + - Home Assistant: + - Start: recipes/homeassistant.md + - iBeacon: recipes/homeassistant/ibeacon.md + - Huginn: recipes/huginn.md + - Kanboard: recipes/kanboard.md + - Miniflux: recipes/miniflux.md + - Munin: recipes/munin.md + - NextCloud: recipes/nextcloud.md + - OwnTracks: recipes/owntracks.md + - phpIPAM: recipes/phpipam.md + - Plex: recipes/plex.md + - PrivateBin: recipes/privatebin.md + - Swarmprom: recipes/swarmprom.md + - Turtle Pool: recipes/turtle-pool.md + - Chef's Favorites (Kubernetes): + - Kanboard: recipes/kubernetes/kanboard.md + - 
Miniflux: recipes/kubernetes/miniflux.md +# - NextCloud: recipes/kubernetes/nextcloud.md +# - phpIPAM: recipes/kubernetes/phpipam.md +# - PrivateBin: recipes/kubernetes/privatebin.md - Menu: - - Ghost: recipies/ghost.md - - GitLab: recipies/gitlab.md - - GitLab Runner: recipies/gitlab-runner.md - - Gollum: recipies/gollum.md - - Piwik: recipies/piwik.md - - Wekan: recipies/wekan.md - - Tiny Tiny RSS: recipies/tiny-tiny-rss.md - - Portainer: recipies/portainer.md - - InstaPy: recipies/instapy.md - - Calibre-Web: recipies/calibre-web.md - - Wallabag: recipies/wallabag.md + - Bookstack: recipes/bookstack.md + - CryptoMiner: + - Start: recipes/cryptominer.md + - Mining Rig: recipes/cryptominer/mining-rig.md + - AMD GPU: recipes/cryptominer/amd-gpu.md + - NVidia GPU: recipes/cryptominer/nvidia-gpu.md + - Mining Pools : recipes/cryptominer/mining-pool.md + - Wallets : recipes/cryptominer/wallet.md + - Exchanges: recipes/cryptominer/exchange.md + - Minerhotel: recipes/cryptominer/minerhotel.md + - Monitoring: recipes/cryptominer/monitor.md + - Profit!: recipes/cryptominer/profit.md + - Calibre-Web: recipes/calibre-web.md + - Collabora Online: recipes/collabora-online.md + - Ghost: recipes/ghost.md + - GitLab: recipes/gitlab.md + - GitLab Runner: recipes/gitlab-runner.md + - Gollum: recipes/gollum.md + - InstaPy: recipes/instapy.md + - KeyCloak: recipes/keycloak.md + - Minio: recipes/minio.md + - OpenLDAP: recipes/openldap.md + - Piwik: recipes/piwik.md + - Portainer: recipes/portainer.md + - Realms: recipes/realms.md + - Tiny Tiny RSS: recipes/tiny-tiny-rss.md + - Wallabag: recipes/wallabag.md + - Wekan: recipes/wekan.md + - Wetty: recipes/wetty.md +# - CryptoNote Mining Pool: +# - Start: recipes/cryptonote-mining-pool.md +# - Masari: recipes/cryptonote-mining-pool/masari.md +# - Athena: recipes/cryptonote-mining-pool/athena.md +# - SSO Stack: +# - Start: recipes/sso-stack.md +# - OpenLDAP: recipes/sso-stack/openldap.md +# - KeyCloak: recipes/sso-stack/keycloak.md + - 
Work-in-Progress: +# - MatterMost: recipes/mattermost.md + - IPFS Cluster: recipes/ipfs-cluster.md + - MQTT: recipes/mqtt.md +# - HackMD: recipes/hackmd.md +# - Mastodon: recipes/mastodon.md +# - Mayan EDMS: recipes/mayan-edms.md +# - Shaarli: recipes/shaarli.md +# - UniFi Controller: recipes/unifi-controller.md +# - CyberChef : recipes/cyberchef.md - Reference: - OAuth Proxy: reference/oauth_proxy.md - Data Layout: reference/data_layout.md - Networks: reference/networks.md + - Containers: reference/containers.md - git-docker : reference/git-docker.md - OpenVPN : reference/openvpn.md - Troubleshooting: reference/troubleshooting.md - Support: support.md - - Sponsored Projects: sponsored-projects.md + - Sponsored Projects: sponsored-projects.md theme: name: 'material' @@ -127,18 +177,22 @@ google_analytics: - 'auto' extra_javascript: - - 'extras/javascript/piwik.js' +# - 'extras/javascript/piwik.js' # Extensions markdown_extensions: - admonition - - codehilite(linenums=true) - - toc(permalink=true) + - codehilite: + linenums: true + - toc: + permalink: true - footnotes - pymdownx.arithmatex - - pymdownx.betterem(smart_enable=all) + - pymdownx.betterem: + smart_enable: all - pymdownx.caret - pymdownx.critic + - pymdownx.details - pymdownx.emoji: emoji_generator: !!python/name:pymdownx.emoji.to_svg - pymdownx.inlinehilite @@ -146,6 +200,7 @@ markdown_extensions: - pymdownx.mark - pymdownx.smartsymbols - pymdownx.superfences - - pymdownx.tasklist(custom_checkbox=true) + - pymdownx.tasklist: + custom_checkbox: true - pymdownx.tilde - meta diff --git a/netlify_redirects.txt b/netlify_redirects.txt index 672367e..7930993 100644 --- a/netlify_redirects.txt +++ b/netlify_redirects.txt @@ -1 +1,5 @@ +# Because "Geek" is a singular https://geeks-cookbook.funkypenguin.co.nz/* https://geek-cookbook.funkypenguin.co.nz/:splat 301! + +# Because it took me about a year to realize I was spelling "Recipes" wrong! 
+https://geek-cookbook.funkypenguin.co.nz/recipies/* https://geek-cookbook.funkypenguin.co.nz/recipes/:splat 301! diff --git a/overrides/README-OVERRIDES.md b/overrides/README-OVERRIDES.md new file mode 100644 index 0000000..907b308 --- /dev/null +++ b/overrides/README-OVERRIDES.md @@ -0,0 +1 @@ +blah diff --git a/overrides/main.html b/overrides/main.html index 118fe19..d2612f8 100644 --- a/overrides/main.html +++ b/overrides/main.html @@ -13,4 +13,6 @@ (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(d); })(); + + {% endblock %} diff --git a/requirements.txt b/requirements.txt index e586682..9ee9e92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ -mkdocs>=0.9.0 -mkdocs-material -pymdown-extensions +mkdocs>=1.0.4 +mkdocs-material>=4.0.2 +pymdown-extensions>=6.0 +Markdown>=3.0.1 diff --git a/runtime.txt b/runtime.txt new file mode 100644 index 0000000..d70c8f8 --- /dev/null +++ b/runtime.txt @@ -0,0 +1 @@ +3.6 diff --git a/scripts/markdown-to-markua.sh b/scripts/markdown-to-markua.sh index b08da9c..63755f3 100755 --- a/scripts/markdown-to-markua.sh +++ b/scripts/markdown-to-markua.sh @@ -11,11 +11,20 @@ find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/💰//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🍷//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🏢//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/❤️//g" -find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/:turtle://g" +find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🐢//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/👋//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🐦//g" find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/📖//g" -find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🐦//g" +find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/✉️//g" 
+find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/📺//g"
+find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🎥//g"
+find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/🎵//g"
+find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/😁//g"
+
+# Can't use relative paths in a book, so make all paths static
+find manuscript -type f -name "*.md" -print0 | xargs -0 sed -i "s/(\//(https:\/\/geek-cookbook.funkypenguin.co.nz\//g"
+
+