From 6e6033d1681eca232ddbbc1e4d91cf5f407aada7 Mon Sep 17 00:00:00 2001 From: David Young Date: Wed, 17 Aug 2022 23:24:14 +1200 Subject: [PATCH] Tidy up Mastodon Kubernetes recipe Signed-off-by: David Young --- _snippets/premix-cta-kubernetes.md | 4 + _snippets/premix-cta.md | 2 +- manuscript/kubernetes/ingress/nginx.md | 3 +- manuscript/kubernetes/loadbalancer/index.md | 3 +- .../kubernetes/persistence/rook-ceph.md | 53 ----- .../persistence/rook-ceph/cluster.md | 1 + .../kubernetes/persistence/rook-ceph/index.md | 19 ++ .../persistence/rook-ceph/operator.md | 183 ++++++++++++++++++ manuscript/recipes/kubernetes/mastodon.md | 6 +- mkdocs.yml | 7 +- 10 files changed, 219 insertions(+), 62 deletions(-) create mode 100644 _snippets/premix-cta-kubernetes.md delete mode 100644 manuscript/kubernetes/persistence/rook-ceph.md create mode 100644 manuscript/kubernetes/persistence/rook-ceph/cluster.md create mode 100644 manuscript/kubernetes/persistence/rook-ceph/index.md create mode 100644 manuscript/kubernetes/persistence/rook-ceph/operator.md diff --git a/_snippets/premix-cta-kubernetes.md b/_snippets/premix-cta-kubernetes.md new file mode 100644 index 0000000..159ff8c --- /dev/null +++ b/_snippets/premix-cta-kubernetes.md @@ -0,0 +1,4 @@ +!!! tip "Fast-track your fluxing! 🚀" + Is crafting all these YAMLs by hand too much of a PITA? + + I automatically and **instantly** share (_with my [sponsors](https://github.com/sponsors/funkypenguin)_) a private "[_premix_](https://geek-cookbook.funkypenguin.co.nz/premix/)" git repository, which includes an ansible playbook to auto-create all the necessary files in your flux repository! :thumbsup: \ No newline at end of file diff --git a/_snippets/premix-cta.md b/_snippets/premix-cta.md index afc231d..bec726b 100644 --- a/_snippets/premix-cta.md +++ b/_snippets/premix-cta.md @@ -1,4 +1,4 @@ -!!! tip +!!! tip "Fast-track with premix! 
🚀" I automatically and **instantly** share (_with my [sponsors](https://github.com/sponsors/funkypenguin)_) a private "[_premix_](https://geek-cookbook.funkypenguin.co.nz/premix/)" git repository, which includes necessary docker-compose and env files for all published recipes. This means that sponsors can launch any recipe with just a `git pull` and a `docker stack deploy` 👍. 🚀 **Update**: Premix now includes an ansible playbook, so that sponsors can deploy an entire stack + recipes, with a single ansible command! (*more [here](https://geek-cookbook.funkypenguin.co.nz/premix/ansible/operation/)*) \ No newline at end of file diff --git a/manuscript/kubernetes/ingress/nginx.md b/manuscript/kubernetes/ingress/nginx.md index 98ff019..822047c 100644 --- a/manuscript/kubernetes/ingress/nginx.md +++ b/manuscript/kubernetes/ingress/nginx.md @@ -1,6 +1,5 @@ --- -title: Install nginx ingress controller into Kuberntes with Flux -description: Nginx Ingress Controller +title: Install nginx ingress controller into Kubernetes with Flux --- # Nginx Ingress Controller for Kubernetes - the "flux way" diff --git a/manuscript/kubernetes/loadbalancer/index.md b/manuscript/kubernetes/loadbalancer/index.md index 917c80b..ccadc69 100644 --- a/manuscript/kubernetes/loadbalancer/index.md +++ b/manuscript/kubernetes/loadbalancer/index.md @@ -1,5 +1,6 @@ --- -description: Kubernetes Loadbalancer options +title: What loadbalancer to use in self-hosted Kubernetes? 
+description: Here's a simple way to work out which load balancer you'll need for your self-hosted Kubernetes cluster --- # Loadbalancing Services diff --git a/manuscript/kubernetes/persistence/rook-ceph.md b/manuscript/kubernetes/persistence/rook-ceph.md deleted file mode 100644 index 8d2358d..0000000 --- a/manuscript/kubernetes/persistence/rook-ceph.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: How to use Rook Ceph for Persistent Storage in Kubernetes -description: How to deploy Rook Ceph into your Kubernetes cluster for persistent storage ---- - -# Persistent storage in Kubernetes with Rook Ceph / CephFS - -[Ceph](https://docs.ceph.com/en/quincy/) is a highly-reliable, scalable network storage platform which uses individual disks across participating nodes to provide fault-tolerant storage. - -![Ceph Screenshot](/images/ceph.png){ loading=lazy } - -[Rook](https://rook.io) provides an operator for Ceph, decomposing the [10-year-old](https://en.wikipedia.org/wiki/Ceph_(software)#Release_history), at-time-arcane, platform into cloud-native components, created declaratively, whose lifecycle is managed by an operator. - - -## Rook Ceph requirements - -!!! summary "Ingredients" - - Already deployed: - - * [x] A [Kubernetes cluster](/kubernetes/cluster/) - * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped - * [x] An [Ingress](/kubernetes/ingress/) to route incoming traffic to services - - New: - - * [ ] At least 3 nodes with dedicated disks available (*more is better*) - -## Preparation - -### Namespace - -We need a namespace to deploy our HelmRelease and associated ConfigMaps into. 
Per the [flux design](/kubernetes/deployment/flux/), I create this example yaml in my flux repo at `/bootstrap/namespaces/namespace-rook-system.yaml`: - -```yaml title="/bootstrap/namespaces/namespace-mastodon.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: rook-system -``` - -### HelmRepository - -```yaml title="/bootstrap/helmrepositories/gitepository-rook-release.yaml" -apiVersion: source.toolkit.fluxcd.io/v1beta1 -kind: HelmRepository -metadata: - name: rook-release - namespace: flux-system -spec: - interval: 15m - url: https://charts.rook.io/release -``` diff --git a/manuscript/kubernetes/persistence/rook-ceph/cluster.md b/manuscript/kubernetes/persistence/rook-ceph/cluster.md new file mode 100644 index 0000000..b4b5d7d --- /dev/null +++ b/manuscript/kubernetes/persistence/rook-ceph/cluster.md @@ -0,0 +1 @@ +Working on this, check back soon! ;) \ No newline at end of file diff --git a/manuscript/kubernetes/persistence/rook-ceph/index.md b/manuscript/kubernetes/persistence/rook-ceph/index.md new file mode 100644 index 0000000..d5e3e33 --- /dev/null +++ b/manuscript/kubernetes/persistence/rook-ceph/index.md @@ -0,0 +1,19 @@ +--- +title: How to use Rook Ceph for Persistent Storage in Kubernetes +description: How to deploy Rook Ceph into your Kubernetes cluster for persistent storage +--- +# Persistent storage in Kubernetes with Rook Ceph / CephFS + +[Ceph](https://docs.ceph.com/en/quincy/) is a highly-reliable, scalable network storage platform which uses individual disks across participating nodes to provide fault-tolerant storage. + +![Ceph Screenshot](/images/ceph.png){ loading=lazy } + +[Rook](https://rook.io) provides an operator for Ceph, decomposing the [10-year-old](https://en.wikipedia.org/wiki/Ceph_(software)#Release_history), at-time-arcane, platform into cloud-native components, created declaratively, whose lifecycle is managed by an operator. 
+ +The simplest way to think about running rook-ceph is to separate the [operator](/kubernetes/persistence/rook-ceph/operator/) (*a generic worker which manages the lifecycle of your cluster*) from your desired [cluster](/kubernetes/persistence/rook-ceph/cluster/) config itself (*spec*). + +To this end, I've defined each as a separate component, below: + +1. First, install the [operator](/kubernetes/persistence/rook-ceph/operator/) +2. Then, define your [cluster](/kubernetes/persistence/rook-ceph/cluster/) +3. Win! diff --git a/manuscript/kubernetes/persistence/rook-ceph/operator.md b/manuscript/kubernetes/persistence/rook-ceph/operator.md new file mode 100644 index 0000000..5592601 --- /dev/null +++ b/manuscript/kubernetes/persistence/rook-ceph/operator.md @@ -0,0 +1,183 @@ +--- +title: How to use Rook Ceph for Persistent Storage in Kubernetes +description: How to deploy Rook Ceph into your Kubernetes cluster for persistent storage +--- + +# Persistent storage in Kubernetes with Rook Ceph / CephFS + +[Ceph](https://docs.ceph.com/en/quincy/) is a highly-reliable, scalable network storage platform which uses individual disks across participating nodes to provide fault-tolerant storage. + +![Ceph Screenshot](/images/ceph.png){ loading=lazy } + +[Rook](https://rook.io) provides an operator for Ceph, decomposing the [10-year-old](https://en.wikipedia.org/wiki/Ceph_(software)#Release_history), at-time-arcane, platform into cloud-native components, created declaratively, whose lifecycle is managed by an operator. + + +## Rook Ceph requirements + +!!! summary "Ingredients" + + Already deployed: + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + +## Preparation + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. 
Per the [flux design](/kubernetes/deployment/flux/), I create this example yaml in my flux repo at `/bootstrap/namespaces/namespace-rook-system.yaml`: + +```yaml title="/bootstrap/namespaces/namespace-rook-system.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: rook-system +``` + +### HelmRepository + +We're going to install a helm chart from the Rook Ceph chart repository, so I create the following in my flux repo: + +```yaml title="/bootstrap/helmrepositories/helmrepository-rook-release.yaml" +apiVersion: source.toolkit.fluxcd.io/v1beta1 +kind: HelmRepository +metadata: + name: rook-release + namespace: flux-system +spec: + interval: 15m + url: https://charts.rook.io/release +``` + +### Kustomization + +Now that the "global" elements of this deployment (*just the HelmRepository in this case*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/rook-ceph`. I create this example Kustomization in my flux repo: + +```yaml title="/bootstrap/kustomizations/kustomization-rook-ceph.yaml" +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: rook-ceph + namespace: flux-system +spec: + interval: 30m + path: ./rook-ceph + prune: true # remove any elements later removed from the above path + timeout: 10m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + name: cephblockpools.ceph.rook.io +``` + +--8<-- "premix-cta-kubernetes.md" + +### ConfigMap + +Now we're into the app-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/rook/rook/blob/master/deploy/charts/rook-ceph/values.yaml). 
Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this example yaml in my flux repo: + +```yaml title="rook-ceph/configmap-rook-ceph-helm-chart-value-overrides.yaml" +apiVersion: v1 +kind: ConfigMap +metadata: + name: rook-ceph-helm-chart-value-overrides + namespace: rook-ceph +data: + values.yaml: |- # (1)! + # +``` + +1. Paste in the contents of the upstream `values.yaml` here, indented 4 spaces, and then change the values you need as illustrated below. + +Values I change from the default are: + +```yaml +pspEnable: false # (1)! +``` + +1. PSPs are deprecated, and will eventually be removed in Kubernetes 1.25, at which point this will cause breakage. + + +### HelmRelease + +Finally, having set the scene above, we define the HelmRelease which will actually deploy the rook-ceph operator into the cluster. I save this in my flux repo: + +```yaml title="/rook-ceph/helmrelease-rook-ceph.yaml" +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + chart: + spec: + chart: rook-ceph + version: 1.9.x + sourceRef: + kind: HelmRepository + name: rook-release + namespace: flux-system + interval: 30m + timeout: 10m + install: + remediation: + retries: 3 + upgrade: + remediation: + retries: -1 # keep trying to remediate + crds: CreateReplace # Upgrade CRDs on package update + releaseName: rook-ceph + valuesFrom: + - kind: ConfigMap + name: rook-ceph-helm-chart-value-overrides + valuesKey: values.yaml # (1)! +``` + +1. This is the default, but best to be explicit for clarity + +## Install Rook Ceph Operator! + +Commit the changes to your flux repository, and either wait for the reconciliation interval, or force a reconciliation using `flux reconcile source git flux-system`. You should see the kustomization appear... 
```bash +~ ❯ flux get kustomizations rook-ceph +NAME READY MESSAGE REVISION SUSPENDED +rook-ceph True Applied revision: main/70da637 main/70da637 False +~ ❯ +``` + +The helmrelease should be reconciled... + +```bash +~ ❯ flux get helmreleases -n rook-ceph rook-ceph +NAME READY MESSAGE REVISION SUSPENDED +rook-ceph True Release reconciliation succeeded v1.9.9 False +~ ❯ +``` + +And you should have happy rook-ceph operator pods: + +```bash +~ ❯ k get pods -n rook-ceph -l app=rook-ceph-operator +NAME READY STATUS RESTARTS AGE +rook-ceph-operator-7c94b7446d-nwsss 1/1 Running 0 5m14s +~ ❯ +``` + +## Summary + +What have we achieved? We're half-way to getting a ceph cluster, having deployed the operator which will manage the lifecycle of the [ceph cluster](/kubernetes/persistence/rook-ceph/cluster/) we're about to create! + +!!! summary "Summary" + Created: + + * [X] Rook ceph operator running and ready to deploy a cluster! + +--8<-- "recipe-footer.md" + + + diff --git a/manuscript/recipes/kubernetes/mastodon.md b/manuscript/recipes/kubernetes/mastodon.md index ec9edbf..73acfc7 100644 --- a/manuscript/recipes/kubernetes/mastodon.md +++ b/manuscript/recipes/kubernetes/mastodon.md @@ -1,6 +1,6 @@ --- -title: Install Mastodon in Docker Swarm -description: How to install your own Mastodon instance using Docker Swarm +title: Install Mastodon in Kubernetes +description: How to install your own Mastodon instance using Kubernetes --- # Install Mastodon in Kubernetes @@ -26,7 +26,7 @@ description: How to install your own Mastodon instance using Docker Swarm * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped * [x] An [Ingress](/kubernetes/ingress/) to route incoming traffic to services * [x] [Persistent storage](/kubernetes/persistence/) to store persistent stuff - * [x] [mastodon](/kubernetes/mastodon/) to create an DNS entry + * [x] [External DNS](/kubernetes/external-dns/) to create a DNS entry New: diff --git a/mkdocs.yml b/mkdocs.yml index 
1bb7268..a5dc65e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -194,7 +194,10 @@ nav: - Local Path Provisioner: kubernetes/persistence/local-path-provisioner.md - TopoLVM: kubernetes/persistence/topolvm.md # - OpenEBS: kubernetes/persistence/openebs.md - # - Rook Ceph: kubernetes/persistence/rook-ceph.md + - Rook Ceph: + - kubernetes/persistence/rook-ceph/index.md + - Operator: kubernetes/persistence/rook-ceph/operator.md + - Cluster: kubernetes/persistence/rook-ceph/cluster.md # - LongHorn: kubernetes/persistence/longhorn.md # - Backup: # - kubernetes/backup/index.md @@ -317,7 +320,7 @@ theme: extra: social: - icon: 'fontawesome/brands/mastodon' - link: 'https://so.funkypenguin.co.nz/' + link: 'https://so.fnky.nz/' - icon: 'fontawesome/brands/github' link: 'https://github.com/funkypenguin' - icon: 'fontawesome/brands/twitter'