diff --git a/.markdownlint.yaml b/.markdownlint.yaml index ba37377..b10aa8c 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -13,5 +13,6 @@ # We use fenced code blocks, but this test conflicts with the admonitions plugin we use, which relies # on indentation (which is then falsely detected as a code block) +"MD038": false "MD046": false diff --git a/_snippets/kubernetes-why-full-values-in-configmap.md b/_snippets/kubernetes-why-full-values-in-configmap.md new file mode 100644 index 0000000..df02b71 --- /dev/null +++ b/_snippets/kubernetes-why-full-values-in-configmap.md @@ -0,0 +1,6 @@ +!!! question "That's a lot of unnecessary text!" + > Why not just paste in the subset of values I want to change? + + You know what's harder than working out which values from a 2000-line `values.yaml` to change? + + Answer: Working out what values to change when the upstream helm chart has refactored or added options! By pasting in the entirety of the upstream chart, when it comes time to perform upgrades, you can just duplicate your ConfigMap YAML, paste the new values into one of the copies, and compare them side by side to ensure your original values/decisions persist in the new chart. diff --git a/_snippets/kubernetes-why-not-config-in-helmrelease.md b/_snippets/kubernetes-why-not-config-in-helmrelease.md new file mode 100644 index 0000000..6d94f5b --- /dev/null +++ b/_snippets/kubernetes-why-not-config-in-helmrelease.md @@ -0,0 +1,2 @@ +!!! question "Why not just put config in the HelmRelease?" + While it's true that we could embed values directly into the HelmRelease YAML, this becomes unweildy with large helm charts. It's also simpler (less likely to result in error) if changes to **HelmReleases**, which affect **deployment** of the chart, are defined in separate files to changes in helm chart **values**, which affect **operation** of the chart. \ No newline at end of file diff --git a/_snippets/recipe-footer.md b/_snippets/recipe-footer.md index 6de27f2..c0aacc9 100644 --- a/_snippets/recipe-footer.md +++ b/_snippets/recipe-footer.md @@ -4,11 +4,17 @@ ### Tip your waiter (sponsor) πŸ‘ -Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) Sponsor me on [Github][github_sponsor] / [Patreon][patreon], or see the [contribute](/community/support/) page for more (_free or paid)_ ways to say thank you! πŸ‘ +Did you receive excellent service? Want to compliment the chef? (_..and support development of current and future recipes!_) Sponsor me on [Github][github_sponsor] / [Patreon][patreon], or see the [contribute](/community/support/) page for more (_free or paid)_ ways to say thank you! πŸ‘ + +### Employ your chef (engage) 🀝 + +Is this too much of a geeky PITA? Do you just want results, stat? [I do this for a living](https://www.funkypenguin.co.nz/about/) - I provide consulting and engineering expertise to businesses needing short-term, short-notice support in the cloud-native space, including AWS/Azure/GKE, Kubernetes, CI/CD and automation. + +Learn more about working with me [here](https://www.funkypenguin.co.nz/work-with-me/). ### Flirt with waiter (subscribe) πŸ’Œ -Want to know now when this recipe gets updated, or when future recipes are added? Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.rss), or leave your email address below, and we'll keep you updated. (*double-opt-in, no monkey business, no spam) +Want to know now when this recipe gets updated, or when future recipes are added? 
Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.rss), or leave your email address below, and we'll keep you updated. --8<-- "convertkit-subscribe-form.html" diff --git a/manuscript/images/cert-manager.svg b/manuscript/images/cert-manager.svg new file mode 100644 index 0000000..e3aedf3 --- /dev/null +++ b/manuscript/images/cert-manager.svg @@ -0,0 +1,452 @@ + + + + + + + + + + + + + + + letsencrypt-staging + + + + + letsencrypt-prod + + + + + venafi-as-a-service + + + + + hashicorp-vault + + + + + venafi-tpp + Issuers + + + + + + + + + + cert-managerCertificatesKubernetesSecrets + + + + signed keypair + + + + + foo.bar.comIssuer:venafi-tpp + + + + + example.comwww.example.comIssuer:letsencrypt-prod + + + + + + + + signed keypair + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/manuscript/images/external-dns.png b/manuscript/images/external-dns.png new file mode 100644 index 0000000..0ee8e07 Binary files /dev/null and b/manuscript/images/external-dns.png differ diff --git a/manuscript/images/flux_github_token.png b/manuscript/images/flux_github_token.png new file mode 100644 index 0000000..b3e7c30 Binary files /dev/null and b/manuscript/images/flux_github_token.png differ diff --git a/manuscript/images/ingress.jpg b/manuscript/images/ingress.jpg new file mode 100644 index 0000000..87f3534 Binary files /dev/null and b/manuscript/images/ingress.jpg differ diff --git a/manuscript/images/metallb-pfsense-00.png b/manuscript/images/metallb-pfsense-00.png new file mode 100644 index 0000000..b707085 Binary files /dev/null and b/manuscript/images/metallb-pfsense-00.png differ diff --git a/manuscript/images/metallb-pfsense-01.png b/manuscript/images/metallb-pfsense-01.png new file mode 100644 index 0000000..2be4759 Binary files /dev/null and b/manuscript/images/metallb-pfsense-01.png differ diff --git a/manuscript/images/metallb-pfsense-02.png b/manuscript/images/metallb-pfsense-02.png new file mode 100644 index 0000000..55bcdc2 Binary files /dev/null and b/manuscript/images/metallb-pfsense-02.png differ diff --git a/manuscript/images/metallb-pfsense-03.png b/manuscript/images/metallb-pfsense-03.png new file mode 100644 index 0000000..b6953ae Binary files /dev/null and b/manuscript/images/metallb-pfsense-03.png differ diff --git a/manuscript/images/metallb-pfsense-04.png b/manuscript/images/metallb-pfsense-04.png new file mode 100644 index 0000000..47d37ee Binary files /dev/null and b/manuscript/images/metallb-pfsense-04.png differ diff --git a/manuscript/images/metallb-pfsense-05.png b/manuscript/images/metallb-pfsense-05.png new file mode 100644 index 0000000..75f7b43 Binary files /dev/null and b/manuscript/images/metallb-pfsense-05.png differ diff --git a/manuscript/images/sealed-secrets.png b/manuscript/images/sealed-secrets.png new file mode 100644 index 0000000..9ff0698 Binary files /dev/null and b/manuscript/images/sealed-secrets.png differ diff --git a/manuscript/images/traefik-dashboard.png b/manuscript/images/traefik-dashboard.png new file mode 100644 index 0000000..b25c597 Binary files /dev/null and b/manuscript/images/traefik-dashboard.png differ diff --git a/manuscript/recipes/kubernetes/miniflux.md b/manuscript/kubernetes/backup/index.md similarity index 100% rename from manuscript/recipes/kubernetes/miniflux.md rename to manuscript/kubernetes/backup/index.md diff --git a/manuscript/kubernetes/cluster.md b/manuscript/kubernetes/cluster/digitalocean.md similarity index 84% rename from manuscript/kubernetes/cluster.md rename to 
manuscript/kubernetes/cluster/digitalocean.md index 56cf0a2..97bbe29 100644 --- a/manuscript/kubernetes/cluster.md +++ b/manuscript/kubernetes/cluster/digitalocean.md @@ -1,3 +1,6 @@ +--- +description: Creating a Kubernetes cluster on DigitalOcean +--- # Kubernetes on DigitalOcean IMO, the easiest Kubernetes cloud provider to experiment with is [DigitalOcean](https://m.do.co/c/e33b78ad621b) (_this is a referral link_). I've included instructions below to start a basic cluster. @@ -39,7 +42,7 @@ DigitalOcean will provide you with a "kubeconfig" file to use to access your clu ## Release the kubectl! -Save your kubeconfig file somewhere, and test it our by running ```kubectl --kubeconfig= get nodes``` +Save your kubeconfig file somewhere, and test it out by running ```kubectl --kubeconfig= get nodes``` [^1] Example output: @@ -69,18 +72,6 @@ festive-merkle-8n9e Ready 58s v1.13.1 That's it. You have a beautiful new kubernetes cluster ready for some action! -## Move on.. - -Still with me? Good. Move on to creating your own external load balancer.. - -* [Start](/kubernetes/) - Why Kubernetes? -* [Design](/kubernetes/design/) - How does it fit together? -* Cluster (this page) - Setup a basic cluster -* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access -* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data -* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks -* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm - -[^1]: Ok, yes, there's not much you can do with your cluster _yet_. But stay tuned, more Kubernetes fun to come! +[^1]: Do you live in the CLI? Install the kubectl autocompletion for [bash](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-bash-linux/) or [zsh](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-zsh/) to make your life much easier! --8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/cluster/index.md b/manuscript/kubernetes/cluster/index.md new file mode 100644 index 0000000..427db65 --- /dev/null +++ b/manuscript/kubernetes/cluster/index.md @@ -0,0 +1,64 @@ +--- +description: Choosing HOW to deploy Kubernetes +--- +# Kubernetes Cluster + +There are an ever-increasing number of ways to deploy and run Kubernetes. The primary distinction to be aware of is whether to fork out for a managed Kubernetes instance or not. Managed instances have some advantages, which I'll detail below, but these come at additional cost. + +## Managed (Cloud Provider) + +### Popular Options + +Popular options are: + +* [DigitalOcean](/kubernetes/cluster/digitalocean/) +* Google Kubernetes Engine (GKE) +* Amazon Elastic Kubernetes Service (EKS) +* Azure Kubernetes Service (AKS) + +### Upgrades + +A managed Kubernetes provider will typically provide a way to migrate to pre-tested and trusted versions of Kubernetes, as they're released and then tested. This [doesn't mean that upgrades will be trouble-free](https://www.digitalocean.com/community/tech_talks/20-000-upgrades-later-lessons-from-a-year-of-managed-kubernetes-upgrades), but they're likely to be less of a PITA. With Kubernetes' 4-month release cadence, you'll want to keep an eye on updates, and avoid becoming too out-of-date. + +### Horizontal Scaling + +One of the key drawcards for Kubernetes is horizontal scaling. You want to be able to expand/contract your cluster as your workloads change, even if just for one day a month. Doing this on your own hardware is.. awkward.
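With a managed provider, scaling is typically a single API call or CLI command. As a minimal sketch (*assuming a DigitalOcean cluster managed with `doctl`; the cluster and pool names below are hypothetical placeholders*), you could let the provider autoscale a node pool for you:

```bash
# Hypothetical example: allow the "workers" node pool of cluster "my-cluster"
# to grow/shrink between 1 and 5 nodes as workload demand changes
doctl kubernetes cluster node-pool update my-cluster workers \
    --auto-scale \
    --min-nodes 1 \
    --max-nodes 5
```

Achieving the equivalent on your own hardware means physically adding/removing nodes, or over-provisioning VMs "just in case".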
+ +### Load Balancing + +Even if you had enough hardware capacity to handle any unexpected scaling requirements, ensuring that traffic can reliably reach your cluster is a complicated problem. You need to present a "virtual" IP for external traffic to ingress the cluster on. There are popular solutions to provide LoadBalancer services to a self-managed cluster (*i.e., [MetalLB](/kubernetes/load-balancer/metallb/)*), but they do represent extra complexity, and won't necessarily be resilient to outages outside of the cluster (*network devices, power, etc*). + +### Storage + +Cloud providers make it easy to connect their storage solutions to your cluster, but you'll pay as you scale, and in most cases, I/O on cloud block storage is throttled along with your provisioned size. (*So a 1Gi volume will have terrible IOPS compared to a 100Gi volume*) + +### Services + +Some things just "work better" in a cloud provider environment. For example, to run a highly available Postgres instance on Kubernetes requires at least 3 nodes, and 3 x storage, plus manual failover/failback in the event of an actual issue. This can represent a huge cost if you simply need a PostgreSQL database to provide (*for example*) a backend to an authentication service like [KeyCloak](/recipes/kubernetes/keycloak/). Cloud providers will have a range of managed database solutions which will cost far less than do-it-yourselfing, and integrate easily and securely into their kubernetes offerings. + +### Summary + +Go with a managed provider if you want your infrastructure to be resilient to your own hardware/connectivity issues. I.e., there's a material impact to a power/network/hardware outage, and the cost of the managed provider is less than the cost of an outage. + +## DIY (Cloud Provider, Bare Metal, VMs) + +### Popular Options + +Popular options are: + +* Rancher's K3s +* Ubuntu's Charmed Kubernetes + +### Flexible + +With self-hosted Kubernetes, you're free to mix/match your configuration as you see fit. You can run a single k3s node on a raspberry pi, or a fully HA pi-cluster, or a handful of combined master/worker nodes on a bunch of proxmox VMs, or on plain bare-metal. + +### Education + +You'll learn more about how to care for and feed your cluster if you build it yourself. But you'll definately spend more time on it, and it won't always be when you expect! + +### Summary + +Go with a self-hosted cluster if you want to learn more, you'd rather spend time than money, or you've already got significant investment in local infructure and technical skillz. + +--8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/cluster/k3s.md b/manuscript/kubernetes/cluster/k3s.md new file mode 100644 index 0000000..970b939 --- /dev/null +++ b/manuscript/kubernetes/cluster/k3s.md @@ -0,0 +1,139 @@ +--- +description: Creating a Kubernetes cluster on k3s +--- +# Deploy your cluster on k3s + +If you're wanting to self-host your cluster, the simplest and most widely-supported approach is Rancher's [k3s](https://k3s.io/). + +!!! summary "Ingredients" + + * [ ] One or more "modern" Linux hosts to serve as cluster masters. (*Using an odd number of masters is required for HA*). 
Additional steps are required for [Raspbian Buster](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster), [Alpine](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup), or [RHEL/CentOS](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-red-hat-centos-enterprise-linux). + + Optional: + + * [ ] Additional hosts to serve as cluster agents (*assuming that not everybody gets to be a master!*) + +## Preparation + +Ensure you have sudo access to your nodes, and that each node meets the [installation requirements](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/). + +## Deploy k3s (one node only ever) + +If you only want a single-node k3s cluster, then simply run the following to do the deployment: + +```bash +MYSECRET=iambatman +curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \ + sh -s - --disable traefik server +``` + +!!! question "Why no traefik?" + k3s comes with the traefik ingress "built-in", so why not deploy it? Because we'd rather deploy it **later** (*if we even want it*), using the same [deployment strategy](/kubernetes/deployment/flux/) which we use with all of our other services, so that we can easily update/configure it. + +## Deploy k3s (mooar nodes!) + +### Deploy first master + +You may only have one node now, but it's a good idea to prepare for future expansion by bootstrapping k3s in "embedded etcd" multi-master HA mode. Pick a secret to use for your server token, and run the following: + +```bash +MYSECRET=iambatman +curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \ + sh -s - --disable traefik --disable servicelb server --cluster-init +``` + +!!! question "y no servicelb?" + K3s includes a [rudimentary load balancer](/kubernetes/loadbalancer/k3s/) which utilizes host ports to make a given port available on all nodes. If you plan to deploy one, and only one k3s node, then this is a viable configuration, and you can leave out the `--disable servicelb` text above. If you plan for more nodes and HA though, then you're better off deploying [MetalLB](/kubernetes/loadbalancer/metallb/) to do "real" loadbalancing.
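!!! tip "Lost your server token?"
    If you lose track of the `K3S_TOKEN` secret you chose above, you'll need it again when joining additional masters or agents later. k3s stores it on the first master, so you can recover it with:

    ```bash
    sudo cat /var/lib/rancher/k3s/server/node-token
    ```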
+ +You should see output which looks something like this: + +```bash +root@shredder:~# curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \ +> sh -s - --disable traefik server --cluster-init + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 +100 27318 100 27318 0 0 144k 0 --:--:-- --:--:-- --:--:-- 144k +[INFO] Finding release for channel stable +[INFO] Using v1.21.5+k3s2 as release +[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.21.5+k3s2/sha256sum-amd64.txt +[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.21.5+k3s2/k3s +[INFO] Verifying binary download +[INFO] Installing k3s to /usr/local/bin/k3s +[INFO] Skipping installation of SELinux RPM +[INFO] Creating /usr/local/bin/kubectl symlink to k3s +[INFO] Creating /usr/local/bin/crictl symlink to k3s +[INFO] Creating /usr/local/bin/ctr symlink to k3s +[INFO] Creating killall script /usr/local/bin/k3s-killall.sh +[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh +[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env +[INFO] systemd: Creating service file /etc/systemd/system/k3s.service +[INFO] systemd: Enabling k3s unit +Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service β†’ /etc/systemd/system/k3s.service. +[INFO] systemd: Starting k3s +root@shredder:~# +``` + +Provided the last line of output says `Starting k3s` and not something more troublesome-sounding.. you have a cluster! Run `k3s kubectl get nodes -o wide` to confirm this, which has the useful side-effect of printing out your first master's IP address (*which we'll need for the next step*) + +```bash +root@shredder:~# k3s kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +shredder Ready control-plane,etcd,master 83s v1.21.5+k3s2 192.168.39.201 Ubuntu 20.04.3 LTS 5.4.0-70-generic containerd://1.4.11-k3s1 +root@shredder:~# +``` + +!!! tip "^Z undo undo ..." + Oops! Did you mess something up? Just run `k3s-uninstall.sh` to wipe all traces of K3s, and start over! + +### Deploy other masters (optional) + +Now that the first master is deploy, add additional masters (*remember to keep the total number of masters to an odd number*) by referencing the secret, and the IP address of the first master, on all the others: + +```bash +MYSECRET=iambatman +curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \ + sh -s - server --disable servicelb --server https://:6443 +``` + +Run `k3s kubectl get nodes` to see your new master node make friends with the others: + +```bash +root@shredder:~# k3s kubectl get nodes +NAME STATUS ROLES AGE VERSION +bebop Ready control-plane,etcd,master 4m13s v1.21.5+k3s2 +rocksteady Ready control-plane,etcd,master 4m42s v1.21.5+k3s2 +shredder Ready control-plane,etcd,master 8m54s v1.21.5+k3s2 +root@shredder:~# +``` + +### Deploy agents (optional) + +If you have more nodes which you want _not_ to be considered masters, then run the following on each. Note that the command syntax differs slightly from the masters (*which is why k3s deploys this as k3s-agent instead*) + +```bash +MYSECRET=iambatman +curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \ + K3S_URL=https://:6443 \ + sh -s - +``` + +!!! question "y no kubectl on agent?" + If you tried to run `k3s kubectl` on an agent, you'll notice that it returns an error about `localhost:8080` being refused. 
This is **normal**, and it happens because agents aren't necessarily "trusted" to the same degree that masters are, and so the cluster admin credentials are **not** saved to the filesystem, as they are with masters. + +!!! tip "^Z undo undo ..." + Oops! Did you mess something up? Just run `k3s-agent-uninstall.sh` to wipe all traces of K3s agent, and start over! + +## Release the kubectl! + +k3s will have saved your kubeconfig file on the masters to `/etc/rancher/k3s/k3s.yaml`. This file contains the necessary config and certificates to administer your cluster, and should be treated with the same respect and security as your root password. To interact with the cluster, you need to tell the kubectl command where to find this `KUBECONFIG` file. There are a few ways to do this... + +1. Prefix your `kubectl` commands with `k3s`. i.e., `kubectl cluster-info` becomes `k3s kubectl cluster-info` +2. Update your environment variables in your shell to set `KUBECONFIG` to `/etc/rancher/k3s/k3s.yaml` +3. Copy ``/etc/rancher/k3s/k3s.yaml` to `~/.kube/config`, which is the default location `kubectl` will look for + +Examine your beautiful new cluster by running `kubectl cluster-info` [^1] + +[^1]: Do you live in the CLI? Install the kubectl autocompletion for [bash](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-bash-linux/) or [zsh](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-zsh/) to make your life much easier! + +--8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/deployment/flux/design.md b/manuscript/kubernetes/deployment/flux/design.md new file mode 100644 index 0000000..818d05e --- /dev/null +++ b/manuscript/kubernetes/deployment/flux/design.md @@ -0,0 +1,65 @@ +--- +description: Kubernetes Flux deployment strategy - Design +--- +# Design + +!!! question "Shouldn't a design **precede** installation instructions?" + In this case, I felt that an [installation](/kubernetes/deployment/flux/install/) and a practical demonstration upfront, would help readers to understand the flux design, and make it simpler to then explain how to [operate](/kubernetes/deployment/flux/operate/) flux themselves! πŸ’ͺ + +Flux is power and flexible enough to fit many use-cases. After some experience and dead-ends, I've worked out a way to deploy Flux with enough flexibility but structure to make it an almost-invisible part of how my cluster "just works" on an ongoing basis.. + +## Diagram + +Consider this entity relationship diagram: + +``` mermaid + erDiagram + repo-path-flux-system ||..|{ app-namespace : "contains yaml for" + repo-path-flux-system ||..|{ app-kustomization : "contains yaml for" + repo-path-flux-system ||..|{ helmrepositories : "contains yaml for" + + app-kustomization ||..|| repo-path-app : "points flux at" + + flux-system-kustomization ||..|| repo-path-flux-system : "points flux at" + + repo-path-app ||..|{ app-helmreleases: "contains yaml for" + repo-path-app ||..|{ app-configmap: "contains yaml for" + repo-path-app ||..|o app-sealed-secrets: "contains yaml for" + + app-configmap ||..|| app-helmreleases : configures + helmrepositories ||..|| app-helmreleases : "host charts for" + + app-helmreleases ||..|{ app-containers : deploys + app-containers }|..|o app-sealed-secrets : references +``` + +## Explanation + +And here's what it all means, starting from the top... + +1. 
The flux-system **Kustomization** tells flux to look in the repo in `/flux-system`, and apply any YAMLs it finds (*with optional kustomize templating, if you're an uber-ninja!*). +2. Within `/flux-system`, we've defined (for convenience) 3 subfolders, containing YAML for: + 1. `namespaces` : Any other **Namespaces** we want to deploy for our apps + 2. `helmrepositories` : Any **HelmRepositories** we later want to pull helm charts from + 3. `kustomizations` : Any **Kustomizations** we need, to tell flux to import YAMLs from **elsewhere** in the repository +3. In turn, each app's **Kustomization** (*which we just defined above*) tells flux to look in the repo in the `/` path, and apply any YAMLs it finds (*with optional kustomize templating, if you're an uber-ninja!*). +4. Within the `/` path, we define **at least** the following: + 1. A **HelmRelease** for the app, telling flux which version of what chart to apply from which **HelmRepository** + 2. A **ConfigMap** for the HelmRelease, which contains all the custom (*and default!*) values for the chart +5. Of course, we can also put any **other** YAML into the `/` path in the repo, which may include additional ConfigMaps, SealedSecrets (*for safely storing secrets in a repo*), Ingresses, etc. + +!!! question "That seems overly complex!" + > "Why not just stick all the YAML into one folder and let flux reconcile it all-at-once?" + + Several reasons: + + * We need to be able to deploy multiple copies of the same helm chart into different namespaces. Imagine if you wanted to deploy a "postgres" helm chart into a namespace for KeyCloak, plus another one for NextCloud. Putting each HelmRelease resource into its own namespace allows us to do this, while sourcing them all from a common HelmRepository + * As your cluster grows in complexity, you end up with dependency issues, and sometimes you need one chart deployed first, in order to create CRDs which are depended upon by a second chart (*like Prometheus' ServiceMonitor*). Isolating apps to a kustomization-per-app means you can implement dependencies and health checks to allow a complex cluster design without chicken vs egg problems! + +## Got it? + +Good! I describe how to put this design into action on the [next page](/kubernetes/deployment/flux/operate/)... + +[^1]: ERDs are fancy diagrams for nERDs which [represent cardinality between entities](https://en.wikipedia.org/wiki/Entity%E2%80%93relationship_model#Crow's_foot_notation) scribbled using the foot of a crow πŸ“ + +--8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/deployment/flux/install.md b/manuscript/kubernetes/deployment/flux/install.md new file mode 100644 index 0000000..80d29b4 --- /dev/null +++ b/manuscript/kubernetes/deployment/flux/install.md @@ -0,0 +1,134 @@ +--- +description: Kubernetes Flux deployment strategy - Installation +--- + +# Flux Installation + +[Flux](https://fluxcd.io/) is a set of continuous and progressive delivery solutions for Kubernetes that are open and extensible. + +Using flux to manage deployments into the cluster means: + +1. All change is version-controlled (*i.e. "GitOps"*) +2. It's not necessary to expose the cluster API (*which would otherwise be the case if you were using CI*) +3. Deployments can be paused, rolled back, examined, and debugged using Kubernetes primitives and tooling + +!!! summary "Ingredients" + + * [x] [Install the flux CLI tools](https://fluxcd.io/docs/installation/#install-the-flux-cli) on a host which has access to your cluster's apiserver.
+ * [x] Create a GitHub [personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line) that can create repositories by checking all permissions under repo. + * [x] Create a private GitHub repository dedicated to your flux deployments + +## Basics + +Here's a simplified way to think about the various flux components.. + +1. You need a source for flux to look at. This is usually a Git repository, although it can also be a helm repository, an S3 bucket. A source defines the entire repo (*not a path or a folder structure*). +2. Within your source, you define one or more kustomizations. Each kustomization is a _location_ on your source (*i.e., myrepo/nginx*) containing YAML files to be applied directly to the API server. +3. The YAML files inside the kustomization include: + 1. HelmRepositories (*think of these as the repos you'd add to helm with `helm repo`*) + 2. HelmReleases (*these are charts which live in HelmRepositories*) + 3. Any other valid Kubernetes YAML manifests (*i.e., ConfigMaps, etc)* + +## Preparation + +### Install flux CLI + +This section is a [direct copy of the official docs](https://fluxcd.io/docs/installation/#install-the-flux-cli), to save you having to open another tab.. + +=== "HomeBrew (MacOS/Linux)" + + With [Homebrew](https://brew.sh/) for macOS and Linux: + + ```bash + brew install fluxcd/tap/flux + ``` + +=== "Bash (MacOS/Linux)" + + With Bash for macOS and Linux: + + ```bash + curl -s https://fluxcd.io/install.sh | sudo bash + ``` + +=== "Chocolatey" + + With [Chocolatey](https://chocolatey.org/) for Windows: + + ```bash + choco install flux + ``` + +### Create GitHub Token + +Create a GitHub [personal access token](https://github.com/settings/tokens) that can create repositories by checking all permissions under repo. (*we'll use the token in the bootstrapping step below*) + +### Create GitHub Repo + +Now we'll create a repo for flux - it can (*and probably should!*) be private. I've created a [template repo to get you started](https://github.com/geek-cookbook/template-flux/generate), but you could simply start with a blank repo too.[^1] + +### Bootstrap Flux + +Having prepared all of the above, we're now ready to deploy flux. Before we start, take a look at all the running pods in the cluster, with `kubectl get pods -A`. You should see something like this... + +```bash +root@shredder:~# k3s kubectl get pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-7448499f4d-qfszx 1/1 Running 0 6m32s +kube-system local-path-provisioner-5ff76fc89d-rqh52 1/1 Running 0 6m32s +kube-system metrics-server-86cbb8457f-25688 1/1 Running 0 6m32s +``` + +Now, run a customized version of the following: + +```bash +GITHUB_TOKEN= +flux bootstrap github \ + --owner=my-github-username \ + --repository=my-github-username/my-repository \ + --personal +``` + +Once the flux bootstrap is completed without errors, list the pods in the cluster again, with `kubectl get pods -A`. 
This time, you see something like this: + +```bash +root@shredder:~# k3s kubectl get pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +flux-system helm-controller-f7c5b6c56-nk7rm 1/1 Running 0 5m48s +flux-system kustomize-controller-55db56f44f-4kqs2 1/1 Running 0 5m48s +flux-system notification-controller-77f68bf8f4-9zlw9 1/1 Running 0 5m48s +flux-system source-controller-8457664f8f-8qhhm 1/1 Running 0 5m48s +kube-system coredns-7448499f4d-qfszx 1/1 Running 0 15m +kube-system local-path-provisioner-5ff76fc89d-rqh52 1/1 Running 0 15m +kube-system metrics-server-86cbb8457f-25688 1/1 Running 0 15m +traefik svclb-traefik-ppvhr 2/2 Running 0 5m31s +traefik traefik-f48b94477-d476p 1/1 Running 0 5m31s +root@shredder:~# +``` + +### What just happened? + +Flux installed its controllers into the `flux-system` namespace, and created two new objects: + +1. A **GitRepository** called `flux-system`, pointing to your GitHub repo. +2. A **Kustomization** called `flux-system`, pointing to the `flux-system` directory in the above repo. + +If you used my template repo, some extra things also happened.. + +1. I'd pre-populated the `flux-system` directory in the template repo with 3 folders: + 1. [helmrepositories](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/helmrepositories), for storing repositories used for deploying helm charts + 2. [kustomizations](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/kustomizations), for storing additional kustomizations *(which in turn can reference other paths in the repo*) + 3. [namespaces](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/namespaces), for storing namespace manifests (*since these need to exist before we can deploy helmreleases into them*) +2. Because the `flux-system` Kustomization includes everything **recursively** under `flux-system` path in the repo, all of the above were **also** applied to the cluster +3. I'd pre-prepared a [Namespace](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/namespaces/namespace-podinfo.yaml), [HelmRepository](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/helmrepositories/helmrepository-podinfo.yaml), and [Kustomization](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/kustomizations/kustomization-podinfo.yaml) for "podinfo", a simple example application, so these were applied to the cluster +4. The kustomization we added for podinfo refers to the `/podinfo` path in the repo, so everything in **this** folder was **also** applied to the cluster +5. In the `/podinfo` path of the repo is a [HelmRelease](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/helmrelease-podinfo.yaml) (*an object describing how to deploy a helm chart*), and a [ConfigMap](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/configmap-pofinfo-helm-chart-value-overrides-configmap.yaml) (*which ontain the `values.yaml` for the podinfo helm chart*) +6. Flux recognized the podinfo **HelmRelease**, applied it along with the values in the **ConfigMap**, and consequently we have podinfo deployed from the latest helm chart, into the cluster, and managed by Flux! πŸ’ͺ + +## Wait, but why? + +That's best explained on the [next page](/kubernetes/deployment/flux/design/), describing the design we're using... 
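Before clicking through to the design, you can use the flux CLI (*installed earlier*) to confirm these objects for yourself:

```bash
# Show the GitRepository source(s) flux is watching
flux get sources git

# Show the Kustomization(s) flux reconciles from those sources
flux get kustomizations
```

Both should report the `flux-system` entries as ready, with the revision matching the latest commit in your repo.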
+ +--8<-- "recipe-footer.md" + +[^1]: The [template repo](https://github.com/geek-cookbook/template-flux/) also "bootstraps" a simple example re how to [operate flux](/kubernetes/deployment/flux/operate/), by deploying the podinfo helm chart. diff --git a/manuscript/kubernetes/deployment/flux/operate.md b/manuscript/kubernetes/deployment/flux/operate.md new file mode 100644 index 0000000..9669ff9 --- /dev/null +++ b/manuscript/kubernetes/deployment/flux/operate.md @@ -0,0 +1,158 @@ +--- +description: Kubernetes Flux deployment strategy - Operation +--- + +# Operation + +Having described [how to install flux](/kubernetes/deployment/flux/install/), and [how our flux deployment design works](/kubernetes/deployment/flux/design/), let's finish by exploring how to **use** flux to deploy helm charts into a cluster! + +## Deploy App + +We'll need 5 files per-app, to deploy and manage our apps using flux. The example below will use the following highlighted files: + +```hl_lines="4 6 8 10 11" +β”œβ”€β”€ README.md +β”œβ”€β”€ flux-system +β”‚Β Β  β”œβ”€β”€ helmrepositories +β”‚Β Β  β”‚Β Β  └── helmrepository-podinfo.yaml +β”‚Β Β  β”œβ”€β”€ kustomizations +β”‚Β Β  β”‚Β Β  └── kustomization-podinfo.yaml +β”‚Β Β  └── namespaces +β”‚Β Β  └── namespace-podinfo.yaml +└── podinfo + β”œβ”€β”€ configmap-podinfo-helm-chart-value-overrides.yaml + └── helmrelease-podinfo.yaml +``` + +???+ question "5 files! That seems overly complex!" + > "Why not just stick all the YAML into one folder and let flux reconcile it all-at-once?" + + Several reasons: + + * We need to be able to deploy multiple copies of the same helm chart into different namespaces. Imagine if you wanted to deploy a "postgres" helm chart into a namespace for KeyCloak, plus another one for NextCloud. Putting each HelmRelease resource into its own namespace allows us to do this, while sourcing them all from a common HelmRepository + * As your cluster grows in complexity, you end up with dependency issues, and sometimes you need one chart deployed first, in order to create CRDs which are depended upon by a second chart (*like Prometheus' ServiceMonitor*). Isolating apps to a kustomization-per-app means you can implement dependencies and health checks to allow a complex cluster design without chicken vs egg problems! + * I like to use the one-object-per-yaml-file approach. Kubernetes is complex enough without trying to define multiple objects in one file, or having confusingly-generic filenames such as `app.yaml`! πŸ€¦β€β™‚οΈ + +### Identify target helm chart + +Identify your target helm chart. Let's take podinfo as an example. Here's the [official chart](https://github.com/stefanprodan/podinfo/tree/master/charts/podinfo), and here's the [values.yaml](https://github.com/stefanprodan/podinfo/tree/master/charts/podinfo/values.yaml) which describes the default values passed to the chart (*and the options the user has to make changes*). + +### Create HelmRepository + +The README instructs users to add the repo "podinfo" with the URL `ttps://stefanprodan.github.io/podinfo`, so +create a suitable HelmRepository YAML in `flux-system/helmrepositories/helmrepository-podinfo.yaml`. Here's [my example](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/helmrepositories/helmrepository-podinfo.yaml). + +!!! question "Why such obtuse file names?" + > Why not just call the HelmRepository YAML `podinfo.yaml`? Why prefix the filename with the API object `helmrepository-`? 
+ + We're splitting the various "bits" which define this app into multiple YAMLs, and we'll soon have multiple apps in our repo, each with their own set of "bits". It gets very confusing quickly, when comparing git commit diffs, if you're not explicitly clear on what file you're working on, or which changes you're reviewing. Plus, adding the API object name to the filename provides extra "metadata" to the file structure, and makes "fuzzy searching" for quick-opening of files in tools like VSCode more effective. + +### Create Namespace + +Create a namespace for the chart. Typically you'd name this the same as your chart name. Here's [my namespace-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/namespaces/namespace-podinfo.yaml). + +??? example "Here's an example Namespace..." + + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: podinfo + ``` + +### Create Kustomization + +Create a kustomization for the chart, pointing flux to a path in the repo where the chart-specific YAMLs will be found. Here's my [kustomization-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/kustomizations/kustomization-podinfo.yaml). + +??? example "Here's an example Kustomization..." + + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: podinfo + namespace: flux-system + spec: + interval: 15m + path: podinfo + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: podinfo + namespace: podinfo + ``` + +### Create HelmRelease + +Now create a HelmRelease for the chart - the HelmRelease defines how the (generic) chart from the HelmRepository will be installed into our cluster. Here's my [podinfo/helmrelease-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/helmrelease-podinfo.yaml). + +??? example "Here's an example HelmRelease..." + + ```yaml + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: podinfo + namespace: podinfo + spec: + chart: + spec: + chart: podinfo # Must be the same as the upstream chart name + version: 10.x # Pin to semver major versions to avoid breaking changes but still get bugfixes/updates + sourceRef: + kind: HelmRepository + name: podinfo # References the HelmRepository you created earlier + namespace: flux-system # All HelmRepositories exist in the flux-system namespace + interval: 15m + timeout: 5m + releaseName: podinfo # _may_ be different from the upstream chart name, but could cause confusion + valuesFrom: + - kind: ConfigMap + name: podinfo-helm-chart-value-overrides # Align with the name of the ConfigMap containing all values + valuesKey: values.yaml # This is the default, but best to be explicit for clarity + ``` + +### Create ConfigMap + +Finally, create a ConfigMap to be used to pass helm chart values to the chart. Note that it is **possible** to pass values directly in the HelmRelease, but.. it's messy. I find it easier to let the HelmRelease **describe** the release, and to let the configmap **configure** the release. It also makes tracking changes more straightforward. + +As a second note, it's strictly only necessary to include in the ConfigMap the values you want to **change** from the chart's defaults. 
I find this to be too confusing as charts are continually updated by their developers, and this can obscure valuable options over time. So I place in my ConfigMaps the **entire** contents of the chart's `values.yaml` file, and then I explicitly overwrite the values I want to change. + +!!! tip "Making chart updates simpl(er)" + This also makes updating my values for an upstream chart refactor a simple process - I duplicate the ConfigMap, paste-overwrite with the values.yaml for the refactored/updated chart, and compare the old and new versions side-by-side, to ensure I'm still up-to-date. + +It's too large to display nicely below, but here's my [podinfo/configmap-podinfo-helm-chart-value-overrides.yaml](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml) + +!!! tip "Yes, I am sticking to my super-obtuse file naming convention!" + Doesn't it make it easier to understand, at a glance, exactly what this YAML file is intended to be? + +### Commit the changes + +Simply commit your changes, sit back, and wait for flux to do its 1-min update. If you like to watch the fun, you could run `watch -n1 flux get kustomizations` so that you'll see the reconciliation take place (*if you're quick*). You can also force flux to check the repo for changes manually, by running `flux reconcile source git flux-system`. + +## Making changes + +Let's say you decide that instead of 1 replica of the podinfo pod, you'd like 3 replicas. Edit your configmap, and change `replicaCount: 1` to `replicaCount: 3`. + +Commit your changes, and once again do the waiting / impatient-reconciling jig. This time you'll have to wait up to 15 minutes though... + +!!! question "Why 15 minutes?" + > I thought we check the repo every minute? + + Yes, we check the entire GitHub repository for changes every 1 min, and changes to a kustomization are applied immediately. I.e., your podinfo ConfigMap gets updated within a minute (roughly). But the interval value for the HelmRelease is set to 15 minutes, so you could be waiting for as long as 15 minutes for flux to re-reconcile your HelmRelease with the ConfigMap, and to apply any changes. I've found that setting the HelmRelease interval too low causes (a) lots of unnecessary resource usage on behalf of flux, and (b) less stability when you have a large number of HelmReleases, some of which depend on each other. + + You can force a HelmRelease to reconcile, by running `flux reconcile helmrelease -n ` + +## Success! + +We did it. The Holy Grail. We deployed an application into the cluster, without touching the cluster. Pinch yourself, and then prove it worked by running `flux get kustomizations`, or `kubectl get helmreleases -n podinfo`. + +--8<-- "recipe-footer.md" + +[^1]: Got suggestions for improvements here? Shout out in the comments below! diff --git a/manuscript/kubernetes/deployment/index.md b/manuscript/kubernetes/deployment/index.md new file mode 100644 index 0000000..7ab88ac --- /dev/null +++ b/manuscript/kubernetes/deployment/index.md @@ -0,0 +1,22 @@ +--- +description: Kubernetes deployment strategies +--- + +# Deployment + +So far our Kubernetes journey has been fairly linear - your standard "geek follows instructions" sort of deal. + +When it comes to a deployment methodology, there are a few paths you can take, and it's possible to "mix-and-match" if you want to (*and if you enjoy extra pain and frustration!*) + +Being declarative, Kubernetes is "driven" by your definitions of an intended state.
I.e., "*I want a minecraft server and a 3-node redis cluster*". The state is defined by resources (pod, deployment, PVC) etc, which you apply to the Kubernetes apiserver, normally using YAML. + +Now you _could_ hand-craft some YAML files, and manually apply these to the apiserver, but there are much smarter and more scalable ways to drive Kubernetes. + +The typical methods of deploying applications into Kubernetes, sorted from least to most desirable and safe are: + +1. A human applies YAML directly to the apiserver. +2. A human applies a helm chart directly to the apiserver. +3. A human updates a version-controlled set of configs, and a CI process applies YAML/helm chart directly to the apiserver. +4. A human updates a version-controlled set of configs, and a trusted process _within_ the cluster "reaches out" to the config, and applies it to itself. + +In our case, #4 is achieved with [Flux](/kubernetes/deployment/flux/). diff --git a/manuscript/kubernetes/diycluster.md b/manuscript/kubernetes/diycluster.md deleted file mode 100644 index 7af1b28..0000000 --- a/manuscript/kubernetes/diycluster.md +++ /dev/null @@ -1,313 +0,0 @@ -# DIY Kubernetes - -If you are looking for a little more of a challenge, or just don't have the money to fork out to managed Kubernetes, you're in luck. -Kubernetes provides many ways to run a cluster, by far the simplest method is with `minikube` but there are other methods like `k3s` and using `drp` to deploy a cluster. -After all, DIY its in our DNA. - -## Ingredients - -1. Basic knowledge of Kubernetes terms (Will come in handy) [Start](/kubernetes/start) -2. Some Linux machines (Depends on what recipe you follow) - -## Minikube - -First, what is minikube? -Minikube is a method of running Kubernetes on your local machine. -It is mainly targeted at developers looking to test if their application will work with Kubernetes without deploying it to a production cluster. For this reason, -I do not recommend running your cluster on minikube as it isn't designed for deployment, and is only a single node cluster. - -If you want to use minikube, there is a guide below but again, I recommend using something more production-ready like `k3s` or `drp` - -### Ingredients - -1. A Fresh Linux Machine -2. Some basic Linux knowledge (or can just copy-paste) - -!!! note - Make sure you are running a SystemD based distro like Ubuntu. - Although minikube will run on macOS and Windows, - they add in additional complexities to the installation as they - require running a Linux based image running in a VM, - that although minikube will manage, adds to the complexities. And - even then, who uses Windows or macOS in production anyways? πŸ™‚ - If you are serious about running on windows/macOS, - check the official MiniKube guides - [here](https://minikube.sigs.k8s.io/docs/start/) - -### Installation - -After booting yourself up a fresh Linux machine and getting to a console, -you can now install minikube. - -Download and install our minikube binary - -```sh -curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 -sudo install minikube-linux-amd64 /usr/local/bin/minikube -``` - -Now we can boot up our cluster - -```sh -sudo minikube start --vm-driver=none -#Start our minikube instance, and make it use the machine to host the cluster, instead of a VM -sudo minikube config set vm-driver none #Set our default vm driver to none -``` - -You are now set up with minikube! - -!!! 
warning - MiniKube is not a production-grade method of deploying Kubernetes - -## K3S - -What is k3s? -K3s is a production-ready method of deploying Kubernetes on many machines, -where a full Kubernetes deployment is not required, AKA - your cluster (unless your a big SaaS company, in that case, can I get a job?). - -### Ingredients - -1. A handful of Linux machines (3 or more, virtualized or not) -2. Some Linux knowledge. -3. Patience. - -### Setting your Linux Machines up - -Firstly, my flavour of choice for deployment is Ubuntu Server, -although it is not as enterprise-friendly as RHEL (That's Red Hat Enterprise Linux for my less geeky readers) or CentOS (The free version of RHEL). -Ubuntu ticks all the boxes for k3s to run on and allows you to follow lots of other guides on managing and maintaining your Ubuntu server. - -Firstly, download yourself a version of Ubuntu Server from [here](https://ubuntu.com/download/server) (Whatever is latest) -Then spin yourself up as many systems as you need with the following guide - -!!! note - I am running a 3 node cluster, with nodes running on Ubuntu 19.04, all virtualized with VMWare ESXi - Your setup doesn't need to be as complex as mine, you can use 3 old Dell OptiPlex if you really want πŸ™‚ - -1. Insert your installation medium into the machine, and boot it. -2. Select your language -3. Select your keyboard layout -4. Select `Install Ubuntu` -5. Check and modify your network settings if required, make sure to write down your IPs -6. Select Done on Proxy, unless you use a proxy -7. Select Done on Mirror, as it has picked the best mirror for you unless you have a local mirror you want to use (in that case you are uber-geek) -8. Select `Use An Entire Disk` for Filesystem, and basically hit enter for the rest of the disk setup, -just make sure to read the prompts and understand what you are doing -9. Now that you are up to setting up the profile, this is where things change. -You are going to want to set up the same account on all the machines, but change the server name just a tad every time. -![Profile Setup for Node 1](../images/diycluster-k3s-profile-setup.png) -![Profile Setup for Node 2](../images/diycluster-k3s-profile-setup-node2.png) -10. Now install OpenSSH on the server, if you wish to import your existing SSH key from GitHub or Launchpad, -you can do that now and save yourself a step later. -11. Skip over Featured Server snaps by clicking `Done` -12. Wait for your server to install everything and drop you to a Linux prompt - -13. Repeat for all your nodes - -### Pre-installation of k3s - -For the rest of this guide, you will need some sort of Linux/macOS based terminal. -On Windows you can do this with Windows Subsystem for Linux (WSL) see [here for information on WSL.](https://aka.ms/wslinstall) - -The rest of this guide will all be from your local terminal. - -If you already have an SSH key generated or added an existing one, skip this step. -From your PC,run `ssh-keygen` to generate a public and private key pair -(You can use this instead of typing your password in every time you want to connect via ssh) - -```sh -$ ssh-keygen -Generating public/private rsa key pair. -Enter file in which to save the key (/home/thomas/.ssh/id_rsa): [enter] -Enter passphrase (empty for no passphrase): [password] -Enter same passphrase again: [password] -Your identification has been saved in /home/thomas/.ssh/id_rsa. -Your public key has been saved in /home/thomas/.ssh/id_rsa.pub. -The key fingerprint is: -... -The key's randomart image is: -... 
-``` - -If you have already imported a key from GitHub or Launchpad, skip this step. - -```sh -$ ssh-copy-id [username]@[hostname] -/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/thomas/.ssh/id_rsa.pub" -The authenticity of host 'thomas-k3s-node1 (theipaddress)' can't be established. -ECDSA key fingerprint is SHA256:... -Are you sure you want to continue connecting (yes/no)? yes -/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed -/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys -thomas@thomas-k3s-node1's password: [insert your password now] - -Number of key(s) added: 1 -``` - -You will want to do this once for every machine, replacing the hostname with the other next nodes hostname each time. - -!!! note - If your hostnames aren't resolving correct, try adding them to your `/etc/hosts` file - -### Installation - -If you have access to the premix repository, you can download the ansible-playbook and follow the steps contained in there, if not sit back and prepare to do it manually. - -!!! tip - Becoming a patron will allow you to get the ansible-playbook to setup k3s on your own hosts. For as little as 5$/m you can get access to the ansible playbooks for this recipe, and more! - See [funkypenguin's Patreon](https://www.patreon.com/funkypenguin) for more! - - -Select one node to become your master, in my case `thomas-k3s-node1`. -Now SSH into this node, and run the following: - -```sh -localpc$ ssh thomas@thomas-k3s-node1 -Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password] - -thomas-k3s-node1$ curl -sfL https://get.k3s.io | sh - -[sudo] password for thomas: [password entered in setup] -[INFO] Finding latest release -[INFO] Using v1.0.0 as release -[INFO] Downloading hash https://github.com/rancher/k3s/releases/download/v1.0.0/sha256sum-amd64.txt -[INFO] Downloading binary https://github.com/rancher/k3s/releases/download/v1.0.0/k3s -[INFO] Verifying binary download -[INFO] Installing k3s to /usr/local/bin/k3s -[INFO] Creating /usr/local/bin/kubectl symlink to k3s -[INFO] Creating /usr/local/bin/crictl symlink to k3s -[INFO] Creating /usr/local/bin/ctr symlink to k3s -[INFO] Creating killall script /usr/local/bin/k3s-killall.sh -[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh -[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env -[INFO] systemd: Creating service file /etc/systemd/system/k3s.service -[INFO] systemd: Enabling k3s unit -Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service β†’ /etc/systemd/system/k3s.service. -[INFO] systemd: Starting k3s -``` - -Before we log out of the master, we need the token from it. -Make sure to note this token down -(please don't write it on paper, use something like `notepad` or `vim`, it's ~100 characters) - -```sh -thomas-k3s-node1$ sudo cat /var/lib/rancher/k3s/server/node-token -K1097e226f95f56d90a4bab7151... 
-``` - -Make sure all nodes can access each other by hostname, whether you add them to `/etc/hosts` or to your DNS server - -Now that you have your master node setup, you can now add worker nodes - -SSH into the other nodes, and run the following making sure to replace values with ones that suit your installation - -```sh -localpc$ ssh thomas@thomas-k3s-node2 -Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password] - -thomas-k3s-node2$ curl -sfL https://get.k3s.io | K3S_URL=https://thomas-k3s-node1:6443 K3S_TOKEN=K1097e226f95f56d90a4bab7151... sh - -``` - -Now test your installation! - -SSH into your master node - -```sh -ssh thomas@thomas-k3s-node1 -Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password] - -thomas-k3s-node1$ sudo kubectl get nodes - -NAME STATUS ROLES AGE VERSION -thomas-k3s-node1 Ready master 15m3s v1.16.3-k3s.2 -thomas-k3s-node2 Ready 6m58s v1.16.3-k3s.2 -thomas-k3s-node3 Ready 6m12s v1.16.3-k3s.2 -``` - -If you got Ready for all your nodes, Well Done! Your k3s cluster is now running! If not try getting help in our discord. - -### Post-Installation - -Now you can get yourself a kubeconfig for your cluster. -SSH into your master node, and run the following - -```sh -localpc$ ssh thomas@thomas-k3s-node1 -Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password] - -thomas-k3s-node1$ sudo kubectl config view --flatten -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: LS0tLS1CRUdJTiBD... - server: https://127.0.0.1:6443 - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: thisishowtolosecontrolofyourk3s - username: admin -``` - -Make sure to change `clusters.cluster.server` to have the master node's name instead of `127.0.0.1`, in my case making it `https://thomas-k3s-node1:6443` - -!!! warning - This kubeconfig file can grant full access to your Kubernetes installation, I recommend you protect this file just as well as you protect your passwords - -You will probably want to save this kubeconfig file into a file on your local machine, say `my-k3s-cluster.yml` or `where-8-hours-of-my-life-went.yml`. -Now test it out! - -```sh -localpc$ kubectl --kubeconfig=my-k3s-cluster.yml get nodes -NAME STATUS ROLES AGE VERSION -thomas-k3s-node1 Ready master 495m v1.16.3-k3s.2 -thomas-k3s-node2 Ready 488m v1.16.3-k3s.2 -thomas-k3s-node3 Ready 487m v1.16.3-k3s.2 -``` - - - -That is all! You have yourself a Kubernetes cluster for you and your dog to enjoy. - -## DRP - -DRP or Digital Rebar Provisioning Tool is a tool designed to automatically setup your cluster, installing an operating system for you, and doing all the configuration like we did in the k3s setup. - -This section is WIP, instead, try using the K3S guide above πŸ™‚ - -## Where from now - -Now that you have wasted half a lifetime on installing your very own cluster, you can install more to it. Like a load balancer! - -* [Start](/kubernetes/) - Why Kubernetes? -* [Design](/kubernetes/design/) - How does it fit together? 
-* Cluster (this page) - Setup a basic cluster -* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access -* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data -* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks -* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm - -## About your guest chef - -This article, believe it or not, was not diced up by your regular chef (funkypenguin). -Instead, today's article was diced up by HexF, a fellow kiwi (hence a lot of kiwi references) who enjoys his sysadmin time. -Feel free to talk to today's chef in the discord, or see one of his many other links that you can follow below - -[Twitter](https://twitter.com/hexf_me) β€’ [Website](https://hexf.me/) β€’ [Github](https://github.com/hexf) - - - ---8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/external-dns.md b/manuscript/kubernetes/external-dns.md new file mode 100644 index 0000000..e710860 --- /dev/null +++ b/manuscript/kubernetes/external-dns.md @@ -0,0 +1,1081 @@ +# External DNS + +Kubernetes' internal DNS / service-discovery means that every service is resolvable within the cluster. You can create a Wordpress pod with a database URL pointing to "mysql", and trust that it'll find the service named "mysql" in the same namespace. (*Or "mysql.weirdothernamespace" if you prefer*) + +This super-handy DNS magic only works within the cluster though. When you wanted to connect to the hypothetical Wordpress service from **outside** of the cluster, you'd need to manually create a DNS entry pointing to the [LoadBalancer](/kubernetes/loadbalancer/) IP of that service. While using wildcard DNS might make this a **little** easier, it's still too manual and not at all "*gitopsy*" enough! + +ExternalDNS is a controller for Kubernetes which watches the objects you create (*Services, Ingresses, etc*), and configures External DNS providers (*like CloudFlare, Route53, etc*) accordingly. With External DNS, you **can** just deploy an ingress referencing "*mywordywordpressblog.batman.com*", and have that DNS entry autocreated on your provider within minutes πŸ’ͺ + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] API credentials for a [supported DNS provider](https://github.com/kubernetes-sigs/external-dns) + +## Preparation + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-external-dns.yaml`: + +??? example "Example Namespace (click to expand)" + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: external-dns +``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the (*prolific*) [bitnami chart repository](https://github.com/bitnami/charts/tree/master/bitnami), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-external-dns.yaml`: + +??? 
example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: bitnami + namespace: flux-system + spec: + interval: 15m + url: https://charts.bitnami.com/bitnami + ``` + +### Kustomization + +Now that the "global" elements of this deployment (*just the HelmRepository in this case*z*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/external-dns`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-external-dns.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: external-dns + namespace: flux-system + spec: + interval: 15m + path: ./external-dns + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: external-dns + namespace: external-dns + ``` + +### ConfigMap + +Now we're into the external-dns-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami-labs/external-dns/blob/main/helm/external-dns/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `external-dns/configmap-external-dns-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: external-dns-helm-chart-value-overrides + namespace: external-dns + data: + values.yaml: |- + ## @section Global parameters + ## Global Docker image parameters + ## Please, note that this will override the image parameters, including dependencies, configured to use the global value + ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + + ## @param global.imageRegistry Global Docker image registry + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## + global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + + ## @section Common parameters + + ## @param nameOverride String to partially override external-dns.fullname template (will maintain the release name) + ## + nameOverride: "" + ## @param fullnameOverride String to fully override external-dns.fullname template + ## + fullnameOverride: "" + ## @param clusterDomain Kubernetes Cluster Domain + ## + clusterDomain: cluster.local + + ## @section external-dns parameters + + ## Bitnami external-dns image version + ## ref: https://hub.docker.com/r/bitnami/external-dns/tags/ + ## @param image.registry ExternalDNS image registry + ## @param image.repository ExternalDNS image repository + ## @param image.tag ExternalDNS Image tag (immutable tags are recommended) + ## @param image.pullPolicy ExternalDNS image pull policy + ## @param image.pullSecrets ExternalDNS image pull secrets + ## + image: + registry: docker.io + repository: bitnami/external-dns + tag: 0.10.1-debian-10-r5 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param sources [array] K8s resources type to be observed for new DNS entries by ExternalDNS + ## + sources: + # - crd + - service + - ingress + # - contour-httpproxy + ## @param provider DNS provider where the DNS records will be created. + ## Available providers are: + ## - alibabacloud, aws, azure, azure-private-dns, cloudflare, coredns, designate, digitalocean, google, hetzner, infoblox, linode, rfc2136, transip + ## + provider: aws + ## Flags related to processing sources + ## ref: https://github.com/kubernetes-sigs/external-dns/blob/master/pkg/apis/externaldns/types.go#L272 + ## @param namespace Limit sources of endpoints to a specific namespace (default: all namespaces) + ## + namespace: "" + ## @param fqdnTemplates Templated strings that are used to generate DNS names from sources that don't define a hostname themselves + ## + fqdnTemplates: [] + ## @param combineFQDNAnnotation Combine FQDN template and annotations instead of overwriting + ## + combineFQDNAnnotation: false + ## @param ignoreHostnameAnnotation Ignore hostname annotation when generating DNS names, valid only when fqdn-template is set + ## + ignoreHostnameAnnotation: false + ## @param publishInternalServices Allow external-dns to publish DNS records for ClusterIP services + ## + publishInternalServices: false + ## @param publishHostIP Allow external-dns to publish host-ip for headless services + ## + publishHostIP: false + ## @param serviceTypeFilter The service types to take care about (default: all, options: ClusterIP, NodePort, LoadBalancer, ExternalName) + ## + serviceTypeFilter: [] + ## Alibaba cloud configuration to be set via arguments/env. 
variables + ## These will be added to /etc/kubernetes/alibaba-cloud.json via secret + ## + alibabacloud: + ## @param alibabacloud.accessKeyId When using the Alibaba Cloud provider, set `accessKeyId` in the Alibaba Cloud configuration file (optional) + ## + accessKeyId: "" + ## @param alibabacloud.accessKeySecret When using the Alibaba Cloud provider, set `accessKeySecret` in the Alibaba Cloud configuration file (optional) + ## + accessKeySecret: "" + ## @param alibabacloud.regionId When using the Alibaba Cloud provider, set `regionId` in the Alibaba Cloud configuration file (optional) + ## + regionId: "" + ## @param alibabacloud.vpcId Alibaba Cloud VPC Id + ## + vpcId: "" + ## @param alibabacloud.secretName Use an existing secret with key "alibaba-cloud.json" defined. + ## This ignores alibabacloud.accessKeyId, and alibabacloud.accessKeySecret + ## + secretName: "" + ## @param alibabacloud.zoneType Zone Filter. Available values are: public, private, or no value for both + ## + zoneType: "" + ## AWS configuration to be set via arguments/env. variables + ## + aws: + ## AWS credentials + ## @param aws.credentials.secretKey When using the AWS provider, set `aws_secret_access_key` in the AWS credentials (optional) + ## @param aws.credentials.accessKey When using the AWS provider, set `aws_access_key_id` in the AWS credentials (optional) + ## @param aws.credentials.mountPath When using the AWS provider, determine `mountPath` for `credentials` secret + ## + credentials: + secretKey: "" + accessKey: "" + ## Before external-dns 0.5.9 home dir should be `/root/.aws` + ## + mountPath: "/.aws" + ## @param aws.credentials.secretName Use an existing secret with key "credentials" defined. + ## This ignores aws.credentials.secretKey, and aws.credentials.accessKey + ## + secretName: "" + ## @param aws.region When using the AWS provider, `AWS_DEFAULT_REGION` to set in the environment (optional) + ## + region: "us-east-1" + ## @param aws.zoneType When using the AWS provider, filter for zones of this type (optional, options: public, private) + ## + zoneType: "" + ## @param aws.assumeRoleArn When using the AWS provider, assume role by specifying --aws-assume-role to the external-dns daemon + ## + assumeRoleArn: "" + ## @param aws.apiRetries Maximum number of retries for AWS API calls before giving up + ## + apiRetries: 3 + ## @param aws.batchChangeSize When using the AWS provider, set the maximum number of changes that will be applied in each batch + ## + batchChangeSize: 1000 + ## @param aws.zoneTags When using the AWS provider, filter for zones with these tags + ## + zoneTags: [] + ## @param aws.preferCNAME When using the AWS provider, replaces Alias records with CNAME (options: true, false) + ## + preferCNAME: "" + ## @param aws.evaluateTargetHealth When using the AWS provider, sets the evaluate target health flag (options: true, false) + ## + evaluateTargetHealth: "" + ## Azure configuration to be set via arguments/env. 
variables + ## + azure: + ## When a secret to load azure.json is not specified, the host's /etc/kubernetes/azure.json will be used + ## @param azure.secretName When using the Azure provider, set the secret containing the `azure.json` file + ## + secretName: "" + ## @param azure.cloud When using the Azure provider, set the Azure Cloud + ## + cloud: "" + ## @param azure.resourceGroup When using the Azure provider, set the Azure Resource Group + ## + resourceGroup: "" + ## @param azure.tenantId When using the Azure provider, set the Azure Tenant ID + ## + tenantId: "" + ## @param azure.subscriptionId When using the Azure provider, set the Azure Subscription ID + ## + subscriptionId: "" + ## @param azure.aadClientId When using the Azure provider, set the Azure AAD Client ID + ## + aadClientId: "" + ## @param azure.aadClientSecret When using the Azure provider, set the Azure AAD Client Secret + ## + aadClientSecret: "" + ## @param azure.useManagedIdentityExtension When using the Azure provider, set if you use Azure MSI + ## + useManagedIdentityExtension: false + ## @param azure.userAssignedIdentityID When using the Azure provider with Azure MSI, set Client ID of Azure user-assigned managed identity (optional, otherwise system-assigned managed identity is used) + ## + userAssignedIdentityID: "" + ## Cloudflare configuration to be set via arguments/env. variables + ## + cloudflare: + ## @param cloudflare.apiToken When using the Cloudflare provider, `CF_API_TOKEN` to set (optional) + ## + apiToken: "" + ## @param cloudflare.apiKey When using the Cloudflare provider, `CF_API_KEY` to set (optional) + ## + apiKey: "" + ## @param cloudflare.secretName When using the Cloudflare provider, it's the name of the secret containing cloudflare_api_token or cloudflare_api_key. + ## This ignores cloudflare.apiToken, and cloudflare.apiKey + ## + secretName: "" + ## @param cloudflare.email When using the Cloudflare provider, `CF_API_EMAIL` to set (optional). Needed when using CF_API_KEY + ## + email: "" + ## @param cloudflare.proxied When using the Cloudflare provider, enable the proxy feature (DDOS protection, CDN...) 
(optional) + ## + proxied: true + ## CoreDNS configuration to be set via arguments/env variables + ## + coredns: + ## @param coredns.etcdEndpoints When using the CoreDNS provider, set etcd backend endpoints (comma-separated list) + ## Secure (https) endpoints can be used as well, in that case `etcdTLS` section + ## should be filled in accordingly + ## + etcdEndpoints: "http://etcd-extdns:2379" + ## Configuration of the secure communication and client authentication to the etcd cluster + ## If enabled all the values under this key must hold a valid data + ## + etcdTLS: + ## @param coredns.etcdTLS.enabled When using the CoreDNS provider, enable secure communication with etcd + ## + enabled: false + ## @param coredns.etcdTLS.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param coredns.etcdTLS.secretName When using the CoreDNS provider, specify a name of existing Secret with etcd certs and keys + ## ref: https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/security.md + ## ref (secret creation): + ## https://github.com/bitnami/charts/tree/master/bitnami/etcd#configure-certificates-for-client-communication + ## + secretName: "etcd-client-certs" + ## @param coredns.etcdTLS.mountPath When using the CoreDNS provider, set destination dir to mount data from `coredns.etcdTLS.secretName` to + ## + mountPath: "/etc/coredns/tls/etcd" + ## @param coredns.etcdTLS.caFilename When using the CoreDNS provider, specify CA PEM file name from the `coredns.etcdTLS.secretName` + ## + caFilename: "ca.crt" + ## @param coredns.etcdTLS.certFilename When using the CoreDNS provider, specify cert PEM file name from the `coredns.etcdTLS.secretName` + ## Will be used by external-dns to authenticate against etcd + ## + certFilename: "cert.pem" + ## @param coredns.etcdTLS.keyFilename When using the CoreDNS provider, specify private key PEM file name from the `coredns.etcdTLS.secretName` + ## Will be used by external-dns to authenticate against etcd + ## + keyFilename: "key.pem" + ## OpenStack Designate provider configuration to be set via arguments/env. variables + ## + designate: + ## Set Openstack environment variables (optional). Username and password will be saved in a kubernetes secret + ## The alternative to this is to export the necessary Openstack environment variables in the extraEnv argument + ## @param designate.username When using the Designate provider, specify the OpenStack authentication username. (optional) + ## @param designate.password When using the Designate provider, specify the OpenStack authentication password. (optional) + ## @param designate.authUrl When using the Designate provider, specify the OpenStack authentication Url. (optional) + ## @param designate.regionName When using the Designate provider, specify the OpenStack region name. (optional) + ## @param designate.userDomainName When using the Designate provider, specify the OpenStack user domain name. (optional) + ## @param designate.projectName When using the Designate provider, specify the OpenStack project name. (optional) + ## @param designate.username When using the Designate provider, specify the OpenStack authentication username. 
(optional) + ## e.g: + ## username: "someuser" + ## password: "p@55w0rd" + ## authUrl: "https://mykeystone.example.net:5000/v3/" + ## regionName: "dev" + ## userDomainName: "development" + ## projectName: "myteamname" + ## + username: "" + password: "" + authUrl: "" + regionName: "" + userDomainName: "" + projectName: "" + ## @param designate.customCAHostPath When using the Designate provider, use a CA file already on the host to validate Openstack APIs. This conflicts with `designate.customCA.enabled` + ## This conflicts setting the above customCA to true and chart rendering will fail if you set customCA to true and specify customCAHostPath + ## + customCAHostPath: "" + ## Use a custom CA (optional) + ## @param designate.customCA.enabled When using the Designate provider, enable a custom CA (optional) + ## @param designate.customCA.content When using the Designate provider, set the content of the custom CA + ## @param designate.customCA.mountPath When using the Designate provider, set the mountPath in which to mount the custom CA configuration + ## @param designate.customCA.filename When using the Designate provider, set the custom CA configuration filename + ## + customCA: + enabled: false + content: "" + mountPath: "/config/designate" + filename: "designate-ca.pem" + ## DigitalOcean configuration to be set via arguments/env. variables + ## + digitalocean: + ## @param digitalocean.apiToken When using the DigitalOcean provider, `DO_TOKEN` to set (optional) + ## + apiToken: "" + ## @param digitalocean.secretName Use an existing secret with key "digitalocean_api_token" defined. + ## This ignores digitalocean.apiToken + ## + secretName: "" + ## Google configuration to be set via arguments/env. variables + ## + google: + ## @param google.project When using the Google provider, specify the Google project (required when provider=google) + ## + project: "" + ## @param google.serviceAccountSecret When using the Google provider, specify the existing secret which contains credentials.json (optional) + ## + serviceAccountSecret: "" + ## @param google.serviceAccountSecretKey When using the Google provider with an existing secret, specify the key name (optional) + ## + serviceAccountSecretKey: "credentials.json" + ## @param google.serviceAccountKey When using the Google provider, specify the service account key JSON file. In this case a new secret will be created holding this service account (optional) + ## + serviceAccountKey: "" + ## Hetzner configuration to be set via arguments/env. variables + ## + hetzner: + ## @param hetzner.token When using the Hetzner provider, specify your token here. (required when `hetzner.secretName` is not provided. In this case a new secret will be created holding the token.) + ## Mutually exclusive with `hetzner.secretName`. + ## + token: "" + ## @param hetzner.secretName When using the Hetzner provider, specify the existing secret which contains your token. Disables the usage of `hetzner.token` (optional) + ## + secretName: "" + ## @param hetzner.secretKey When using the Hetzner provider with an existing secret, specify the key name (optional) + ## + secretKey: "hetzner_token" + ## Infoblox configuration to be set via arguments/env. 
variables + ## + infoblox: + ## @param infoblox.wapiUsername When using the Infoblox provider, specify the Infoblox WAPI username + ## + wapiUsername: "admin" + ## @param infoblox.wapiPassword When using the Infoblox provider, specify the Infoblox WAPI password (required when provider=infoblox) + ## + wapiPassword: "" + ## @param infoblox.gridHost When using the Infoblox provider, specify the Infoblox Grid host (required when provider=infoblox) + ## + gridHost: "" + ## @param infoblox.view Infoblox view + ## + view: "" + ## Optional keys + ## + ## Existing secret name, when in place wapiUsername and wapiPassword are not required + ## secretName: "" + ## + ## @param infoblox.domainFilter When using the Infoblox provider, specify the domain (optional) + ## + domainFilter: "" + ## @param infoblox.noSslVerify When using the Infoblox provider, disable SSL verification (optional) + ## + noSslVerify: false + ## @param infoblox.wapiPort When using the Infoblox provider, specify the Infoblox WAPI port (optional) + ## + wapiPort: "" + ## @param infoblox.wapiVersion When using the Infoblox provider, specify the Infoblox WAPI version (optional) + ## + wapiVersion: "" + ## @param infoblox.wapiConnectionPoolSize When using the Infoblox provider, specify the Infoblox WAPI request connection pool size (optional) + ## + wapiConnectionPoolSize: "" + ## @param infoblox.wapiHttpTimeout When using the Infoblox provider, specify the Infoblox WAPI request timeout in seconds (optional) + ## + wapiHttpTimeout: "" + ## @param infoblox.maxResults When using the Infoblox provider, specify the Infoblox Max Results (optional) + ## + maxResults: "" + ## Linode configuration to be set via arguments/env. variables + ## + linode: + ## @param linode.apiToken When using the Linode provider, `LINODE_TOKEN` to set (optional) + ## + apiToken: "" + ## @param linode.secretName Use an existing secret with key "linode_api_token" defined. + ## This ignores linode.apiToken + ## + secretName: "" + ## NS1 configuration to be set via arguments/env. variables + ## @param ns1.minTTL When using the ns1 provider, specify minimal TTL, as an integer, for records + ## + ns1: + minTTL: 10 + ## OVH configuration to be set via arguments/env. variables + ## + ovh: + ## @param ovh.consumerKey When using the OVH provider, specify the existing consumer key. (required when provider=ovh and `ovh.secretName` is not provided.) + ## + consumerKey: "" + ## @param ovh.applicationKey When using the OVH provider with an existing application, specify the application key. (required when provider=ovh and `ovh.secretName` is not provided.) + ## + applicationKey: "" + ## @param ovh.applicationSecret When using the OVH provider with an existing application, specify the application secret. (required when provider=ovh and `ovh.secretName` is not provided.) + ## + applicationSecret: "" + ## @param ovh.secretName When using the OVH provider, it's the name of the secret containing `ovh_consumer_key`, `ovh_application_key` and `ovh_application_secret`. Disables usage of other `ovh`. + ## with following keys: + ## - ovh_consumer_key + ## - ovh_application_key + ## - ovh_application_secret + ## This ignores consumerKey, applicationKey & applicationSecret + ## + secretName: "" + ## Scaleway configuration to be set via arguments/env. variables + ## + scaleway: + ## @param scaleway.scwAccessKey When using the Scaleway provider, specify an existing access key. 
(required when provider=scaleway) + ## + scwAccessKey: "" + ## @param scaleway.scwSecretKey When using the Scaleway provider, specify an existing secret key. (required when provider=scaleway) + ## + scwSecretKey: "" + ## @param scaleway.scwDefaultOrganizationId When using the Scaleway provider, specify the existing organization id. (required when provider=scaleway) + ## + scwDefaultOrganizationId: "" + ## RFC 2136 configuration to be set via arguments/env. variables + ## + rfc2136: + ## @param rfc2136.host When using the rfc2136 provider, specify the RFC2136 host (required when provider=rfc2136) + ## + host: "" + ## @param rfc2136.port When using the rfc2136 provider, specify the RFC2136 port (optional) + ## + port: 53 + ## @param rfc2136.zone When using the rfc2136 provider, specify the zone (required when provider=rfc2136) + ## + zone: "" + ## @param rfc2136.tsigSecret When using the rfc2136 provider, specify the tsig secret to enable security. (do not specify if `rfc2136.secretName` is provided.) (optional) + ## + tsigSecret: "" + ## @param rfc2136.secretName When using the rfc2136 provider, specify the existing secret which contains your tsig secret. Disables the usage of `rfc2136.tsigSecret` (optional) + ## + secretName: "" + ## @param rfc2136.tsigSecretAlg When using the rfc2136 provider, specify the tsig secret to enable security (optional) + ## + tsigSecretAlg: hmac-sha256 + ## @param rfc2136.tsigKeyname When using the rfc2136 provider, specify the tsig keyname to enable security (optional) + ## + tsigKeyname: externaldns-key + ## @param rfc2136.tsigAxfr When using the rfc2136 provider, enable AFXR to enable security (optional) + ## + tsigAxfr: true + ## @param rfc2136.minTTL When using the rfc2136 provider, specify minimal TTL (in duration format) for records[ns, us, ms, s, m, h], see more https://golang.org/pkg/time/#ParseDuration + ## + minTTL: "0s" + ## @param rfc2136.rfc3645Enabled When using the rfc2136 provider, extend using RFC3645 to support secure updates over Kerberos with GSS-TSIG + ## + rfc3645Enabled: false + ## @param rfc2136.kerberosConfig When using the rfc2136 provider with rfc3645Enabled, the contents of a configuration file for krb5 (optional) + ## + kerberosConfig: "" + ## @param rfc2136.kerberosUsername When using the rfc2136 provider with rfc3645Enabled, specify the username to authenticate with (optional) + ## + kerberosUsername: "" + ## @param rfc2136.kerberosPassword When using the rfc2136 provider with rfc3645Enabled, specify the password to authenticate with (optional) + ## + kerberosPassword: "" + ## @param rfc2136.kerberosRealm When using the rfc2136 provider with rfc3645Enabled, specify the realm to authenticate to (required when provider=rfc2136 and rfc2136.rfc3645Enabled=true) + ## + kerberosRealm: "" + + ## PowerDNS configuration to be set via arguments/env. variables + ## + pdns: + ## @param pdns.apiUrl When using the PowerDNS provider, specify the API URL of the server. + ## + apiUrl: "" + ## @param pdns.apiPort When using the PowerDNS provider, specify the API port of the server. + ## + apiPort: "8081" + ## @param pdns.apiKey When using the PowerDNS provider, specify the API key of the server. + ## + apiKey: "" + ## @param pdns.secretName When using the PowerDNS provider, specify as secret name containing the API Key + ## + secretName: "" + ## TransIP configuration to be set via arguments/env. variables + ## + transip: + ## @param transip.account When using the TransIP provider, specify the account name. 
+ ## + account: "" + ## @param transip.apiKey When using the TransIP provider, specify the API key to use. + ## + apiKey: "" + ## VinylDNS configuration to be set via arguments/env. variables + ## + vinyldns: + ## @param vinyldns.host When using the VinylDNS provider, specify the VinylDNS API host. + ## + host: "" + ## @param vinyldns.accessKey When using the VinylDNS provider, specify the Access Key to use. + ## + accessKey: "" + ## @param vinyldns.secretKey When using the VinylDNS provider, specify the Secret key to use. + ## + secretKey: "" + ## @param domainFilters Limit possible target zones by domain suffixes (optional) + ## + domainFilters: [] + ## @param excludeDomains Exclude subdomains (optional) + ## + excludeDomains: [] + ## @param regexDomainFilter Limit possible target zones by regex domain suffixes (optional) + ## If regexDomainFilter is specified, domainFilters will be ignored + ## + regexDomainFilter: "" + ## @param regexDomainExclusion Exclude subdomains by using regex pattern (optional) + ## If regexDomainFilter is specified, excludeDomains will be ignored and external-dns will use regexDomainExclusion even though regexDomainExclusion is empty + ## + regexDomainExclusion: "" + ## @param zoneNameFilters Filter target zones by zone domain (optional) + ## + zoneNameFilters: [] + ## @param zoneIdFilters Limit possible target zones by zone id (optional) + ## + zoneIdFilters: [] + ## @param annotationFilter Filter sources managed by external-dns via annotation using label selector (optional) + ## + annotationFilter: "" + ## @param dryRun When enabled, prints DNS record changes rather than actually performing them (optional) + ## + dryRun: false + ## @param triggerLoopOnEvent When enabled, triggers run loop on create/update/delete events in addition to regular interval (optional) + ## + triggerLoopOnEvent: false + ## @param interval Interval update period to use + ## + interval: "1m" + ## @param logLevel Verbosity of the logs (options: panic, debug, info, warning, error, fatal, trace) + ## + logLevel: info + ## @param logFormat Which format to output logs in (options: text, json) + ## + logFormat: text + ## @param policy Modify how DNS records are synchronized between sources and providers (options: sync, upsert-only ) + ## + policy: upsert-only + ## @param registry Registry method to use (options: txt, aws-sd, noop) + ## ref: https://github.com/kubernetes-sigs/external-dns/blob/master/docs/proposal/registry.md + ## + registry: "txt" + ## @param txtPrefix When using the TXT registry, a prefix for ownership records that avoids collision with CNAME entries (optional) (Mutual exclusive with txt-suffix) + ## + txtPrefix: "" + ## @param txtSuffix When using the TXT registry, a suffix for ownership records that avoids collision with CNAME entries (optional).suffix (Mutual exclusive with txt-prefix) + ## + txtSuffix: "" + ## @param txtOwnerId A name that identifies this instance of ExternalDNS. Currently used by registry types: txt & aws-sd (optional) + ## But other registry types might be added in the future. + ## + txtOwnerId: "" + ## @param forceTxtOwnerId (backward compatibility) When using the non-TXT registry, it will pass the value defined by `txtOwnerId` down to the application (optional) + ## This setting added for backward compatibility for + ## customers who already used bitnami/external-dns helm chart + ## to privision 'aws-sd' registry type. 
+ ## Previously bitnami/external-dns helm chart did not pass + ## txtOwnerId value down to the external-dns application + ## so the app itself sets that value to be a string 'default'. + ## If existing customers force the actual txtOwnerId value to be + ## passed properly, their external-dns updates will stop working + ## because the owner's value for exting DNS records in + ## AWS Service Discovery would remain 'default'. + ## NOTE: It is up to the end user to update AWS Service Discovery + ## 'default' values in description fields to make it work with new + ## value passed as txtOwnerId when forceTxtOwnerId=true + forceTxtOwnerId: false + ## @param extraArgs Extra arguments to be passed to external-dns + ## + extraArgs: {} + ## @param extraEnv Extra environment variables to be passed to external-dns + ## + ## extraEnv: + ## - name: VARNAME1 + ## value: value1 + ## - name: VARNAME2 + ## valueFrom: + ## secretKeyRef: + ## name: existing-secret + ## key: varname2-key + ## + extraEnv: [] + ## @param replicas Desired number of ExternalDNS replicas + ## + replicas: 1 + ## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param podAnnotations Additional annotations to apply to the pod. 
+ ## + podAnnotations: {} + ## @param podLabels Additional labels to be added to pods + ## + podLabels: {} + ## @param priorityClassName priorityClassName + ## + priorityClassName: "" + ## @param secretAnnotations Additional annotations to apply to the secret + ## + secretAnnotations: {} + ## Options for the source type "crd" + ## + crd: + ## @param crd.create Install and use the integrated DNSEndpoint CRD + ## + create: false + ## @param crd.apiversion Sets the API version for the CRD to watch + ## + apiversion: "" + ## @param crd.kind Sets the kind for the CRD to watch + ## + kind: "" + ## Kubernetes svc configutarion + ## + service: + ## @param service.enabled Whether to create Service resource or not + ## + enabled: true + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.port ExternalDNS client port + ## + port: 7979 + ## @param service.nodePort Port to bind to for NodePort service type (client port) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param service.clusterIP IP address to assign to service + ## + clusterIP: "" + ## @param service.externalIPs Service external IP addresses + ## + externalIPs: [] + ## @param service.loadBalancerIP IP address to assign to load balancer (if supported) + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges List of IP CIDRs allowed access to load balancer (if supported) + ## + loadBalancerSourceRanges: [] + ## @param service.annotations Annotations to add to service + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param service.labels Provide any additional labels which may be required. + ## This can be used to have external-dns show up in `kubectl cluster-info` + ## kubernetes.io/cluster-service: "true" + ## kubernetes.io/name: "external-dns" + ## + labels: {} + ## ServiceAccount parameters + ## https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param serviceAccount.create Determine whether a Service Account should be created or it should reuse a exiting one. + ## + create: true + ## @param serviceAccount.name ServiceAccount to use. A name is generated using the external-dns.fullname template if it is not set + ## + name: "" + ## @param serviceAccount.annotations Additional Service Account annotations + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Automount API credentials for a service account. + ## + automountServiceAccountToken: true + ## RBAC parameters + ## https://kubernetes.io/docs/reference/access-authn-authz/rbac/ + ## + rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## + create: true + ## @param rbac.clusterRole Whether to create Cluster Role. When set to false creates a Role in `namespace` + ## + clusterRole: true + ## @param rbac.apiVersion Version of the RBAC API + ## + apiVersion: v1 + ## @param rbac.pspEnabled Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + pspEnabled: false + ## @param securityContext Security context for the container + ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## Example: + ## securityContext: + ## allowPrivilegeEscalation: false + ## readOnlyRootFilesystem: true + ## capabilities: + ## drop: ["ALL"] + ## + securityContext: {} + ## @param podSecurityContext.fsGroup Group ID for the container + ## @param podSecurityContext.runAsUser User ID for the container + ## + podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + ## Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param resources.limits The resources limits for the container + ## @param resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 50m + ## memory: 50Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 10m + ## memory: 50Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param livenessProbe.enabled Enable livenessProbe + ## @param livenessProbe.httpGet.path Request path for livenessProbe + ## @param livenessProbe.httpGet.port Port for livenessProbe + ## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + httpGet: + path: /healthz + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 2 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param readinessProbe.enabled Enable readinessProbe + ## @param readinessProbe.httpGet.path Request path for readinessProbe + ## @param readinessProbe.httpGet.port Port for readinessProbe + ## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + httpGet: + path: /healthz + port: http + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param extraVolumes A list of volumes to be added to the pod + ## + extraVolumes: [] + ## @param extraVolumeMounts A list of volume mounts to be added to the pod + ## + extraVolumeMounts: [] + ## @param podDisruptionBudget Configure 
PodDisruptionBudget + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + podDisruptionBudget: {} + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param metrics.enabled Enable prometheus to access external-dns metrics endpoint + ## + enabled: false + ## @param metrics.podAnnotations Annotations for enabling prometheus to access the metrics endpoint + ## + podAnnotations: {} + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor object + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.selector Additional labels for ServiceMonitor object + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## e.g: + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + + ``` +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Then work your way through the values you pasted, and change any which are specific to your configuration. + +I recommend changing: + +```yaml + sources: + # - crd + - service + - ingress + # - contour-httpproxy +``` + +To: + +```yaml + sources: + - crd + # - service + # - ingress + # - contour-httpproxy +``` + +!!! question "Why only use CRDs as a source?" + > I thought the whole point of this magic was to create DNS entries from services or ingresses! + + You can do that, yes. However, I prefer to be prescriptive, and explicitly decide when a DNS entry will be created. By [using CRDs](#using-crds) (*External DNS creates a new type of resource called a "DNSEndpoint"*), I add my DNS entries as YAML files into each kustomization, and I can still employ wildcard DNS where appropriate. + +### Secret + +As you work your way through `values.yaml`, you'll notice that it contains specific placholders for credentials for various DNS providers. +Take for example, this config for cloudflare: + +???+ example "Example snippet of CloudFlare config from ConfigMap" + ```yaml + cloudflare: + ## @param cloudflare.apiToken When using the Cloudflare provider, `CF_API_TOKEN` to set (optional) + ## + apiToken: "" + ## @param cloudflare.apiKey When using the Cloudflare provider, `CF_API_KEY` to set (optional) + ## + apiKey: "" + ## @param cloudflare.secretName When using the Cloudflare provider, it's the name of the secret containing cloudflare_api_token or cloudflare_api_key. + ## This ignores cloudflare.apiToken, and cloudflare.apiKey + ## + secretName: "" + ## @param cloudflare.email When using the Cloudflare provider, `CF_API_EMAIL` to set (optional). Needed when using CF_API_KEY + ## + email: "" + ## @param cloudflare.proxied When using the Cloudflare provider, enable the proxy feature (DDOS protection, CDN...) 
(optional)
+      ##
+      proxied: true
+    ```
+
+In the case of CloudFlare (*and this may differ per-provider*), you can either enter your credentials in cleartext (*baaad idea, since we intend to commit these files into a repo*), or you can reference a secret, which External DNS will expect to find in its namespace.
+
+Thanks to [Sealed Secrets](/kubernetes/sealed-secrets/), we have a safe way of committing secrets into our repository, so to create this cloudflare secret, you'd run something like this:
+
+```bash
+kubectl create secret generic cloudflare-api-token \
+  --namespace external-dns \
+  --dry-run=client \
+  --from-literal=cloudflare_api_token=gobbledegook -o json \
+  | kubeseal --cert \
+  > external-dns/sealedsecret-cloudflare-api-token.yaml
+```
+
+And your sealed secret would end up in `external-dns/sealedsecret-cloudflare-api-token.yaml`.
+
+### HelmRelease
+
+Lastly, having set the scene above, we define the HelmRelease which will actually deploy the external-dns controller into the cluster, with the config we defined above. I save this in my flux repo as `external-dns/helmrelease-external-dns.yaml`:
+
+??? example "Example HelmRelease (click to expand)"
+    ```yaml
+    apiVersion: helm.toolkit.fluxcd.io/v2beta1
+    kind: HelmRelease
+    metadata:
+      name: external-dns
+      namespace: external-dns
+    spec:
+      chart:
+        spec:
+          chart: external-dns
+          version: 4.x
+          sourceRef:
+            kind: HelmRepository
+            name: bitnami
+            namespace: flux-system
+      interval: 15m
+      timeout: 5m
+      releaseName: external-dns
+      valuesFrom:
+      - kind: ConfigMap
+        name: external-dns-helm-chart-value-overrides
+        valuesKey: values.yaml # This is the default, but best to be explicit for clarity
+    ```
+
+--8<-- "kubernetes-why-not-config-in-helmrelease.md"
+
+## Serving
+
+Once you've committed your YAML files into your repo, you should soon see some pods appear in the `external-dns` namespace!
+
+### Using CRDs
+
+If you're the sort of person who doesn't like to just leak[^1] every service/ingress name into public DNS, you may prefer to manage your DNS entries using CRDs.
+
+You can instruct ExternalDNS to create any DNS entry you please, using a **DNSEndpoint** resource, and place these in the appropriate folder in your flux repo to be deployed with your HelmRelease:
+
+```yaml
+apiVersion: externaldns.k8s.io/v1alpha1
+kind: DNSEndpoint
+metadata:
+  name: batcave.example.com
+  namespace: batcave
+spec:
+  endpoints:
+  - dnsName: batcave.example.com
+    recordTTL: 180
+    recordType: A
+    targets:
+    - 192.168.99.216
+```
+
+You can even create wildcard DNS entries, for example by setting `dnsName: *.batcave.example.com`.
+
+Finally (*and this is how I prefer to manage mine*), you can create a few A records for "permanent" endpoints, stuff like Ingresses, and then point arbitrary DNS names to these records, like this:
+
+```yaml
+apiVersion: externaldns.k8s.io/v1alpha1
+kind: DNSEndpoint
+metadata:
+  name: "robinsroost.example.com"
+  namespace: batcave
+spec:
+  endpoints:
+  - dnsName: "robinsroost.example.com"
+    recordTTL: 180
+    recordType: CNAME
+    targets:
+    - "batcave.example.com"
+```
+
+### Troubleshooting
+
+If DNS entries **aren't** created as you expect, then the best approach is to check the external-dns logs, by running `kubectl logs -n external-dns -l app.kubernetes.io/name=external-dns`.
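+
+For a quick sanity-check that records really are being published, you can also query DNS directly and watch the controller work (*the names below are the hypothetical `batcave.example.com` examples from above - substitute your own domain*):
+
+```bash
+# The A record created from the first DNSEndpoint example
+dig +short batcave.example.com
+
+# ...and the CNAME pointing back at it
+dig +short robinsroost.example.com
+
+# Follow the external-dns logs while it reconciles your DNSEndpoints
+kubectl logs -n external-dns -l app.kubernetes.io/name=external-dns -f
+```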
+--8<-- "recipe-footer.md"
+
+[^1]: Why yes, I **have** accidentally caused outages / conflicts by "leaking" DNS entries automatically!
diff --git a/manuscript/kubernetes/index.md b/manuscript/kubernetes/index.md
index f2c94df..eba98d3 100644
--- a/manuscript/kubernetes/index.md
+++ b/manuscript/kubernetes/index.md
@@ -3,68 +3,47 @@
 My first introduction to Kubernetes was a children's story:
-
+
-## Wait, what?
+## Why Kubernetes?
-Why would you want to use Kubernetes for your self-hosted recipes over simple Docker Swarm? Here's my personal take..
+Why would you want to use Kubernetes for your self-hosted recipes, over simple Docker Swarm? Here's my personal take..
-I use Docker swarm both at home (_on a single-node swarm_), and on a trio of Ubuntu 16.04 VPSs in a shared lab OpenStack environment.
+### Docker Swarm is dead
-In both cases above, I'm responsible for maintaining the infrastructure supporting Docker - either the physical host, or the VPS operating systems.
+Sorry to say, but from where I sit, there's no innovation or development happening in docker swarm.
-I started experimenting with Kubernetes as a plan to improve the reliability of my cryptocurrency mining pools (_the contended lab VPSs negatively impacted the likelihood of finding a block_), and as a long-term replacement for my aging home server.
+Yes, I know, after Docker Inc [sold its platform business to Mirantis in Nov 2019](https://www.mirantis.com/blog/mirantis-acquires-docker-enterprise-platform-business/), in Feb 2020 Mirantis [back-tracked](https://www.mirantis.com/blog/mirantis-will-continue-to-support-and-develop-docker-swarm/) on their original plan to sunset swarm after 2 years, and stated that they'd continue to invest in swarm. But seriously, look around. Nobody is interested in swarm right now...
-What I enjoy about building recipes and self-hosting is **not** the operating system maintenance, it's the tools and applications that I can quickly launch in my swarms. If I could **only** play with the applications, and not bother with the maintenance, I totally would.
+... Not even Mirantis! As of Nov 2021, the Mirantis blog tag "[kubernetes](https://www.mirantis.com/tag/kubernetes/)" had 8 posts within the past month. The tag "[docker](https://www.mirantis.com/tag/docker/)" has 8 posts in the past **2 years**, the 8th being the original announcement of the Docker acquisition. The tag "[docker swarm](https://www.mirantis.com/tag/docker-swarm/)" has only 2 posts, **ever**.
-Kubernetes (_on a cloud provider, mind you!_) does this for me. I feed Kubernetes a series of YAML files, and it takes care of all the rest, including version upgrades, node failures/replacements, disk attach/detachments, etc.
+Dead. [Extinct. Like the dodo](https://youtu.be/NxnZC9L_YXE?t=47).
+
+### Once you go Kubernetes, you can't go back
+
+For years now, [I've provided Kubernetes design consulting](https://www.funkypenguin.co.nz/work-with-me/) to small clients and large enterprises. The implementation details in each case vary widely, but there are some primitives which I've come to take for granted, and I wouldn't easily do without. A few examples:
+
+* **CLI drives API from anywhere**. From my laptop, I can use my credentials to manage any number of Kubernetes clusters, simply by switching kubectl "context". Each interaction is an API call against an HTTPS endpoint. No SSHing to hosts and manually running docker commands as root! (*See the example below.*)
+* **GitOps is magic**. There are multiple ways to achieve it, but having changes you commit to a repo automatically applied to a cluster, "Just Works(tm)". The process removes so much friction from making changes that it makes you more productive, and a better "gitizen" ;P
+* **Controllers are trustworthy**. I've come to trust that when I tell Kubernetes to run 3 replicas on separate hosts, to scale up a set of replicas based on CPU load metrics, or provision a blob of storage for a given workload, that this will be done in a consistent and visible way. I'll be able to see logs / details for each action taken by the controller, and adjust my own instructions/configuration accordingly if necessary.
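+
+As a taste of that first point, this is all it takes to hop between clusters from one laptop (*`my-homelab` and `my-cloud-cluster` are hypothetical context names - yours come from your own kubeconfig*):
+
+```bash
+# List every cluster/context your local kubeconfig knows about
+kubectl config get-contexts
+
+# Point kubectl at a different cluster; every subsequent command is an HTTPS API call to it
+kubectl config use-context my-cloud-cluster
+kubectl get nodes
+
+# ...and back again
+kubectl config use-context my-homelab
+```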
 ## Uggh, it's so complicated!
-Yes, but that's a necessary sacrifice for the maturity, power and flexibility it offers. Like docker-compose syntax, Kubernetes uses YAML to define its various, interworking components.
+Yes, it's more complex than Docker Swarm. And that complexity can definitely be a barrier, although with improved tooling, it's continually becoming less so. However, you don't need to be a mechanic to drive a car or to use a chainsaw. You just need a basic understanding of some core primitives, and then you get on with using the tool to achieve your goals, without needing to know every detail about how it works!
-Let's talk some definitions. Kubernetes.io provides a [glossary](https://kubernetes.io/docs/reference/glossary/?fundamental=true). My definitions are below:
+Your end-goal is probably "*I want to reliably self-host services I care about*", and not "*I want to fully understand a complex, scalable, and highly sophisticated container orchestrator*". [^1]
-- **Node** : A compute instance which runs docker containers, managed by a cluster master.
-
-- **Cluster** : One or more "worker nodes" which run containers. Very similar to a Docker Swarm node. In most cloud provider deployments, the [master node for your cluster is provided free of charge](https://www.sdxcentral.com/articles/news/google-eliminates-gke-management-fees-kubernetes-clusters/2017/11/), but you don't get to access it.
-
-- **Pod** : A collection of one or more the containers. If a pod runs multiple containers, these containers always run on the same node.
-
-- **Deployment** : A definition of a desired state. I.e., "I want a pod with containers A and B running". The Kubernetes master then ensures that any changes necessary to maintain the state are taken. (_I.e., if a pod crashes, but is supposed to be running, a new pod will be started_)
-
-- **Service** : Unlike Docker Swarm, service discovery is not _built in_ to Kubernetes. For your pods to discover each other (say, to have "webserver" talk to "database"), you create a service for each pod, and refer to these services when you want your containers (_in pods_) to talk to each other. Complicated, yes, but the abstraction allows you to do powerful things, like auto-scale-up a bunch of database "pods" behind a service called "database", or perform a rolling container image upgrade with zero impact.
-
-- **External access** : Services not only allow pods to discover each other, but they're also the mechanism through which the outside world can talk to a container. At the simplest level, this is akin to exposing a container port on a docker host.
-
-- **Ingress** : When mapping ports to applications is inadequate (think virtual web hosts), an ingress is a sort of "inbound router" which can receive requests on one port (i.e., HTTPS), and forward them to a variety of internal pods, based on things like VHOST, etc. For us, this is the functional equivalent of what Traefik does in Docker Swarm.
In fact, we use a Traefik Ingress in Kubernetes to accomplish the same. - -- **Persistent Volume** : A virtual disk which is attached to a pod, storing persistent data. Meets the requirement for shared storage from Docker Swarm. I.e., if a persistent volume (PV) is bound to a pod, and the pod dies and is recreated, or get upgraded to a new image, the PV the data is bound to the new container. PVs can be "claimed" in a YAML definition, so that your Kubernetes provider will auto-create a PV when you launch your pod. PVs can be snapshotted. - -- **Namespace** : An abstraction to separate a collection of pods, services, ingresses, etc. A "virtual cluster within a cluster". Can be used for security, or simplicity. For example, since we don't have individual docker stacks anymore, if you commonly name your database container "db", and you want to deploy two applications which both use a database container, how will you name your services? Use namespaces to keep each application ("nextcloud" vs "kanboard") separate. Namespaces also allow you to allocate resources **limits** to the aggregate of containers in a namespace, so you could, for example, limit the "nextcloud" namespace to 2.3 CPUs and 1200MB RAM. +So let's get on with learning how to use the tool... ## Mm.. maaaaybe, how do I start? -If you're like me, and you learn by doing, either play with the examples at , or jump right in by setting up a Google Cloud trial (_you get \$300 credit for 12 months_), or a small cluster on [Digital Ocean](/kubernetes/cluster/). +Primarily you need 2 things: -If you're the learn-by-watching type, just search for "Kubernetes introduction video". There's a **lot** of great content available. +1. A cluster +2. A way to deploy workloads into the cluster -## I'm ready, gimme some recipes! - -As of Jan 2019, our first (_and only!_) Kubernetes recipe is a WIP for the Mosquitto [MQTT](/recipes/mqtt/) broker. It's a good, simple starter if you're into home automation (_shoutout to [Home Assistant](/recipes/homeassistant/)!_), since it only requires a single container, and a simple NodePort service. - -I'd love for your [feedback](/support/) on the Kubernetes recipes, as well as suggestions for what to add next. The current rough plan is to replicate the Chef's Favorites recipes (_see the left-hand panel_) into Kubernetes first. - -## Move on.. - -Still with me? Good. Move on to reviewing the design elements - -- Start (this page) - Why Kubernetes? -- [Design](/kubernetes/design/) - How does it fit together? -- [Cluster](/kubernetes/cluster/) - Setup a basic cluster -- [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access -- [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data -- [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks -- [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm +Practically, you need some extras too, but you can mix-and-match these. --8<-- "recipe-footer.md" + +[^1]: Of course, if you **do** enjoy understanding the intricacies of how your tools work, you're in good company! diff --git a/manuscript/kubernetes/ingress/index.md b/manuscript/kubernetes/ingress/index.md new file mode 100644 index 0000000..414239b --- /dev/null +++ b/manuscript/kubernetes/ingress/index.md @@ -0,0 +1,19 @@ +--- +description: What is a Kubernetes Ingress? 
+--- +# Ingresses + +In Kubernetes, an Ingress is a way to describe how to route traffic coming **into** the cluster, so that (*for example*) will end up on a [Radarr][radarr] pod, but will end up on a [Sonarr][sonarr] pod. + +![Ingress illustration](/images/ingress.jpg) + +There are many popular Ingress Controllers, we're going to cover two equally useful options: + +1. [Traefik](/kubernetes/ingress/traefik/) +2. [Nginx](/kubernetes/ingress/nginx/) + +Choose at least one of the above (*there may be valid reasons to use both!* [^1]), so that you can expose applications via Ingress. + +--8<-- "recipe-footer.md" + +[^1]: One cluster I manage uses traefik Traefik for public services, but Nginx for internal managemnet services such as Prometheus, etc. The idea is that you'd need one type of Ingress to help debug problems with the _other_ type! diff --git a/manuscript/kubernetes/ingress/nginx.md b/manuscript/kubernetes/ingress/nginx.md new file mode 100644 index 0000000..74334e4 --- /dev/null +++ b/manuscript/kubernetes/ingress/nginx.md @@ -0,0 +1,240 @@ +--- +description: Nginx Ingress Controller +--- +# Nginx Ingress Controller + +The [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) is the grandpappy of Ingress Controllers, with releases dating back ot at least 2016. Of course, Nginx itself is a battle-tested rock, [released in 2004](https://en.wikipedia.org/wiki/Nginx) and has been constantly updated / improved ever since. + +Having such a pedigree though can make it a little awkward for the unfamiliar to configure Ngnix, whereas something like [Traefik](/kubernetes/ingress/traefik/), being newer-on-the-scene, is more user-friendly, and offers (*among other features*) a free **dashboard**. (*Nginx's dashboard is only available in the commercial Nginx+ package, which is a [monumental PITA](https://www.nginx.com/blog/deploying-nginx-nginx-plus-docker/) to run*) + +Nginx Ingress Controller does make for a nice, simple "default" Ingress controller, if you don't want to do anything fancy. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] A [load-balancer](/kubernetes/load-balancer/) solution (*either [k3s](/kubernetes/load-balancer/k3s/) or [MetalLB](/kubernetes/load-balancer/metallb/)*) + + Optional: + + * [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates + * [x] [External DNS](/kubernetes/external-dns/) configured to respond to ingresses, or with a wildcard DNS entry + +## Preparation + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-nginx-ingress-controller.yaml`: + +??? example "Example NameSpace (click to expand)" + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: nginx-ingress-controller + ``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the (*prolific*) [bitnami chart repository](https://github.com/bitnami/charts/tree/master/bitnami), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-bitnami.yaml`: + +??? 
example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: bitnami + namespace: flux-system + spec: + interval: 15m + url: https://charts.bitnami.com/bitnami + ``` + +### Kustomization + +Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/nginx-ingress-controller`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-nginx-ingress-controller.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: nginx-ingress-controller + namespace: flux-system + spec: + interval: 15m + path: ./nginx-ingress-controller + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: nginx-ingress-controller + namespace: nginx-ingress-controller + + ``` + +### ConfigMap + +Now we're into the nginx-ingress-controller-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/nginx-ingress-controller/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `nginx-ingress-controller/configmap-nginx-ingress-controller-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + creationTimestamp: null + name: nginx-ingress-controller-helm-chart-value-overrides + namespace: nginx-ingress-controller + data: + values.yaml: |- + # paste chart values.yaml (indented) here and alter as required + ``` + +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Then work your way through the values you pasted, and change any which are specific to your configuration. It may not be necessary to change anything. + +### HelmRelease + +Lastly, having set the scene above, we define the HelmRelease which will actually deploy nginx-ingress-controller into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `nginx-ingress-controller/helmrelease-nginx-ingress-controller.yaml`: + +??? 
example "Example HelmRelease (click to expand)"
+    ```yaml
+    apiVersion: helm.toolkit.fluxcd.io/v2beta1
+    kind: HelmRelease
+    metadata:
+      name: nginx-ingress-controller
+      namespace: nginx-ingress-controller
+    spec:
+      chart:
+        spec:
+          chart: nginx-ingress-controller
+          version: 9.x
+          sourceRef:
+            kind: HelmRepository
+            name: bitnami
+            namespace: flux-system
+      interval: 15m
+      timeout: 5m
+      releaseName: nginx-ingress-controller
+      valuesFrom:
+      - kind: ConfigMap
+        name: nginx-ingress-controller-helm-chart-value-overrides
+        valuesKey: values.yaml # This is the default, but best to be explicit for clarity
+    ```
+
+--8<-- "kubernetes-why-not-config-in-helmrelease.md"
+
+## Deploy nginx-ingress-controller
+
+Having committed the above to your flux repository, you should shortly see an nginx-ingress-controller kustomization, and in the `nginx-ingress-controller` namespace, the controller pods plus a default backend pod:
+
+```bash
+demo@shredder:~$ kubectl get pods -n nginx-ingress-controller
+NAME                                                        READY   STATUS    RESTARTS   AGE
+nginx-ingress-controller-5b849b4fbd-svbxk                   1/1     Running   0          24h
+nginx-ingress-controller-5b849b4fbd-xt7vc                   1/1     Running   0          24h
+nginx-ingress-controller-default-backend-867d86fb8f-t27j9   1/1     Running   0          24h
+demo@shredder:~$
+```
+
+### How do I know it's working?
+
+#### Test Service
+
+By default, the chart will deploy nginx ingress controller's service in [LoadBalancer](/kubernetes/loadbalancer/) mode. When you use kubectl to display the service (`kubectl get services -n nginx-ingress-controller`), you'll see the external IP displayed:
+
+```bash
+demo@shredder:~$ kubectl get services -n nginx-ingress-controller
+NAME                                       TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
+nginx-ingress-controller                   LoadBalancer   10.152.183.162   172.168.209.1   80:30756/TCP,443:30462/TCP   24h
+nginx-ingress-controller-default-backend   ClusterIP      10.152.183.200   <none>          80/TCP                       24h
+demo@shredder:~$
+```
+
+!!! question "Where does the external IP come from?"
+    If you're using [k3s's load balancer](/kubernetes/loadbalancer/k3s/), the external IP will likely be the IP of the nodes running k3s. If you're using [MetalLB](/kubernetes/loadbalancer/metallb/), the external IP should come from the list of addresses in the pool you allocated.
+
+Pointing your web browser to the external IP displayed should result in the default backend page (*or an nginx-branded 404*). Congratulations, you have external access to the ingress controller! πŸ₯³
+
+#### Test Ingress
+
+Still, you didn't deploy an ingress controller to look at 404 pages! If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo configmap doesn't deploy an Ingress, but you can change this using the magic of GitOps... πŸͺ„
+
+Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change `ingress.enabled` to `true`, and set the host name to match your local domain name (*already configured using [External DNS](/kubernetes/external-dns/)*):
+
+``` yaml hl_lines="2 8"
+  ingress:
+    enabled: false
+    className: ""
+    annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+    hosts:
+      - host: podinfo.local
+```
+
+To:
+
+``` yaml hl_lines="2 8"
+  ingress:
+    enabled: true
+    className: ""
+    annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+    hosts:
+      - host: podinfo.
+```
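+
+Behind the scenes, enabling this simply causes the podinfo chart to template out a standard Ingress resource. A rough sketch of the kind of object it generates is below (*the hostname, service name and port are illustrative; the chart's actual output may differ slightly*):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: podinfo
+  namespace: podinfo
+spec:
+  rules:
+    - host: podinfo.example.com   # whatever host you set in the values above
+      http:
+        paths:
+          - path: /
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: podinfo     # the chart's own ClusterIP service
+                port:
+                  number: 9898    # podinfo's default service port
+```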
+
+Commit your changes, wait for a reconciliation, and run `kubectl get ingress -n podinfo`. You should see an ingress created matching the host defined above, and the ADDRESS value should match the service address of the nginx-ingress-controller service.
+
+```bash
+root@cn1:~# kubectl get ingress -A
+NAMESPACE   NAME      CLASS    HOSTS                 ADDRESS         PORTS     AGE
+podinfo     podinfo   <none>   podinfo.example.com   172.168.209.1   80, 443   91d
+```
+
+!!! question "Why is there no class value?"
+    You don't **have** to define an ingress class if you only have one **class** of ingress, since typically your ingress controller will assume the default class. When you run multiple ingress controllers (say, Nginx **and** [Traefik](/kubernetes/ingress/traefik/), or multiple nginx instances with different access controls) then classes become more important.
+
+Now assuming your [DNS is correct](/kubernetes/external-dns/), you should be able to point your browser to the hostname you chose, and see the beautiful podinfo page! πŸ₯³πŸ₯³
+
+#### Test SSL
+
+Ha, but we're not done yet! We have exposed a service via our load balancer, and we've exposed a route to a service via an Ingress, but let's get rid of that nasty "insecure" message in the browser when using HTTPS...
+
+Since you set up [SSL certificates](/kubernetes/ssl-certificates/), including [secret-replicator](/kubernetes/ssl-certificates/secret-replicator/), you should end up with a `letsencrypt-wildcard-cert` secret in every namespace, including `podinfo`.
+
+So once again, alter the podinfo ConfigMap to change this:
+
+```yaml hl_lines="2 4"
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+```
+
+To this:
+
+```yaml hl_lines="2 4"
+  tls:
+    - secretName: letsencrypt-wildcard-cert
+      hosts:
+        - podinfo.
+```
+
+Commit your changes, wait for the reconciliation, and the next time you point your browser at your ingress, you should get a beautiful, valid, officially-signed SSL certificate[^1]! πŸ₯³πŸ₯³πŸ₯³
+
+### Troubleshooting
+
+Are things not working as expected? Watch the nginx-ingress-controller's logs with ```kubectl logs -n nginx-ingress-controller -l app.kubernetes.io/name=nginx-ingress-controller -f```.
+
+--8<-- "recipe-footer.md"
+
+[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup!
diff --git a/manuscript/kubernetes/ingress/traefik/dashboard.md b/manuscript/kubernetes/ingress/traefik/dashboard.md
new file mode 100644
index 0000000..26bbdec
--- /dev/null
+++ b/manuscript/kubernetes/ingress/traefik/dashboard.md
@@ -0,0 +1,16 @@
+# Traefik Dashboard
+
+One of the advantages [Traefik](/kubernetes/ingress/traefik/) offers over [Nginx](/kubernetes/ingress/nginx/) is a native dashboard available in the open-source version (*Nginx+, the commercially-supported version, also includes a dashboard*).
+
+![Traefik Dashboard Screenshot](../../../images/traefik-dashboard.png)
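+
+Until you've decided how (or whether) to expose the dashboard via an Ingress, a quick way to peek at it is a port-forward. This is a sketch assuming the chart defaults used in this recipe (*a `traefik` deployment in the `traefik` namespace, serving the dashboard on its `traefik` entrypoint, port 9000*):
+
+```bash
+# Forward the dashboard port from the traefik deployment to your workstation
+kubectl port-forward -n traefik deployment/traefik 9000:9000
+
+# ...then browse to http://localhost:9000/dashboard/ (the trailing slash matters)
+```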
+
+!!! summary "Ingredients"
+
+    * [x] A [Kubernetes cluster](/kubernetes/cluster/)
+    * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
+    * [x] A [load-balancer](/kubernetes/loadbalancer/) solution (*either [k3s](/kubernetes/loadbalancer/k3s/) or [MetalLB](/kubernetes/loadbalancer/metallb/)*)
+    * [x] [Traefik](/kubernetes/ingress/traefik/) deployed per-design
+
+--8<-- "recipe-footer.md"
+
+[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup!
diff --git a/manuscript/kubernetes/ingress/traefik/index.md b/manuscript/kubernetes/ingress/traefik/index.md
new file mode 100644
index 0000000..15fbb36
--- /dev/null
+++ b/manuscript/kubernetes/ingress/traefik/index.md
@@ -0,0 +1,239 @@
+# Traefik Ingress Controller
+
+Unlike grumpy ol' man [Nginx](/kubernetes/ingress/nginx/) :older_man:, Traefik, a microservice-friendly reverse proxy, is relatively fresh in the "cloud-native" space, having been "born" :baby_bottle: [in the same year that Kubernetes was launched](https://techcrunch.com/2020/09/23/five-years-after-creating-traefik-application-proxy-open-source-project-hits-2b-downloads/).
+
+Traefik natively includes some features which Nginx lacks:
+
+* [x] Ability to use cross-namespace TLS certificates (*this may be accidental, but it totally works currently*)
+* [x] An elegant "middleware" implementation allowing certain requests to pass through additional layers of authentication
+* [x] A beautiful dashboard
+
+![Traefik Screenshot](../../../images/traefik.png)
+
+!!! summary "Ingredients"
+
+    * [x] A [Kubernetes cluster](/kubernetes/cluster/)
+    * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
+    * [x] A [load-balancer](/kubernetes/loadbalancer/) solution (*either [k3s](/kubernetes/loadbalancer/k3s/) or [MetalLB](/kubernetes/loadbalancer/metallb/)*)
+
+    Optional:
+
+    * [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates
+    * [x] [External DNS](/kubernetes/external-dns/) configured to respond to ingresses, or with a wildcard DNS entry
+
+## Preparation
+
+### Namespace
+
+We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-traefik.yaml`:
+
+??? example "Example NameSpace (click to expand)"
+    ```yaml
+    apiVersion: v1
+    kind: Namespace
+    metadata:
+      name: traefik
+    ```
+
+### HelmRepository
+
+Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the official [Traefik helm chart](https://github.com/traefik/traefik-helm-chart), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-traefik.yaml`:
+
+??? example "Example HelmRepository (click to expand)"
+    ```yaml
+    apiVersion: source.toolkit.fluxcd.io/v1beta1
+    kind: HelmRepository
+    metadata:
+      name: traefik
+      namespace: flux-system
+    spec:
+      interval: 15m
+      url: https://helm.traefik.io/traefik
+    ```
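+
+Once flux has reconciled this file, you can optionally confirm that the chart repository's index was fetched successfully. A quick check (*`flux get sources helm -n flux-system` shows the same information via the flux CLI*):
+
+```bash
+# The HelmRepository should eventually report READY=True
+kubectl get helmrepository -n flux-system traefik
+```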
+
+### Kustomization
+
+Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/traefik`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-traefik.yaml`:
+
+??? example "Example Kustomization (click to expand)"
+    ```yaml
+    apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
+    kind: Kustomization
+    metadata:
+      name: traefik
+      namespace: flux-system
+    spec:
+      interval: 15m
+      path: ./traefik
+      prune: true # remove any elements later removed from the above path
+      timeout: 2m # if not set, this defaults to interval duration, which is 1h
+      sourceRef:
+        kind: GitRepository
+        name: flux-system
+      validation: server
+      healthChecks:
+        - apiVersion: apps/v1
+          kind: Deployment
+          name: traefik
+          namespace: traefik
+    ```
+
+### ConfigMap
+
+Now we're into the traefik-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/traefik/traefik-helm-chart/blob/master/traefik/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `traefik/configmap-traefik-helm-chart-value-overrides.yaml`:
+
+??? example "Example ConfigMap (click to expand)"
+    ```yaml
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      creationTimestamp: null
+      name: traefik-helm-chart-value-overrides
+      namespace: traefik
+    data:
+      values.yaml: |-
+        # paste chart values.yaml (indented) here and alter as required
+    ```
+
+--8<-- "kubernetes-why-full-values-in-configmap.md"
+
+Then work your way through the values you pasted, and change any which are specific to your configuration. It may not be necessary to change anything.
+
+### HelmRelease
+
+Lastly, having set the scene above, we define the HelmRelease which will actually deploy traefik into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `traefik/helmrelease-traefik.yaml`:
+
+??? example "Example HelmRelease (click to expand)"
+    ```yaml
+    apiVersion: helm.toolkit.fluxcd.io/v2beta1
+    kind: HelmRelease
+    metadata:
+      name: traefik
+      namespace: traefik
+    spec:
+      chart:
+        spec:
+          chart: traefik
+          version: 9.x
+          sourceRef:
+            kind: HelmRepository
+            name: traefik
+            namespace: flux-system
+      interval: 15m
+      timeout: 5m
+      releaseName: traefik
+      valuesFrom:
+      - kind: ConfigMap
+        name: traefik-helm-chart-value-overrides
+        valuesKey: values.yaml # This is the default, but best to be explicit for clarity
+    ```
+
+--8<-- "kubernetes-why-not-config-in-helmrelease.md"
+
+## Deploy traefik
+
+Having committed the above to your flux repository, you should shortly see a traefik kustomization, and in the `traefik` namespace, one or more traefik pods (*depending on the replica count configured in your values*):
+
+```bash
+demo@shredder:~$ kubectl get pods -n traefik
+NAME                       READY   STATUS    RESTARTS   AGE
+traefik-5b849b4fbd-svbxk   1/1     Running   0          24h
+traefik-5b849b4fbd-xt7vc   1/1     Running   0          24h
+demo@shredder:~$
+```
+
+### How do I know it's working?
+
+#### Test Service
+
+By default, the chart will deploy Traefik in [LoadBalancer](/kubernetes/loadbalancer/) mode. When you use kubectl to display the service (`kubectl get services -n traefik`), you'll see the external IP displayed:
+
+```bash
+demo@shredder:~$ kubectl get services -n traefik
+NAME      TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
+traefik   LoadBalancer   10.152.183.162   172.168.209.1   80:30756/TCP,443:30462/TCP   24h
+demo@shredder:~$
+```
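+
+If you'd rather check from a shell than a browser, you can probe the external IP directly (*substitute the EXTERNAL-IP from your own output*); Traefik should answer with a 404, since no Ingresses exist yet:
+
+```bash
+# Expect an HTTP 404 response from Traefik at this point
+curl -i http://172.168.209.1/
+```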
+
+!!! question "Where does the external IP come from?"
+    If you're using [k3s's load balancer](/kubernetes/loadbalancer/k3s/), the external IP will likely be the IP of the nodes running k3s. If you're using [MetalLB](/kubernetes/loadbalancer/metallb/), the external IP should come from the list of addresses in the pool you allocated.
+
+Pointing your web browser to the external IP displayed should result in a 404 page. Congratulations, you have external access to the Traefik ingress controller! πŸ₯³
+
+#### Test Ingress
+
+Still, you didn't deploy an ingress controller to look at 404 pages! If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo configmap doesn't deploy an Ingress, but you can change this using the magic of GitOps... πŸͺ„
+
+Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change `ingress.enabled` to `true`, and set the host name to match your local domain name (*already configured using [External DNS](/kubernetes/external-dns/)*):
+
+``` yaml hl_lines="2 8"
+  ingress:
+    enabled: false
+    className: ""
+    annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+    hosts:
+      - host: podinfo.local
+```
+
+To:
+
+``` yaml hl_lines="2 8"
+  ingress:
+    enabled: true
+    className: ""
+    annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+    hosts:
+      - host: podinfo.
+```
+
+Commit your changes, wait for a reconciliation, and run `kubectl get ingress -n podinfo`. You should see an ingress created matching the host defined above, and the ADDRESS value should match the service address of the traefik service.
+
+```bash
+root@cn1:~# kubectl get ingress -A
+NAMESPACE   NAME      CLASS    HOSTS                 ADDRESS         PORTS     AGE
+podinfo     podinfo   <none>   podinfo.example.com   172.168.209.1   80, 443   91d
+```
+
+!!! question "Why is there no class value?"
+    You don't **have** to define an ingress class if you only have one **class** of ingress, since typically your ingress controller will assume the default class. When you run multiple ingress controllers (say, Nginx **and** [Traefik](/kubernetes/ingress/traefik/), or multiple nginx instances with different access controls) then classes become more important.
+
+Now assuming your [DNS is correct](/kubernetes/external-dns/), you should be able to point your browser to the hostname you chose, and see the beautiful podinfo page! πŸ₯³πŸ₯³
+
+#### Test SSL
+
+Ha, but we're not done yet! We have exposed a service via our load balancer, and we've exposed a route to a service via an Ingress, but let's get rid of that nasty "insecure" message in the browser when using HTTPS...
+
+Since you set up [SSL certificates](/kubernetes/ssl-certificates/), including [secret-replicator](/kubernetes/ssl-certificates/secret-replicator/), you should end up with a `letsencrypt-wildcard-cert` secret in every namespace, including `podinfo`.
+
+So once again, alter the podinfo ConfigMap to change this:
+
+```yaml hl_lines="2 4"
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+```
+
+To this:
+
+```yaml hl_lines="2 4"
+  tls:
+    - secretName: letsencrypt-wildcard-cert
+      hosts:
+        - podinfo.
+```
+
+Commit your changes, wait for the reconciliation, and the next time you point your browser at your ingress, you should get a beautiful, valid, officially-signed SSL certificate[^1]!
πŸ₯³πŸ₯³πŸ₯³ + +### Troubleshooting + +Are things not working as expected? Watch the traefik's logs with ```kubectl logs -n traefik -l app.kubernetes.io/name=traefik -f```. + +--8<-- "recipe-footer.md" + +[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup! diff --git a/manuscript/kubernetes/loadbalancer.md b/manuscript/kubernetes/loadbalancer.md deleted file mode 100644 index a01a8b1..0000000 --- a/manuscript/kubernetes/loadbalancer.md +++ /dev/null @@ -1,332 +0,0 @@ -# Load Balancer - -One of the issues I encountered early on in migrating my Docker Swarm workloads to Kubernetes on GKE, was how to reliably permit inbound traffic into the cluster. - -There were several complications with the "traditional" mechanisms of providing a load-balanced ingress, not the least of which was cost. I also found that even if I paid my cloud provider (_Google_) for a load-balancer Kubernetes service, this service required a unique IP per exposed port, which was incompatible with my mining pool empire (_mining pools need to expose multiple ports on the same DNS name_). - -See further examination of the problem and possible solutions in the [Kubernetes design](/kubernetes/design/#the-challenges-of-external-access) page. - -This recipe details a simple design to permit the exposure of as many ports as you like, on a single public IP, to a cluster of Kubernetes nodes running as many pods/containers as you need, with services exposed via NodePort. - -![Kubernetes Design](/images/kubernetes-cluster-design.png) - -## Ingredients - -1. [Kubernetes cluster](/kubernetes/cluster/) -2. VM _outside_ of Kubernetes cluster, with a fixed IP address. Perhaps, on a [\$5/month Digital Ocean Droplet](https://www.digitalocean.com/?refcode=e33b78ad621b).. (_yes, another referral link. Mooar 🍷 for me!_) -3. Geek-Fu required : 🐧🐧🐧 (_complex - inline adjustments required_) - -## Preparation - -### Summary - -### Create LetsEncrypt certificate - -!!! warning -Safety first, folks. You wouldn't run a webhook exposed to the big bad ol' internet without first securing it with a valid SSL certificate? Of course not, I didn't think so! - -Use whatever method you prefer to generate (and later, renew) your LetsEncrypt cert. The example below uses the CertBot docker image for CloudFlare DNS validation, since that's what I've used elsewhere. - -We **could** run our webhook as a simple HTTP listener, but really, in a world where LetsEncrypt cacn assign you a wildcard certificate in under 30 seconds, thaht's unforgivable. Use the following **general** example to create a LetsEncrypt wildcard cert for your host: - -In my case, since I use CloudFlare, I create /etc/webhook/letsencrypt/cloudflare.ini: - -```ini -dns_cloudflare_email=davidy@funkypenguin.co.nz -dns_cloudflare_api_key=supersekritnevergonnatellyou -``` - -I request my cert by running: - -```bash -cd /etc/webhook/ -docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare --preferred-challenges dns certonly --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini -d ''*.funkypenguin.co.nz' -``` - -!!! question -Why use a wildcard cert? So my enemies can't examine my certs to enumerate my various services and discover my weaknesses, of course! 
- -I add the following as a cron command to renew my certs every day: - -```bash -cd /etc/webhook && docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare renew --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini -``` - -Once you've confirmed you've got a valid LetsEncrypt certificate stored in `/etc/webhook/letsencrypt/live//fullcert.pem`, proceed to the next step.. - -### Install webhook - -We're going to use to run our webhook. On some distributions (_❀️ ya, Debian!_), webhook and its associated systemd config can be installed by running `apt-get install webhook`. - -### Create webhook config - -We'll create a single webhook, by creating `/etc/webhook/hooks.json` as follows. Choose a nice secure random string for your MY_TOKEN value! - -```bash -mkdir /etc/webhook -export MY_TOKEN=ilovecheese -echo << EOF > /etc/webhook/hooks.json -[ - { - "id": "update-haproxy", - "execute-command": "/etc/webhook/update-haproxy.sh", - "command-working-directory": "/etc/webhook", - "pass-arguments-to-command": - [ - { - "source": "payload", - "name": "name" - }, - { - "source": "payload", - "name": "frontend-port" - }, - { - "source": "payload", - "name": "backend-port" - }, - { - "source": "payload", - "name": "dst-ip" - }, - { - "source": "payload", - "name": "action" - } - ], - "trigger-rule": - { - "match": - { - "type": "value", - "value": "$MY_TOKEN", - "parameter": - { - "source": "header", - "name": "X-Funkypenguin-Token" - } - } - } - } -] -EOF -``` - -!!! note -Note that to avoid any bozo from calling our we're matching on a token header in the request called `X-Funkypenguin-Token`. Webhook will **ignore** any request which doesn't include a matching token in the request header. - -### Update systemd for webhook - -!!! note -This section is particular to Debian Stretch and its webhook package. If you're using another OS for your VM, just ensure that you can start webhook with a config similar to the one illustrated below. - -Since we want to force webhook to run in secure mode (_no point having a token if it can be extracted from a simple packet capture!_) I ran `systemctl edit webhook`, and pasted in the following: - -```bash -[Service] -# Override the default (non-secure) behaviour of webhook by passing our certificate details and custom hooks.json location -ExecStart= -ExecStart=/usr/bin/webhook -hooks /etc/webhook/hooks.json -verbose -secure -cert /etc/webhook/letsencrypt/live/funkypenguin.co.nz/fullchain.pem -key /etc/webhook/letsencrypt/live/funkypenguin.co.nz/privkey.pem -``` - -Then I restarted webhook by running `systemctl enable webhook && systemctl restart webhook`. 
I watched the subsequent logs by running `journalctl -u webhook -f` - -### Create /etc/webhook/update-haproxy.sh - -When successfully authenticated with our top-secret token, our webhook will execute a local script, defined as follows (_yes, you should create this file_): - -```bash -#!/bin/bash - -NAME=$1 -FRONTEND_PORT=$2 -BACKEND_PORT=$3 -DST_IP=$4 -ACTION=$5 - -# Bail if we haven't received our expected parameters -if [[ "$#" -ne 5 ]] -then - echo "illegal number of parameters" - exit 2; -fi - -# Either add or remove a service based on $ACTION -case $ACTION in - add) - # Create the portion of haproxy config - cat << EOF > /etc/webhook/haproxy/$FRONTEND_PORT.inc -### >> Used to run $NAME:${FRONTEND_PORT} -frontend ${FRONTEND_PORT}_frontend - bind *:$FRONTEND_PORT - mode tcp - default_backend ${FRONTEND_PORT}_backend - -backend ${FRONTEND_PORT}_backend - mode tcp - balance roundrobin - stick-table type ip size 200k expire 30m - stick on src - server s1 $DST_IP:$BACKEND_PORT -### << Used to run $NAME:$FRONTEND_PORT -EOF - ;; - delete) - rm /etc/webhook/haproxy/$FRONTEND_PORT.inc - ;; - *) - echo "Invalid action $ACTION" - exit 2 -esac - -# Concatenate all the haproxy configs into a single file -cat /etc/webhook/haproxy/global /etc/webhook/haproxy/*.inc > /etc/webhook/haproxy/pre_validate.cfg - -# Validate the generated config -haproxy -f /etc/webhook/haproxy/pre_validate.cfg -c - -# If validation was successful, only _then_ copy it over to /etc/haproxy/haproxy.cfg, and reload -if [[ $? -gt 0 ]] -then - echo "HAProxy validation failed, not continuing" - exit 2 -else - # Remember what the original file looked like - m1=$(md5sum "/etc/haproxy/haproxy.cfg") - - # Overwrite the original file - cp /etc/webhook/haproxy/pre_validate.cfg /etc/haproxy/haproxy.cfg - - # Get MD5 of new file - m2=$(md5sum "/etc/haproxy/haproxy.cfg") - - # Only if file has changed, then we need to reload haproxy - if [ "$m1" != "$m2" ] ; then - echo "HAProxy config has changed, reloading" - systemctl reload haproxy - fi -fi -``` - -### Create /etc/webhook/haproxy/global - -Create `/etc/webhook/haproxy/global` and populate with something like the following. This will be the non-dynamically generated part of our HAProxy config: - -```ini -global - log /dev/log local0 - log /dev/log local1 notice - chroot /var/lib/haproxy - stats socket /run/haproxy/admin.sock mode 660 level admin - stats timeout 30s - user haproxy - group haproxy - daemon - - # Default SSL material locations - ca-base /etc/ssl/certs - crt-base /etc/ssl/private - - # Default ciphers to use on SSL-enabled listening sockets. - # For more information, see ciphers(1SSL). 
This list is from: - # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ - # An alternative list with additional directives can be obtained from - # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy - ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS - ssl-default-bind-options no-sslv3 - -defaults - log global - mode tcp - option tcplog - option dontlognull - timeout connect 5000 - timeout client 5000000 - timeout server 5000000 - errorfile 400 /etc/haproxy/errors/400.http - errorfile 403 /etc/haproxy/errors/403.http - errorfile 408 /etc/haproxy/errors/408.http - errorfile 500 /etc/haproxy/errors/500.http - errorfile 502 /etc/haproxy/errors/502.http - errorfile 503 /etc/haproxy/errors/503.http - errorfile 504 /etc/haproxy/errors/504.http -``` - -## Serving - -### Take the bait! - -Whew! We now have all the components of our automated load-balancing solution in place. Browse to your VM's FQDN at , and you should see the text "_Hook rules were not satisfied_", with a valid SSL certificate (_You didn't send a token_). - -If you don't see the above, then check the following: - -1. Does the webhook verbose log (`journalctl -u webhook -f`) complain about invalid arguments or missing files? -2. Is port 9000 open to the internet on your VM? - -### Apply to pods - -You'll see me use this design in any Kubernetes-based recipe which requires container-specific ports, like UniFi. Here's an excerpt of the .yml which defines the UniFi controller: - -```yaml - -spec: - containers: - - image: lscr.io/linuxserver/unifi - name: controller - volumeMounts: - - name: controller-volumeclaim - mountPath: /config - - image: funkypenguin/poor-mans-k8s-lb - imagePullPolicy: Always - name: 8080-phone-home - env: - - name: REPEAT_INTERVAL - value: "600" - - name: FRONTEND_PORT - value: "8080" - - name: BACKEND_PORT - value: "30808" - - name: NAME - value: "unifi-adoption" - - name: WEBHOOK - value: "https://my-secret.url.wouldnt.ya.like.to.know:9000/hooks/update-haproxy" - - name: WEBHOOK_TOKEN - valueFrom: - secretKeyRef: - name: unifi-credentials - key: webhook_token.secret - -``` - -The takeaways here are: - -1. We add the funkypenguin/poor-mans-k8s-lb containier to any pod which has special port requirements, forcing the container to run on the same node as the other containers in the pod (_in this case, the UniFi controller_) -2. We use a Kubernetes secret for the webhook token, so that our .yml can be shared without exposing sensitive data - -Here's what the webhook logs look like when the above is added to the UniFi deployment: - -```bash -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Started POST /hooks/update-haproxy -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy got matched -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy hook triggered successfully -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Completed 200 OK in 2.123921ms -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 executing /etc/webhook/update-haproxy.sh (/etc/webhook/update-haproxy.sh) with arguments ["/etc/webhook/update-haproxy.sh" "unifi-adoption" "8080" "30808" "35.244.91.178" "add"] and environment [] using /etc/webhook as cwd -Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 command output: Configuration file is valid - -``` - -## Move on.. - -Still with me? 
Good. Move on to setting up an ingress SSL terminating proxy with Traefik.. - -- [Start](/kubernetes/) - Why Kubernetes? -- [Design](/kubernetes/design/) - How does it fit together? -- [Cluster](/kubernetes/cluster/) - Setup a basic cluster -- Load Balancer (this page) - Setup inbound access -- [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data -- [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks -- [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm - -[^1]: This is MVP of the load balancer solution. Any suggestions for improvements are welcome πŸ˜‰ - ---8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/loadbalancer/index.md b/manuscript/kubernetes/loadbalancer/index.md new file mode 100644 index 0000000..917c80b --- /dev/null +++ b/manuscript/kubernetes/loadbalancer/index.md @@ -0,0 +1,54 @@ +--- +description: Kubernetes Loadbalancer options +--- +# Loadbalancing Services + +## TL;DR + +1. I have multiple nodes (*you'd benefit from [MetalLB](/kubernetes/loadbalancer/metallb/)*) +2. I only need/want one node (*just go with [k3s svclb](/kubernetes/loadbalancer/k3s/)*) + +## But why? + +In Kubernetes, you don't access your containers / pods "*directly*", other than for debugging purposes. Rather, we have a construct called a "*service*", which is "in front of" one or more pods. + +Consider that this is how containers talk to each other under Docker Swarm: + +```mermaid +sequenceDiagram + wordpress->>+mysql: Are you there? + mysql->>+wordpress: Yes, ready to serve! + +``` + +But **this** is how containers (pods) talk to each other under Kubernetes: + +```mermaid +sequenceDiagram + wordpress->>+mysql-service: Are you there? + mysql-service->>+mysql-pods: Are you there? + mysql-pods->>+wordpress: Yes, ready to serve! +``` + +Why do we do this? + +1. A service isn't pinned to a particular node, it's a virtual IP which lives in the cluster and doesn't change as pods/nodes come and go. +2. Using a service "in front of" pods means that rolling updates / scaling of the pods can take place, but communication with the service is uninterrupted (*assuming correct configuration*). + +Here's some [more technical detail](https://kubernetes.io/docs/concepts/services-networking/service/) into how it works, but what you need to know is that when you want to interact with your containers in Kubernetes (*either from other containers or from outside, as a human*), you'll be talking to **services.** + +Also, services are not exposed outside of the cluster by default. There are 3 levels of "exposure" for your Kubernetes services, briefly: + +1. ClusterIP (*A service is only available to other services in the cluster - this is the default*) +2. NodePort (*A mostly-random high-port on the node running the pod is forwarded to the pod*)[^1] +3. LoadBalancer (*Some external help is required to forward a particular IP into the cluster, terminating on the node running your pod*) + +For anything vaguely useful, only `LoadBalancer` is a viable option. Even though `NodePort` may allow you to access services directly, who wants to remember that they need to access [Radarr][radarr] on `192.168.1.44:34542` and Homer on `192.168.1.44:34532`? Ugh. + +Assuming you only had a single Kubernetes node (*say, a small k3s deployment*), you'd want 100% of all incoming traffic to be directed to that node, and so you wouldn't **need** a loadbalancer. You'd just point some DNS entries / firewall NATs at the IP of the cluster, and be done. 
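+
+In practice, "asking" for a load-balanced service is just a matter of setting `spec.type` on the service. Here's a minimal sketch (*the `podinfo` names, selector and ports are illustrative; your helm chart will normally template this for you*):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: podinfo
+spec:
+  type: LoadBalancer    # instead of the default ClusterIP
+  selector:
+    app: podinfo        # matches the labels on the pods backing this service
+  ports:
+    - name: http
+      port: 80          # the port exposed on the allocated external IP
+      targetPort: 9898  # the port the pods actually listen on
+```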
+
+(*Pointing everything at a single node is [the way k3s works](/kubernetes/loadbalancer/k3s/) by default, although the service type is still called a LoadBalancer*)
+
+--8<-- "recipe-footer.md"
+
+[^1]: It is possible to be prescriptive about which port is used for a NodePort-exposed service, and this is occasionally [a valid deployment strategy](https://github.com/portainer/k8s/#using-nodeport-on-a-localremote-cluster), but you're usually limited to ports between 30000 and 32767.
diff --git a/manuscript/kubernetes/loadbalancer/k3s.md b/manuscript/kubernetes/loadbalancer/k3s.md
new file mode 100644
index 0000000..88ab4d0
--- /dev/null
+++ b/manuscript/kubernetes/loadbalancer/k3s.md
@@ -0,0 +1,27 @@
+---
+description: k3s' lightweight loadbalancer
+---
+
+# K3s Load Balancing
+
+If your cluster is using K3s, and you have only one node, then you could be adequately served by the [built-in "klipper" load balancer provided with k3s](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer).
+
+If you want more than one node in your cluster[^1] (*either now or in future*), I'd steer you towards [MetalLB](/kubernetes/loadbalancer/metallb/) instead.
+
+## How does it work?
+
+When **not** deployed with `--disable servicelb`, every time you create a service of type `LoadBalancer`, k3s will deploy a daemonset (*a collection of pods which run on every host in the cluster*), listening on that given port on the host. So deploying a LoadBalancer service for nginx on ports 80 and 443, for example, would result in **every** cluster host listening on ports 80 and 443, and sending any incoming traffic to the nginx service.
+
+## Well that's great, isn't it?
+
+Yes, to get you started. But consider the following limitations:
+
+1. This magic can only happen **once** per port. So you can't, for example, run two mysql instances on port 3306.
+2. Because **every** host listens on the exposed ports, you can't run anything **else** on the hosts which listens on those ports.
+3. Having multiple hosts listening on a given port still doesn't solve the problem of how to reliably direct traffic to all hosts, and how to gracefully fail over if one of the hosts fails.
+
+To tackle these issues, you need some more advanced network configuration, along with [MetalLB](/kubernetes/loadbalancer/metallb/).
+
+--8<-- "recipe-footer.md"
+
+[^1]: And seriously, if you're building a Kubernetes cluster, of **course** you'll want more than one host!
diff --git a/manuscript/kubernetes/loadbalancer/metallb/index.md b/manuscript/kubernetes/loadbalancer/metallb/index.md
new file mode 100644
index 0000000..785c3ea
--- /dev/null
+++ b/manuscript/kubernetes/loadbalancer/metallb/index.md
@@ -0,0 +1,287 @@
+---
+description: MetalLB - Load-balancing for bare-metal Kubernetes clusters
+---
+# MetalLB
+
+[MetalLB](https://metallb.universe.tf/) offers a network [load balancer](/kubernetes/loadbalancer/) implementation which works on "bare metal" (*as opposed to a cloud provider*).
+
+MetalLB does two jobs:
+
+1. Provides address allocation to services out of a pool of addresses which you define
+2. Announces these addresses to devices outside the cluster, either using ARP/NDP (L2) or BGP (L3)
+
+!!!
summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] If k3s is used, then it was deployed with `--disable servicelb` + + Optional: + + * [ ] Network firewall/router supporting BGP (*ideal but not required*) + +## Preparation + +### Allocations + +You'll need to make some decisions re IP allocations. + +* What is the range of addresses you want to use for your LoadBalancer service pool? If you're using BGP, this can be a dedicated subnet (*i.e. a /24*), and if you're not, this should be a range of IPs in your existing network space for your cluster nodes (*i.e., 192.168.1.100-200*) +* If you're using BGP, pick two [private AS numbers](https://datatracker.ietf.org/doc/html/rfc6996#section-5) between 64512 and 65534 inclusively. + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-metallb.yaml`: + +??? example "Example NameSpace (click to expand)" + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: metallb-system + ``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the (*prolific*) [bitnami chart repository](https://github.com/bitnami/charts/tree/master/bitnami), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-bitnami.yaml`: + +??? example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: bitnami + namespace: flux-system + spec: + interval: 15m + url: https://charts.bitnami.com/bitnami + ``` + +### Kustomization + +Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/metallb-system`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-metallb.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: metallb--metallb-system + namespace: flux-system + spec: + interval: 15m + path: ./metallb-system + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: metallb-controller + namespace: metallb-system + + ``` + +!!! question "What's with that screwy name?" + > Why'd you call the kustomization `metallb--metallb-system`? + + I keep my file and object names as consistent as possible. In most cases, the helm chart is named the same as the namespace, but in some cases, by upstream chart or historical convention, the namespace is different to the chart name. MetalLB is one of these - the helmrelease/chart name is `metallb`, but the typical namespace it's deployed in is `metallb-system`. (*Appending `-system` seems to be a convention used in some cases for applications which support the entire cluster*). 
To avoid confusion when I list all kustomizations with `kubectl get kustomization -A`, I give these oddballs a name which identifies both the helmrelease and the namespace. + +### ConfigMap (for HelmRelease) + +Now we're into the metallb-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/metallb/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `metallb/configmap-metallb-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + creationTimestamp: null + name: metallb-helm-chart-value-overrides + namespace: metallb-system + data: + values.yaml: |- + ## @section Global parameters + ## Global Docker image parameters + ## Please, note that this will override the image parameters, including dependencies, configured to use the global value + ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + + ## @param global.imageRegistry Global Docker image registry + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## + global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + + prometheus: + ## Prometheus Operator service monitors + ## + serviceMonitor: + ## @param speaker.prometheus.serviceMonitor.enabled Enable support for Prometheus Operator + ## + enabled: false + ## @param speaker.prometheus.serviceMonitor.jobLabel Job label for scrape target + ## + jobLabel: "app.kubernetes.io/name" + ## @param speaker.prometheus.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used + ## + interval: "" + ## @param speaker.prometheus.serviceMonitor.metricRelabelings Specify additional relabeling of metrics + ## + metricRelabelings: [] + ## @param speaker.prometheus.serviceMonitor.relabelings Specify general relabeling + ## + relabelings: [] + ``` + +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Then work your way through the values you pasted, and change any which are specific to your configuration. I'd recommend changing the following: + +* `existingConfigMap: metallb-config`: I prefer to set my MetalLB config independently of the chart config, so I set this to `metallb-config`, which I then define below. +* `commonAnnotations`: Anticipating the future use of Reloader to bounce applications when their config changes, I add the `configmap.reloader.stakater.com/reload: "metallb-config"` annotation to all deployed objects, which will instruct Reloader to bounce the daemonset if the ConfigMap changes. + +### ConfigMap (for MetalLB) + +Finally, it's time to actually configure MetalLB! As discussed above, I prefer to configure the helm chart to apply config from an existing ConfigMap, so that I isolate my application configuration from my chart configuration (*and make tracking changes easier*). In my setup, I'm using BGP against a pair of pfsense[^1] firewalls, so per the [official docs](https://metallb.universe.tf/configuration/), I use the following configuration, saved in my flux repo as `flux-system/configmap-metallb-config.yaml`: + +??? 
example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + namespace: metallb-system + name: metallb-config + data: + config: | + peers: + - peer-address: 192.168.33.2 + peer-asn: 64501 + my-asn: 64500 + - peer-address: 192.168.33.4 + peer-asn: 64501 + my-asn: 64500 + + address-pools: + - name: default + protocol: bgp + avoid-buggy-ips: true + addresses: + - 192.168.32.0/24 + ``` + +!!! question "What does that mean?" + In the config referenced above, I define one pool of addresses (`192.168.32.0/24`) which MetalLB is responsible for allocating to my services. MetalLB will then "advertise" these addresses to my firewalls (`192.168.33.2` and `192.168.33.4`), in an eBGP relationship where the firewalls' ASN is `64501` and MetalLB's ASN is `64500`. Provided I'm using my firewalls as my default gateway (*a VIP*), when I try to access one of the `192.168.32.x` IPs from any subnet connected to my firewalls, the traffic will be routed from the firewall to one of the cluster nodes running the pods selected by that service. + +!!! note "Dude, that's too complicated!" + There's an easier way, with some limitations. If you configure MetalLB in L2 mode, all you need to do is to define a range of IPs within your existing node subnet, like this: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + namespace: metallb-system + name: metallb-config + data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.1.240-192.168.1.250 + ``` + +### HelmRelease + +Lastly, having set the scene above, we define the HelmRelease which will actually deploy MetalLB into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `metallb/helmrelease-metallb.yaml`: + +??? example "Example HelmRelease (click to expand)" + ```yaml + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: metallb + namespace: metallb-system + spec: + chart: + spec: + chart: metallb + version: 2.x + sourceRef: + kind: HelmRepository + name: bitnami + namespace: flux-system + interval: 15m + timeout: 5m + releaseName: metallb + valuesFrom: + - kind: ConfigMap + name: metallb-helm-chart-value-overrides + valuesKey: values.yaml # This is the default, but best to be explicit for clarity + ``` + +--8<-- "kubernetes-why-not-config-in-helmrelease.md" + +## Deploy MetalLB + +Having committed the above to your flux repository, you should shortly see a metallb kustomization, and in the `metallb-system` namespace, a controller and a speaker pod for every node: + +```bash +root@cn1:~# kubectl get pods -n metallb-system -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +metallb-controller-779d8686f6-mgb4s 1/1 Running 0 21d 10.0.6.19 wn3 +metallb-speaker-2qh2d 1/1 Running 0 21d 192.168.33.24 wn4 +metallb-speaker-7rz24 1/1 Running 0 21d 192.168.33.22 wn2 +metallb-speaker-gbm5r 1/1 Running 0 21d 192.168.33.23 wn3 +metallb-speaker-gzgd2 1/1 Running 0 21d 192.168.33.21 wn1 +metallb-speaker-nz6kd 1/1 Running 0 21d 192.168.33.25 wn5 +root@cn1:~# +``` + +!!! question "Why are there no speakers on my masters?" + + In some cluster setups, master nodes are "tainted" to prevent workloads running on them and consuming capacity required for "mastering". 
If this is the case for you, but you actually **do** want to run some externally-exposed workloads on your masters, you'll need to update the `speaker.tolerations` value for the HelmRelease config to include: + + ```yaml + - key: "node-role.kubernetes.io/master" + effect: "NoSchedule" + ``` + +### How do I know it's working? + +If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo service is in `ClusterIP` mode, so it's only reachable within the cluster. + +Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change this: + +``` yaml hl_lines="6" + + # Kubernetes Service settings + service: + enabled: true + annotations: {} + type: ClusterIP + +``` + +To: + +``` yaml hl_lines="6" + + # Kubernetes Service settings + service: + enabled: true + annotations: {} + type: LoadBalancer + +``` + +Commit your changes, wait for a reconciliation, and run `kubectl get services -n podinfo`. All going well, you should see that the service now has an IP assigned from the pool you chose for MetalLB! + +--8<-- "recipe-footer.md" + +[^1]: I've documented an example re [how to configure BGP between MetalLB and pfsense](/kubernetes/loadbalancer/metallb/pfsense/). diff --git a/manuscript/kubernetes/loadbalancer/metallb/pfsense.md b/manuscript/kubernetes/loadbalancer/metallb/pfsense.md new file mode 100644 index 0000000..eec4917 --- /dev/null +++ b/manuscript/kubernetes/loadbalancer/metallb/pfsense.md @@ -0,0 +1,79 @@ +--- +description: Using MetalLB with pfsense and BGP +--- +# MetalLB with pfSense + +This is an addendum to the MetalLB recipe, explaining how to configure MetalLB to perform BGP peering with a pfSense firewall. + +!!! summary "Ingredients" + + * [X] A [Kubernetes cluster](/kubernetes/cluster/) + * [X] [MetalLB](/kubernetes/loadbalancer/metallb/) deployed + * [X] One or more pfSense firewalls + * [X] Basic familiarity with pfSense operation + +## Preparation + +Complete the [MetalLB](/kubernetes/loadbalancer/metallb/) installation, including the process of identifying ASNs for both your pfSense firewall and your MetalLB configuration. + +Install the FRR package in pfsense, under **System -> Package Manager -> Available Packages** + +### Configure FRR Global/Zebra + +Under **Services -> FRR Global/Zebra**, enable FRR, set your router ID (*this will be your router's peer IP in MetalLB config*), and set a master password (*because apparently you have to, even though we don't use it*): + +![Enabling BGP routing](../../../../../../images/metallb-pfsense-00.png) + +### Configure FRR BGP + +Under **Services -> FRR BGP**, globally enable BGP, and set your local AS and router ID: + +![Enabling BGP routing](../../../../../../images/metallb-pfsense-01.png) + +### Configure FRR BGP Advanced + +Use the tabs at the top of the FRR configuration to navigate to "**Advanced**"... + +![Enabling BGP routing](../../../../../../images/metallb-pfsense-02.png) + +... and scroll down to **eBGP**. Check the checkbox titled "**Disable eBGP Require Policy**: + +![Enabling BGP routing](../../../../../../images/metallb-pfsense-03.png) + +!!! question "Isn't disabling a policy check a Bad Idea(tm)?" + If you're an ISP, sure. 
If you're only using eBGP to share routes between MetalLB and pfsense, then applying policy is an unnecessary complication.[^1] + +### Configure BGP neighbors + +#### Peer Group + +It's useful to bundle our configurations within a "peer group" (*a collection of settings which applies to all neighbors who are members of that group*), so start off by creating a neighbor with the name of "**metallb**" (*this will become a peer-group*). Set the remote AS (*because you have to*), and leave the rest of the settings as default. + +!!! question "Why bother with a peer group?" + > If we're not changing any settings, why are we bothering with a peer group? + + We may later want to change settings which affect all the peers, such as prefix lists, route-maps, etc. We're doing this now for the benefit of our future selves πŸ’ͺ + +#### Individual Neighbors + +Now add each node running MetalLB, as a BGP neighbor. Pick the peer-group you created above, and configure each neighbor's ASN: + +![Enabling BGP routing](../../../../../../images/metallb-pfsense-04.png) + +## Serving + +Once you've added your neighbors, you should be able to use the FRR tab navigation (*it's weird, I know!*) to get to Status / BGP, and identify your neighbors, and all the routes learned from them. In the screenshot below, you'll note that **most** routes are learned from all the neighbors - that'll be service backed by a daemonset, running on all nodes. The `192.168.32.3/32` route, however, is only received from `192.168.33.22`, meaning only one node is running the pods backing this service, so only those pods are advertising the route to pfSense: + +![BGP route-](../../../../../../images/metallb-pfsense-05.png) + +### Troubleshooting + +If you're not receiving any routes from MetalLB, or if the neighbors aren't in an established state, here are a few suggestions for troubleshooting: + +1. Confirm on PFSense that the BGP connections (*TCP port 179*) are not being blocked by the firewall +2. Examine the metallb speaker logs in the cluster, by running `kubectl logs -n metallb-system -l app.kubernetes.io/name=metallb` +3. SSH to the pfsense, start a shell and launch the FFR shell by running `vtysh`. Now you're in a cisco-like console where commands like `show ip bgp sum` and `show ip bgp neighbors received-routes` will show you interesting debugging things. + +--8<-- "recipe-footer.md" + +[^1]: If you decide to deploy some policy with route-maps, prefix-lists, etc, it's all found under **Services -> FRR Global/Zebra** πŸ¦“ diff --git a/manuscript/kubernetes/monitoring/index.md b/manuscript/kubernetes/monitoring/index.md new file mode 100644 index 0000000..26b4f23 --- /dev/null +++ b/manuscript/kubernetes/monitoring/index.md @@ -0,0 +1,314 @@ +# Miniflux + +Miniflux is a lightweight RSS reader, developed by [FrΓ©dΓ©ric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_) + +![Miniflux Screenshot](/images/miniflux.png) + +I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate: + +* Compatible with the Fever API, read your feeds through existing mobile and desktop clients (_This is the killer feature for me. 
I hardly ever read RSS on my desktop, I typically read on my iPhone or iPad, using [Fiery Feeds](http://cocoacake.net/apps/fiery/) or my new squeeze, [Unread](https://www.goldenhillsoftware.com/unread/)_) +* Send your bookmarks to Pinboard, Wallabag, Shaarli or Instapaper (_I use this to automatically pin my bookmarks for collection on my [blog](https://www.funkypenguin.co.nz/)_) +* Feeds can be configured to download a "full" version of the content (_rather than an excerpt_) +* Use the Bookmarklet to subscribe to a website directly from any browsers + +!!! abstract "2.0+ is a bit different" + [Some things changed](https://docs.miniflux.net/en/latest/migration.html) when Miniflux 2.0 was released. For one thing, the only supported database is now postgresql (_no more SQLite_). External themes are gone, as is PHP (_in favor of golang_). It's been a controversial change, but I'm keen on minimal and single-purpose, so I'm still very happy with the direction of development. The developer has laid out his [opinions](https://docs.miniflux.net/en/latest/opinionated.html) re the decisions he's made in the course of development. + +## Ingredients + +1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/) +2. A DNS name for your miniflux instance (*miniflux.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress + +## Preparation + +### Prepare traefik for namespace + +When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *miniflux* namespace, as illustrated below: + +```yaml + +kubernetes: + namespaces: + - kube-system + - nextcloud + - kanboard + - miniflux + +``` + +If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods``` + +### Create data locations + +Although we could simply bind-mount local volumes to a local Kubuernetes cluster, since we're targetting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment. + +```bash +mkdir /var/data/config/miniflux +``` + +### Create namespace + +We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the miniflux stack with the following .yml: + +```bash +cat < /var/data/config/miniflux/namespace.yml +apiVersion: v1 +kind: Namespace +metadata: + name: miniflux +EOF +kubectl create -f /var/data/config/miniflux/namespace.yaml +``` + +### Create persistent volume claim + +Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the miniflux postgres database: + +```bash +cat < /var/data/config/miniflux/db-persistent-volumeclaim.yml +kkind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: miniflux-db + namespace: miniflux + annotations: + backup.kubernetes.io/deltas: P1D P7D +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +EOF +kubectl create -f /var/data/config/miniflux/db-persistent-volumeclaim.yaml +``` + +!!! question "What's that annotation about?" + The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. 
+
+### Create secrets
+
+It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents. Run the following, replacing ```imtoosexyformyadminpassword```, and the ```mydbpass``` value in both postgres-password.secret **and** database-url.secret:
+
+```bash
+echo -n "imtoosexyformyadminpassword" > admin-password.secret
+echo -n "mydbpass" > postgres-password.secret
+echo -n "postgres://miniflux:mydbpass@db/miniflux?sslmode=disable" > database-url.secret
+
+kubectl create secret -n miniflux generic miniflux-credentials \
+  --from-file=admin-password.secret \
+  --from-file=postgres-password.secret \
+  --from-file=database-url.secret
+```
+
+!!! tip "Why use ```echo -n```?"
+    Because. See [my blog post here](https://www.funkypenguin.co.nz/blog/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why!
+
+## Serving
+
+Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), we can create [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [services](https://kubernetes.io/docs/concepts/services-networking/service/), and an [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the miniflux [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
+
+### Create db deployment
+
+Deployments tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Create the db deployment by executing the following. Note that the deployment refers to the secrets created above.
+
+--8<-- "premix-cta.md"
+
+```bash
+cat <<EOF > /var/data/miniflux/db-deployment.yml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: miniflux
+  name: db
+  labels:
+    app: db
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: db
+  template:
+    metadata:
+      labels:
+        app: db
+    spec:
+      containers:
+      - image: postgres:11
+        name: db
+        volumeMounts:
+        - name: miniflux-db
+          mountPath: /var/lib/postgresql/data
+        env:
+        - name: POSTGRES_USER
+          value: "miniflux"
+        - name: POSTGRES_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: miniflux-credentials
+              key: postgres-password.secret
+      volumes:
+      - name: miniflux-db
+        persistentVolumeClaim:
+          claimName: miniflux-db
+EOF
+kubectl create -f /var/data/miniflux/db-deployment.yml
+```
+
+### Create app deployment
+
+Create the app deployment by executing the following. Again, note that the deployment refers to the secrets created above.
+
+```bash
+cat <<EOF > /var/data/miniflux/app-deployment.yml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: miniflux
+  name: app
+  labels:
+    app: app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: app
+  template:
+    metadata:
+      labels:
+        app: app
+    spec:
+      containers:
+      - image: miniflux/miniflux
+        name: app
+        env:
+        # This is necessary for miniflux to update the db schema, even on an empty DB
+        - name: CREATE_ADMIN
+          value: "1"
+        - name: RUN_MIGRATIONS
+          value: "1"
+        - name: ADMIN_USERNAME
+          value: "admin"
+        - name: ADMIN_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: miniflux-credentials
+              key: admin-password.secret
+        - name: DATABASE_URL
+          valueFrom:
+            secretKeyRef:
+              name: miniflux-credentials
+              key: database-url.secret
+EOF
+kubectl create -f /var/data/miniflux/app-deployment.yml
+```
+
+### Check pods
+
+Check that your deployment is running, with ```kubectl get pods -n miniflux```. After a minute or so, you should see 2 "Running" pods, as illustrated below:
+
+```bash
+[funkypenguin:~] % kubectl get pods -n miniflux
+NAME                   READY     STATUS    RESTARTS   AGE
+app-667c667b75-5jjm9   1/1       Running   0          4d
+db-fcd47b88f-9vvqt     1/1       Running   0          4d
+[funkypenguin:~] %
+```
+
+### Create db service
+
+The db service resource "advertises" the availability of PostgreSQL's port (TCP 5432) in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements.
+
+```bash
+cat <<EOF > /var/data/miniflux/db-service.yml
+kind: Service
+apiVersion: v1
+metadata:
+  name: db
+  namespace: miniflux
+spec:
+  selector:
+    app: db
+  ports:
+    - protocol: TCP
+      port: 5432
+  clusterIP: None
+EOF
+kubectl create -f /var/data/miniflux/db-service.yml
+```
+
+### Create app service
+
+The app service resource "advertises" the availability of miniflux's HTTP listener port (TCP 8080) in your pod. This is the service which will be referred to by the ingress (below), so that Traefik can route incoming traffic to the miniflux app.
+
+```bash
+cat <<EOF > /var/data/miniflux/app-service.yml
+kind: Service
+apiVersion: v1
+metadata:
+  name: app
+  namespace: miniflux
+spec:
+  selector:
+    app: app
+  ports:
+    - protocol: TCP
+      port: 8080
+  clusterIP: None
+EOF
+kubectl create -f /var/data/miniflux/app-service.yml
+```
+
+### Check services
+
+Check that your services are deployed, with ```kubectl get services -n miniflux```. You should see something like this:
+
+```bash
+[funkypenguin:~] % kubectl get services -n miniflux
+NAME   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
+app    ClusterIP   None         <none>        8080/TCP   55d
+db     ClusterIP   None         <none>        5432/TCP   55d
+[funkypenguin:~] %
+```
+
+### Create ingress
+
+The ingress resource tells Traefik to forward inbound requests for *miniflux.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain.
+
+```bash
+cat <<EOF > /var/data/miniflux/ingress.yml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: app
+  namespace: miniflux
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+  - host: miniflux.example.com
+    http:
+      paths:
+      - backend:
+          serviceName: app
+          servicePort: 8080
+EOF
+kubectl create -f /var/data/miniflux/ingress.yml
+```
+
+Check that your ingress is deployed, with ```kubectl get ingress -n miniflux```.
You should see something like this: + +```bash +[funkypenguin:~] 130 % kubectl get ingress -n miniflux +NAME HOSTS ADDRESS PORTS AGE +app miniflux.funkypenguin.co.nz 80 55d +[funkypenguin:~] % +``` + +### Access Miniflux + +At this point, you should be able to access your instance on your chosen DNS name (*i.e. *) + +### Troubleshooting + +To look at the Miniflux pod's logs, run ```kubectl logs -n miniflux -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/). + +--8<-- "recipe-footer.md" diff --git a/manuscript/kubernetes/persistence/index.md b/manuscript/kubernetes/persistence/index.md new file mode 100644 index 0000000..8e7692e --- /dev/null +++ b/manuscript/kubernetes/persistence/index.md @@ -0,0 +1,11 @@ +# Persistence + +So we've gone as far as we can with our cluster, without any form of persistence. As soon as we want to retain data, be it a database, metrics history, or objects, we need one or more ways to persist data within the cluster. + +Here are some popular options, ranked in difficulty/complexity, in vaguely ascending order: + +* [Local Path Provisioner](/kubernetes/persistence/local-path-provisioner/) (on k3s) +* [TopoLVM](/kubernetes/persistence/topolvm/) +* OpenEBS (coming soon) +* Rook Ceph (coming soon) +* Longhorn (coming soon) diff --git a/manuscript/kubernetes/persistence/local-path-provisioner.md b/manuscript/kubernetes/persistence/local-path-provisioner.md new file mode 100644 index 0000000..1f01705 --- /dev/null +++ b/manuscript/kubernetes/persistence/local-path-provisioner.md @@ -0,0 +1,45 @@ +# Local Path Provisioner + +[k3s](/kubernetes/cluster/k3s/) installs itself with "Local Path Provisioner", a simple controller whose job it is to create local volumes on each k3s node. If you only have one node, or you just want something simple to start learning with, then `local-path` is ideal, since it requires no further setup. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) deployed with [k3s](/kubernetes/cluster/k3s/) + +Here's how you know you've got the StorageClass: + +```bash +root@shredder:~# kubectl get sc +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 60m +root@shredder:~# +``` + +## Preparation + +### Basics + +A few things you should know: + +1. This is not **network storage**. The volume you create will forever be found to the k3s node its pod is executed on. If you later take that node down for maintenance, the pods will not be able to start on other nodes, because they won't find their volumes. +2. The default path for the volumes is `/opt/local-path-provisioner`, although this can be changed by [editing a ConfigMap](https://github.com/rancher/local-path-provisioner/blob/master/README.md#customize-the-configmap). Make sure you have enough disk space! [^1] +3. There's no support for resizing a volume. If you create a volume and later work out that it's too small, you'll have to destroy it and recreate it. (*More sophisticated provisioners like [rook-ceph](/kubernetes/persistence/rook-ceph/) and [topolvm](/kubernetes/persistence/topolvm/) allow for dynamic resizing of volumes*) + +### When to use it + +* When you don't care much about your storage. This seems backwards, but sometimes you need large amounts of storage for relatively ephemeral reasons, like batch processing, or log aggregation. 
You may decide the convenience of using Local Path Provisioner for quick, hard-drive-speed storage outweighs the minor hassle of loosing your metrics data if you were to have a node outage. +* When [TopoLVM](/kubernetes/persistence/topolvm/) is not a viable option, and you'd rather use available disk space on your existing, formatted filesystems + +### When not to use it + +* When you have any form of redundancy requirement on your persisted data. +* When you're not using k3s. +* You may one day want to resize your volumes. + +### Summary + +In summary, Local Path Provisioner is fine if you have very specifically sized workloads and you don't care about node redundancy. + +--8<-- "recipe-footer.md" + +[^1]: [TopoLVM](/kubernetes/persistence/topolvm/) also creates per-node volumes which aren't "portable" between nodes, but because it relies on LVM, it is "capacity-aware", and is able to distribute storage among multiple nodes based on available capacity. diff --git a/manuscript/kubernetes/persistence/longhorn.md b/manuscript/kubernetes/persistence/longhorn.md new file mode 100644 index 0000000..36f0cf3 --- /dev/null +++ b/manuscript/kubernetes/persistence/longhorn.md @@ -0,0 +1,3 @@ +# Longhorn + +Coming soon! diff --git a/manuscript/kubernetes/persistence/openebs.md b/manuscript/kubernetes/persistence/openebs.md new file mode 100644 index 0000000..7c911fd --- /dev/null +++ b/manuscript/kubernetes/persistence/openebs.md @@ -0,0 +1,3 @@ +# Open EBS + +Coming soon! diff --git a/manuscript/kubernetes/persistence/rook-ceph.md b/manuscript/kubernetes/persistence/rook-ceph.md new file mode 100644 index 0000000..f0a3812 --- /dev/null +++ b/manuscript/kubernetes/persistence/rook-ceph.md @@ -0,0 +1,3 @@ +# Rook Ceph + +Coming soon! diff --git a/manuscript/kubernetes/persistence/topolvm.md b/manuscript/kubernetes/persistence/topolvm.md new file mode 100644 index 0000000..815435b --- /dev/null +++ b/manuscript/kubernetes/persistence/topolvm.md @@ -0,0 +1,279 @@ +# TopoLVM + +TopoLVM is **like** [Local Path Provisioner](/kubernetes/persistence/local-path-provisioner/), in that it deals with local volumes specific to each Kubernetes node, but it offers more flexibility, and is more suited for a production deployment. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] A dedicated disk, or free LVM volume space, for provisioning volumes + +Additional benefits offered by TopoLVM are: + +* Volumes can by dynamically expanded +* The scheduler is capacity-aware, and can schedule pods to nodes with enough capacity for the pods' storage requirements +* Multiple storageclasses are supported, so you could, for example, create a storageclass for HDD-backed volumes, and another for SSD-backed volumes + +## Preparation + +### Volume Group + +Finally you get to do something on your nodes without YAML or git, like a pre-GitOps, bare-metal-cavemonkey! :monkey_face: + +On each node, you'll need an LVM Volume Group (VG) for TopoLVM to consume. The most straightforward to to arrange this is to dedicate a disk to TopoLVM, and create a dedicated PV and VG for it. + +In brief, assuming `/dev/sdb` is the disk (*and it's unused*), you'd do the following to create a VG called `VG-topolvm`: + +```bash +pvcreate /dev/sdb +vgcreate VG-topolvm /dev/sdb +``` + +!!! tip + If you don't have a dedicated disk, you could try installing your OS using LVM partitioning, and leave some space unused, for TopoLVM to consume. 
Run `vgs` from an installed node to work out what the VG name is that the OS installer chose. + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-topolvm.yaml`: + +??? example "Example NameSpace (click to expand)" + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: topolvm-system + ``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the official [TopoLVM helm chart](https://github.com/topolvm/topolvm/tree/main/charts/topolvm), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-topolvm.yaml`: + +??? example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: topolvm + namespace: flux-system + spec: + interval: 15m + url: https://topolvm.github.io/topolvm + ``` + +### Kustomization + +Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/topolvm`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-topolvm.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: topolvm--topolvm-system + namespace: flux-system + spec: + interval: 15m + path: ./topolvm-system + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: topolvm-controller + namespace: topolvm-system + - apiVersion: apps/v1 + kind: DaemonSet + name: topolvm-lvmd-0 + namespace: topolvm-system + - apiVersion: apps/v1 + kind: DaemonSet + name: topolvm-node + namespace: topolvm-system + - apiVersion: apps/v1 + kind: DaemonSet + name: topolvm-scheduler + namespace: topolvm-system + ``` + +!!! question "What's with that screwy name?" + > Why'd you call the kustomization `topolvm--topolvm-system`? + + I keep my file and object names as consistent as possible. In most cases, the helm chart is named the same as the namespace, but in some cases, by upstream chart or historical convention, the namespace is different to the chart name. TopoLVM is one of these - the helmrelease/chart name is `topolvm`, but the typical namespace it's deployed in is `topolvm-system`. (*Appending `-system` seems to be a convention used in some cases for applications which support the entire cluster*). To avoid confusion when I list all kustomizations with `kubectl get kustomization -A`, I give these oddballs a name which identifies both the helmrelease and the namespace. + +### ConfigMap + +Now we're into the topolvm-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/topolvm/topolvm/blob/main/charts/topolvm/values.yaml). 
Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `topolvm/configmap-topolvm-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + creationTimestamp: null + name: topolvm-helm-chart-value-overrides + namespace: topolvm + data: + values.yaml: |- + # paste chart values.yaml (indented) here and alter as required> + ``` + +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Then work your way through the values you pasted, and change any which are specific to your configuration. You might want to start off by changing the following to match the name of the [volume group you created above](#volume-group).[^1] + +```yaml hl_lines="10-13" +lvmd: + # lvmd.managed -- If true, set up lvmd service with DaemonSet. + managed: true + + # lvmd.socketName -- Specify socketName. + socketName: /run/topolvm/lvmd.sock + + # lvmd.deviceClasses -- Specify the device-class settings. + deviceClasses: + - name: ssd + volume-group: myvg1 + default: true + spare-gb: 10 +``` + +### HelmRelease + +Lastly, having set the scene above, we define the HelmRelease which will actually deploy TopoLVM into the cluster, with the config we defined above. I save this in my flux repo as `topolvm/helmrelease-topolvm.yaml`: + +??? example "Example HelmRelease (click to expand)" + ```yaml + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: topolvm + namespace: topolvm-system + spec: + chart: + spec: + chart: topolvm + version: 3.x + sourceRef: + kind: HelmRepository + name: topolvm + namespace: flux-system + interval: 15m + timeout: 5m + releaseName: topolvm + valuesFrom: + - kind: ConfigMap + name: topolvm-helm-chart-value-overrides + valuesKey: values.yaml # This is the default, but best to be explicit for clarity + ``` + +--8<-- "kubernetes-why-not-config-in-helmrelease.md" + +## Serving + +### Deploy TopoLVM + +Having committed the above to your flux repository, you should shortly see a topolvm kustomization, and in the `topolvm-system` namespace, a bunch of pods: + +```bash +demo@shredder:~$ kubectl get pods -n topolvm-system +NAME READY STATUS RESTARTS AGE +topolvm-controller-85698b44dd-65fd9 4/4 Running 0 133m +topolvm-controller-85698b44dd-dmncr 4/4 Running 0 133m +topolvm-lvmd-0-98h4q 1/1 Running 0 133m +topolvm-lvmd-0-b29t8 1/1 Running 0 133m +topolvm-lvmd-0-c5vnf 1/1 Running 0 133m +topolvm-lvmd-0-hmmq5 1/1 Running 0 133m +topolvm-lvmd-0-zfldv 1/1 Running 0 133m +topolvm-node-6p4qz 3/3 Running 0 133m +topolvm-node-7vdgt 3/3 Running 0 133m +topolvm-node-mlp4x 3/3 Running 0 133m +topolvm-node-sxtn5 3/3 Running 0 133m +topolvm-node-xf265 3/3 Running 0 133m +topolvm-scheduler-jlwsh 1/1 Running 0 133m +topolvm-scheduler-nj8nz 1/1 Running 0 133m +topolvm-scheduler-tg72z 1/1 Running 0 133m +demo@shredder:~$ +``` + +### How do I know it's working? + +So the controllers etc are running, but how do we know we can actually provision volumes? 
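+
+The test, in outline, is a PVC that uses TopoLVM's StorageClass, plus a pod to consume it - something like the following minimal sketch (*names are illustrative, and `topolvm-provisioner` assumes the chart's default StorageClass name; confirm yours with `kubectl get sc`*). Because the StorageClass (by default) uses `WaitForFirstConsumer` volume binding, the PVC will sit in `Pending` until the pod is scheduled. The following section steps through creating the PVC itself:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: topolvm-test-pvc # illustrative name
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: topolvm-provisioner # assumes the chart's default StorageClass
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: topolvm-test-pod # illustrative name
+spec:
+  containers:
+    - name: test
+      image: busybox
+      command: ["sleep", "3600"]
+      volumeMounts:
+        - name: data
+          mountPath: /data # the TopoLVM-backed volume appears here
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: topolvm-test-pvc
+```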
+ +#### Create PVC + +Create a PVC, by running: + +```bash +cat < pub-cert.pem +``` + +Now generate a kubernetes secret locally, using `kubectl --dry-run=client`, as illustrated below: + +```bash +echo -n batman | kubectl create secret \ + generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json +``` + +The result should look like this: + +```yaml +{ + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "mysecret", + "creationTimestamp": null + }, + "data": { + "foo": "YmF0bWFu" + } +} +``` + +Note that "*YmF0bWFu*", [base64 decoded](https://www.base64decode.org/), will reveal the top-secret secret. Not so secret, Batman! + +Next, pipe the secret (*in json format*) to kubeseal, referencing the public key, and you'll get a totally un-decryptable "sealed" secret in return: + +```bash + echo -n batman | kubectl create secret \ + generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json \ + | kubeseal --cert pub-cert.pem +``` + +Resulting in something like this: + +```json +{ + "kind": "SealedSecret", + "apiVersion": "bitnami.com/v1alpha1", + "metadata": { + "name": "mysecret", + "namespace": "default", + "creationTimestamp": null + }, + "spec": { + "template": { + "metadata": { + "name": "mysecret", + "namespace": "default", + "creationTimestamp": null + }, + "data": null + }, + "encryptedData": { + "foo": "AgAywfMzHx/4QFa3sa68zUbpmejT/MjuHUnfI/p2eo5xFKf2SsdGiRK4q2gl2yaSeEcAlA/P1vKZpsM+Jlh5WqrFxTtJjTYgXilzTSSTkK8hilZMflCnL1xs7ywH/lk+4gHdI7z0QS7FQztc649Z+SP2gjunOmTnRTczyCbzYlYSdHS9bB7xqLvGIofvn4dtQvapiTIlaFKhr+sDNtd8WVVzJ1eLuGgc9g6u1UjhuGa8NhgQnzXBd4zQ7678pKEpkXpUmINEKMzPchp9+ME5tIDASfV/R8rxkKvwN3RO3vbCNyLXw7KXRdyhd276kfHP4p4s9nUWDHthefsh19C6lT0ixup3PiG6gT8eFPa0v4jenxqtKNczmTwN9+dF4ZqHh93cIRvffZ7RS9IUOc9kUObQgvp3fZlo2B4m36G7or30ZfuontBh4h5INQCH8j/U3tXegGwaShGmKWg+kRFYQYC4ZqHCbNQJtvTHWKELQTStoAiyHyM+T36K6nCoJTixGZ/Nq4NzIvVfcp7I8LGzEbRSTdaO+MlTT3d32HjsJplXZwSzygSNrRRGwHKr5wfo5rTTdBVuZ0A1u1a6aQPQiJYSluKZwAIJKGQyfZC5Fbo+NxSxKS8MoaZjQh5VUPB+Q92WoPJoWbqZqlU2JZOuoyDWz5x7ZS812x1etQCy6QmuLYe+3nXOuQx85drJFdNw4KXzoQs2uSA=" + } + } +} +``` + +!!! question "Who set the namespace to default?" + By default, sealed secrets can only be "unsealed" in the same namespace for which the original secret was created. In the example above, we didn't explicitly specity a namespace when creating our secret, so the default namespace was used. + +Apply the sealed secret to the cluster... 
+ +```bash + echo -n batman | kubectl create secret \ + generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json \ + | kubeseal --cert pub-cert.pem \ + | kubectl create -f - +``` + +And watch the sealed-secrets controller decrypt it, and turn it into a regular secrets, using `kubectl logs -n sealed-secrets -l app.kubernetes.io/name=sealed-secrets` + +```bash +2021/11/16 10:37:16 Event(v1.ObjectReference{Kind:"SealedSecret", Namespace:"default", Name:"mysecret", +UID:"82ac8c4b-c167-400e-8768-51957364f6b9", APIVersion:"bitnami.com/v1alpha1", ResourceVersion:"147314", +FieldPath:""}): type: 'Normal' reason: 'Unsealed' SealedSecret unsealed successfully +``` + +Finally, confirm that the secret now exists in the `default` namespace: + +```yaml +root@shredder:/tmp# kubectl get secret mysecret -o yaml +apiVersion: v1 +data: + foo: YmF0bWFu +kind: Secret +metadata: + creationTimestamp: "2021-11-16T10:37:16Z" + name: mysecret + namespace: default + ownerReferences: + - apiVersion: bitnami.com/v1alpha1 + controller: true + kind: SealedSecret + name: mysecret + uid: 82ac8c4b-c167-400e-8768-51957364f6b9 + resourceVersion: "147315" + uid: 6f6ba81c-c9a2-45bc-877c-7a8b50afde83 +type: Opaque +root@shredder:/tmp# +``` + +So we now have a means to store an un-decryptable secret in our flux repo, and have only our cluster be able to convert that sealedsecret into a regular secret! + +Based on our [flux deployment strategy](/kubernetes/deployment/flux/), we simply seal up any necessary secrets into the appropriate folder in the flux repository, and have them decrypted and unsealed into the running cluster. For example, if we needed a secret for metallb called "magic-password", containing a key "location-of-rabbit", we'd do this: + +```bash + kubectl create secret generic magic-password \ + --namespace metallb-system \ + --dry-run=client \ + --from-literal=location-of-rabbit=top-hat -o json \ + | kubeseal --cert pub-cert.pem \ + | kubectl create -f - \ + > /metallb/sealedsecret-magic-password.yaml +``` + +Once flux reconciled the above sealedsecret, the sealedsecrets controller in the cluster would confirm that it's able to decrypt the secret, and would create the corresponding regular secret. + +### Using our own keypair + +One flaw in the process above is that we rely on the sealedsecrets controller to generate its own public/private keypair. This means that the pair (*and therefore all the encrypted secrets*) are specific to this cluster (*and this instance of the sealedsecrets controller*) only. + +To go "fully GitOps", we'd want to be able to rebuild our entire cluster "from scratch" using our flux repository. If the keypair is recreated when a new cluster is built, then the existing sealedsecrets would remain forever "sealed".. + +The solution here is to [generate our own public/private keypair](https://github.com/bitnami-labs/sealed-secrets/blob/main/docs/bring-your-own-certificates.md), and to store the private key safely and securely outside of the flux repo[^1]. We'll only need the key once, when deploying a fresh instance of the sealedsecrets controller. 
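+
+One way to generate a suitable keypair is with `openssl` (*a sketch only - filenames and validity period are arbitrary, and the upstream "bring your own certificates" doc linked above covers the details*):
+
+```bash
+# Generate a self-signed cert plus RSA private key, valid for ~10 years.
+# mytls.key is the private key: store it somewhere safe, NOT in your flux repo!
+openssl req -x509 -nodes -newkey rsa:4096 -days 3650 \
+  -keyout mytls.key -out mytls.crt \
+  -subj "/CN=sealed-secret/O=sealed-secret"
+```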
+ +Once you've got the public/private key pair, create them as kubernetes secrets directly in the cluster, like this: + +```bash +kubectl -n sealed-secrets create secret tls my-own-certs \ + --cert="" --key="" +``` + +And then "label" the secret you just created, so that the sealedsecrets controller knows that it's special: + +```bash +kubectl -n sealed-secrets label secret my-own-certs \ + sealedsecrets.bitnami.com/sealed-secrets-key=active +``` + +Restart the sealedsecret controller deployment, to force it to detect the new secret: + +```bash +root@shredder:~# kubectl rollout restart -n sealed-secrets deployment sealed-secrets +deployment.apps/sealed-secrets restarted +root@shredder:~# +``` + +And now when you create your seadsecrets, refer to the public key you just created using `--cert `. These secrets will be decryptable by **any** sealedsecrets controller bootstrapped with the same keypair (*above*). + +--8<-- "recipe-footer.md" + +[^1]: There's no harm in storing the **public** key in the repo though, which means it's easy to refer to when sealing secrets. diff --git a/manuscript/kubernetes/ssl-certificates/cert-manager.md b/manuscript/kubernetes/ssl-certificates/cert-manager.md new file mode 100644 index 0000000..9469678 --- /dev/null +++ b/manuscript/kubernetes/ssl-certificates/cert-manager.md @@ -0,0 +1,140 @@ +--- +description: Cert Manager generates and renews LetsEncrypt certificates +--- +# Cert Manager + +To interact with your cluster externally, you'll almost certainly be using a web browser, and you'll almost certainly be wanting your browsing session to be SSL-secured. Some Ingress Controllers (i.e. Traefik) will include a default, self-signed, nasty old cert which will permit you to use SSL, but it's faaaar better to use valid certs. + +Cert Manager adds certificates and certificate issuers as resource types in Kubernetes clusters, and simplifies the process of obtaining, renewing and using those certificates. + +![Sealed Secrets illustration](../../../../images/cert-manager.svg) + +It can issue certificates from a variety of supported sources, including Let’s Encrypt, HashiCorp Vault, and Venafi as well as private PKI. + +It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + +## Preparation + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-cert-manager.yaml`: + +??? example "Example Namespace (click to expand)" + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: cert-manager + ``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-jetstack.yaml`: + +??? 
example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: jetstack + namespace: flux-system + spec: + interval: 15m + url: https://charts.jetstack.io + ``` + +### Kustomization + +Now that the "global" elements of this deployment (*just the HelmRepository in this case*z*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/cert-manager`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-cert-manager.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: cert-manager + namespace: flux-system + spec: + interval: 15m + path: ./cert-manager + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: cert-manager + namespace: cert-manager + ``` + +### ConfigMap + +Now we're into the cert-manager-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami-labs/cert-manager/blob/main/helm/cert-manager/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `cert-manager/configmap-cert-manager-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: cert-manager-helm-chart-value-overrides + namespace: cert-manager + data: + values.yaml: |- + # paste chart values.yaml (indented) here and alter as required> + ``` +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Then work your way through the values you pasted, and change any which are specific to your configuration. + +### HelmRelease + +Lastly, having set the scene above, we define the HelmRelease which will actually deploy the cert-manager controller into the cluster, with the config we defined above. I save this in my flux repo as `cert-manager/helmrelease-cert-manager.yaml`: + +??? example "Example HelmRelease (click to expand)" + ```yaml + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: cert-manager + namespace: cert-manager + spec: + chart: + spec: + chart: cert-manager + version: 1.6.x + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + interval: 15m + timeout: 5m + releaseName: cert-manager + valuesFrom: + - kind: ConfigMap + name: cert-manager-helm-chart-value-overrides + valuesKey: values.yaml # This is the default, but best to be explicit for clarity + ``` + +--8<-- "kubernetes-why-not-config-in-helmrelease.md" + +## Serving + +Once you've committed your YAML files into your repo, you should soon see some pods appear in the `cert-manager` namespace! + +What do we have now? Well, we've got the cert-manager controller **running**, but it won't **do** anything until we define some certificate issuers, credentials, and certificates.. 
+ +### Troubleshooting + +If your certificate is not created **aren't** created as you expect, then the best approach is to check the cert-manager logs, by running `kubectl logs -n cert-manager -l app.kubernetes.io/name=cert-manager`. + +--8<-- "recipe-footer.md" + +[^1]: Why yes, I **have** accidentally rate-limited myself by deleting/recreating my prod certificates a few times! diff --git a/manuscript/kubernetes/ssl-certificates/index.md b/manuscript/kubernetes/ssl-certificates/index.md new file mode 100644 index 0000000..691893d --- /dev/null +++ b/manuscript/kubernetes/ssl-certificates/index.md @@ -0,0 +1,22 @@ +# SSL Certificates + +When you expose applications running within your cluster to the outside world, you're going to want to protect these with SSL certificates. Typically, this'll be SSL certificates used by browsers to access your Ingress resources over HTTPS, but SSL certificates would be used for other externally-facing services, for example OpenLDAP, docker-mailserver, etc. + +!!! question "Why do I need SSL if it's just internal?" + It's true that you could expose applications via HTTP only, and **not** bother with SSL. By doing so, however, you "train yourself"[^1] to ignore SSL certificates / browser security warnings. + + One day, this behaviour will bite you in the ass. + + If you want to be a person who relies on privacy and security, then insist on privacy and security **everywhere**. + + Plus, once you put in the effort to setup automated SSL certificates _once_, it's literally **no** extra effort to use them everywhere! + +I've split this section, conceptually, into 3 separate tasks: + +1. Setup [Cert Manager](/kubernetes/ssl-certificates/cert-manager/), a controller whose job it is to request / renew certificates +2. Setup "[Issuers](/kubernetes/ssl-certificates/letsencrypt-issuers/)" for LetsEncrypt, which Cert Manager will use to request certificates +3. Setup a [wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/) in such a way that it can be used by Ingresses like Traefik or Ngnix + +--8<-- "recipe-footer.md" + +[^1]: I had a really annoying but smart boss once who taught me this. Hi Mark! :wave: diff --git a/manuscript/kubernetes/ssl-certificates/letsencrypt-issuers.md b/manuscript/kubernetes/ssl-certificates/letsencrypt-issuers.md new file mode 100644 index 0000000..9664485 --- /dev/null +++ b/manuscript/kubernetes/ssl-certificates/letsencrypt-issuers.md @@ -0,0 +1,109 @@ +# LetsEncrypt Issuers + +Certificates are issued by certificate authorities. By far the most common issuer will be LetsEncrypt. + +In order for Cert Manager to request/renew certificates, we have to tell it about our **Issuers**. + +!!! note + There's a minor distinction between an **Issuer** (*only issues certificates within one namespace*) and a **ClusterIssuer** (*issues certificates throughout the cluster*). Typically a **ClusterIssuer** will be suitable. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates + * [x] API credentials for a [supported DNS01 provider](https://cert-manager.io/docs/configuration/acme/dns01/) for LetsEncrypt wildcard certs + +## Preparation + +### LetsEncrypt Staging + +The ClusterIssuer resource below represents a certificate authority which is able to request certificates for any namespace within the cluster. 
+I save this in my flux repo as `cert-manager/cluster-issuer-letsencrypt-staging.yaml`. I've highlighted the areas you'll need to pay attention to: + +???+ example "ClusterIssuer for LetsEncrypt Staging" + ```yaml hl_lines="8 15 17-21" + apiVersion: cert-manager.io/v1 + kind: ClusterIssuer + metadata: + name: letsencrypt-staging + spec: + acme: + email: batman@example.com + server: https://acme-staging-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - selector: + dnsZones: + - "example.com" + dns01: + cloudflare: + email: batman@example.com + apiTokenSecretRef: + name: cloudflare-api-token-secret + key: api-token + ``` + +Deploying this issuer YAML into the cluster would provide Cert Manager with the details necessary to start issuing certificates from the LetsEncrypt staging server (*always good to test in staging first!*) + +!!! note + The example above is specific to [Cloudflare](https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/), but the syntax for [other providers](https://cert-manager.io/docs/configuration/acme/dns01/) is similar. + +### LetsEncrypt Prod + +As you'd imagine, the "prod" version of the LetsEncrypt issues is very similar, and I save this in my flux repo as `cert-manager/cluster-issuer-letsencrypt-prod.yaml` + +???+ example "ClusterIssuer for LetsEncrypt Prod" + ```yaml hl_lines="8 15 17-21" + apiVersion: cert-manager.io/v1 + kind: ClusterIssuer + metadata: + name: letsencrypt-prod + spec: + acme: + email: batman@example.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - selector: + dnsZones: + - "example.com" + dns01: + cloudflare: + email: batman@example.com + apiTokenSecretRef: + name: cloudflare-api-token-secret + key: api-token + ``` + +!!! note + You'll note that there are two secrets referred to above - `privateKeySecretRef`, referencing `letsencrypt-prod` is for cert-manager to populate as a result of its ACME schenanigans - you don't have to do anything about this particular secret! The cloudflare-specific secret (*and this will change based on your provider*) is expected to be found in the same namespace as the certificate we'll be issuing, and will be discussed when we create our [wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/). + +## Serving + +### How do we know it works? + +We're not quite ready to issue certificates yet, but we can now test whether the Issuers are configured correctly for LetsEncrypt. To check their status, **describe** the ClusterIssuers (i.e., `kubectl describe clusterissuer -n cert-manager letsencrypt-prod`), which (*truncated*) shows something like this: + +```yaml +Status: + Acme: + Last Registered Email: admin@example.com + Uri: https://acme-v02.api.letsencrypt.org/acme/acct/34523 + Conditions: + Last Transition Time: 2021-11-18T22:54:20Z + Message: The ACME account was registered with the ACME server + Observed Generation: 1 + Reason: ACMEAccountRegistered + Status: True + Type: Ready +Events: +``` + +Provided your account is registered, you're ready to proceed with [creating a wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/)! + +--8<-- "recipe-footer.md" + +[^1]: Since a ClusterIssuer is not a namespaced resource, it doesn't exist in any specific namespace. Therefore, my assumption is that the `apiTokenSecretRef` secret is only "looked for" when a certificate (*which __is__ namespaced*) requires validation. 
diff --git a/manuscript/kubernetes/ssl-certificates/secret-replicator.md b/manuscript/kubernetes/ssl-certificates/secret-replicator.md new file mode 100644 index 0000000..bc90834 --- /dev/null +++ b/manuscript/kubernetes/ssl-certificates/secret-replicator.md @@ -0,0 +1,175 @@ +# Secret Replicator + +As explained when creating our [LetsEncrypt Wildcard certificates](/kubernetes/ssl-certificates/letsencrypt-wildcard/), it can be problematic that Certificates can't be **shared** between namespaces. One simple solution to this problem is simply to "replicate" secrets from one "source" namespace into all other namespaces. + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] [secret-replicator](/kubernetes/secret-replicator/) deployed to request/renew certificates + * [x] [LetsEncrypt Wildcard Certificates](/kubernetes/ssl-certificates/letsencrypt-wildcard/) created in the `letsencrypt-wildcard-cert` namespace + +Kiwigrid's "[Secret Replicator](https://github.com/kiwigrid/secret-replicator)" is a simple controller which replicates secrets from one namespace to another.[^1] + +## Preparation + +### Namespace + +We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-secret-replicator.yaml`: + +??? example "Example Namespace (click to expand)" + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: secret-replicator +``` + +### HelmRepository + +Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-kiwigrid.yaml`: + +??? example "Example HelmRepository (click to expand)" + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta1 + kind: HelmRepository + metadata: + name: kiwigrid + namespace: flux-system + spec: + interval: 15m + url: https://kiwigrid.github.io + ``` + +### Kustomization + +Now that the "global" elements of this deployment have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/secret-replicator`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-secret-replicator.yaml`: + +??? example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: secret-replicator + namespace: flux-system + spec: + interval: 15m + path: ./secret-replicator + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: secret-replicator + namespace: secret-replicator + ``` + +### ConfigMap + +Now we're into the secret-replicator-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/kiwigrid/helm-charts/blob/master/charts/secret-replicator/values.yaml). 
Paste the values into a `values.yaml` key as illustrated below, indented 4 tabs (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `secret-replicator/configmap-secret-replicator-helm-chart-value-overrides.yaml`: + +??? example "Example ConfigMap (click to expand)" + ```yaml hl_lines="21 27" + apiVersion: v1 + kind: ConfigMap + metadata: + name: secret-replicator-helm-chart-value-overrides + namespace: secret-replicator + data: + values.yaml: |- + # Default values for secret-replicator. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + image: + repository: kiwigrid/secret-replicator + tag: 0.2.0 + pullPolicy: IfNotPresent + ## Specify ImagePullSecrets for Pods + ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + # pullSecrets: myregistrykey + + # csv list of secrets + secretList: "letsencrypt-wildcard-cert" + # secretList: "secret1,secret2 + + ignoreNamespaces: "kube-system,kube-public" + + # If defined, allow secret-replicator to watch for secrets in _another_ namespace + secretNamespace: letsencrypt-wildcard-cert" + + rbac: + enabled: true + + resources: {} + # limits: + # cpu: 50m + # memory: 20Mi + # requests: + # cpu: 20m + # memory: 20Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + ``` +--8<-- "kubernetes-why-full-values-in-configmap.md" + +Note that the following values changed from default, above: + +* `secretList`: `letsencrypt-wildcard-cert` +* `secretNamespace`: `letsencrypt-wildcard-cert` + +### HelmRelease + +Lastly, having set the scene above, we define the HelmRelease which will actually deploy the secret-replicator controller into the cluster, with the config we defined above. I save this in my flux repo as `secret-replicator/helmrelease-secret-replicator.yaml`: + +??? example "Example HelmRelease (click to expand)" + ```yaml + apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + metadata: + name: secret-replicator + namespace: secret-replicator + spec: + chart: + spec: + chart: secret-replicator + version: 0.6.x + sourceRef: + kind: HelmRepository + name: kiwigrid + namespace: flux-system + interval: 15m + timeout: 5m + releaseName: secret-replicator + valuesFrom: + - kind: ConfigMap + name: secret-replicator-helm-chart-value-overrides + valuesKey: values.yaml # This is the default, but best to be explicit for clarity + ``` + +--8<-- "kubernetes-why-not-config-in-helmrelease.md" + +## Serving + +Once you've committed your YAML files into your repo, you should soon see some pods appear in the `secret-replicator` namespace! + +### How do we know it worked? + +Look for secrets across the whole cluster, by running `kubectl get secrets -A | grep letsencrypt-wildcard-cert`. What you should see is an identical secret in every namespace. Note that the **Certificate** only exists in the `letsencrypt-wildcard-cert` namespace, but the secret it **generates** is what gets replicated to every other namespace. + +### Troubleshooting + +If your certificate is not created **aren't** created as you expect, then the best approach is to check the secret-replicator logs, by running `kubectl logs -n secret-replicator -l app.kubernetes.io/name=secret-replicator`. 
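+
+It can also help to confirm that a replicated copy actually matches the source secret - if replication is working, the certificate data should be byte-identical (*a sketch; the target namespace below is illustrative, and adjust the secret name to whatever you configured in `secretList`*):
+
+```bash
+# Hash the certificate in the "source" namespace...
+kubectl get secret letsencrypt-wildcard-cert -n letsencrypt-wildcard-cert \
+  -o jsonpath='{.data.tls\.crt}' | sha256sum
+
+# ...and in a namespace it should have been replicated to. The hashes should match.
+kubectl get secret letsencrypt-wildcard-cert -n default \
+  -o jsonpath='{.data.tls\.crt}' | sha256sum
+```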
+ +--8<-- "recipe-footer.md" + +[^1]: To my great New Zealandy confusion, "Kiwigrid GmbH" is a German company :shrug: diff --git a/manuscript/kubernetes/ssl-certificates/wildcard-certificate.md b/manuscript/kubernetes/ssl-certificates/wildcard-certificate.md new file mode 100644 index 0000000..40795e6 --- /dev/null +++ b/manuscript/kubernetes/ssl-certificates/wildcard-certificate.md @@ -0,0 +1,156 @@ +# Wildcard Certificate + +Now that we have an [Issuer](/kubernetes/ssl-certificates/letsencrypt-issuers/) and the necessary credentials, we can create a wildcard certificate, which we can then feed to our [Ingresses](/kubernetes/ingress/). + +!!! summary "Ingredients" + + * [x] A [Kubernetes cluster](/kubernetes/cluster/) + * [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped + * [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates + * [x] [LetsEncrypt ClusterIssuers](/kubernetes/ssl-certificates/letsencrypt-issuers/) created using DNS01 validation solvers + +Certificates are Kubernetes secrets, and so are subject to the same limitations / RBAC controls as other secrets. Importantly, they are **namespaced**, so it's not possible to refer to a secret in one namespace, from a pod in **another** namespace. This restriction also applies to Ingress resources (*although there are workarounds*) - An Ingress can only refer to TLS secrets in its own namespace. + +This behaviour can be prohibitive, because (a) we don't want to have to request/renew certificates for every single FQDN served by our cluster, and (b) we don't want more than one wildcard certificate if possible, to avoid being rate-limited at request/renewal time. + +To take advantage of the various workarounds available, I find it best to put the certificates into a dedicated namespace, which I name.. `letsencrypt-wildcard-cert`. + +!!! question "Why not the cert-manager namespace?" + Because cert-manager is a _controller_, whose job it is to act on resources. I should be able to remove cert-manager entirely (even its namespace) from my cluster, and re-add it, without impacting the resources it acts upon. If the certificates lived in the `cert-manager` namespace, then I wouldn't be able to remove the namespace without also destroying the certificates. + +## Preparation + +### Namespace + +We need a namespace to deploy our certificates and associated secrets into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-letsencrypt-wildcard-cert.yaml`: + +??? example "Example Namespace (click to expand)" + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: letsencrypt-wildcard-cert + ``` + +### Kustomization + +Now we need a kustomization to tell Flux to install any YAMLs it finds in `/letsencrypt-wildcard-cert`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-letsencrypt-wildcard-cert.yaml`. + +!!! tip + Importantly, note that we define a **dependsOn**, to tell Flux not to try to reconcile this kustomization before the cert-manager and sealedsecrets kustomizations are reconciled. Cert-manager creates the CRDs used to define certificates, so prior to Cert Manager being installed, the cluster won't know what to do with the ClusterIssuers/Certificate resources. + +??? 
example "Example Kustomization (click to expand)" + ```yaml + apiVersion: kustomize.toolkit.fluxcd.io/v1beta1 + kind: Kustomization + metadata: + name: letsencrypt-wildcard-cert + namespace: flux-system + spec: + interval: 15m + path: ./letsencrypt-wildcard-cert + dependsOn: + - name: "cert-manager" + - name: "sealed-secrets" + prune: true # remove any elements later removed from the above path + timeout: 2m # if not set, this defaults to interval duration, which is 1h + sourceRef: + kind: GitRepository + name: flux-system + validation: server + ``` + +### DNS01 Validation Secret + +The simplest way to validate ownership of a domain to LetsEncrypt is to use DNS-01 validation. In this mode, we "prove" our ownership of a domain name by creating a special TXT record, which LetsEncrypt will check and confirm for validity, before issuing us any certificates for that domain name. + +The [ClusterIssuers we created earlier](/kubernetes/ssl-certificates/letsencrypt-issuers/) included a field `solvers.dns01.cloudflare.apiTokenSecretRef.name`. This value points to a secret (*in the same namespace as the certificate[^1]*) containing credentials necessary to create DNS records automatically. (*again, my examples are for cloudflare, but the [other supported providers](https://cert-manager.io/docs/configuration/acme/dns01/) will have similar secret requirements*) + +Thanks to [Sealed Secrets](/kubernetes/sealed-secrets/), we have a safe way of committing secrets into our repository, so to create necessary secret, you'd run something like this: + +```bash + kubectl create secret generic cloudflare-api-token-secret \ + --namespace letsencrypt-wildcard-cert \ + --dry-run=client \ + --from-literal=api-token=gobbledegook -o json \ + | kubeseal --cert \ + | kubectl create -f - \ + > /letsencrypt-wildcard-cert/sealedsecret-cloudflare-api-token-secret.yaml +``` + +### Staging Certificate + +Finally, we create our certificates! Here's an example certificate resource which uses the letsencrypt-staging issuer (*to avoid being rate-limited while learning!*). I save this in my flux repo as `/letsencrypt-wildcard-cert/certificate-wildcard-cert-letsencrypt-staging.yaml` + +???+ example "Example certificate requested from LetsEncrypt staging" + + ```yaml + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: letsencrypt-wildcard-cert-example.com-staging + namespace: letsencrypt-wildcard-cert + spec: + # secretName doesn't have to match the certificate name, but it may as well, for simplicity! + secretName: letsencrypt-wildcard-cert-example.com-staging + issuerRef: + name: letsencrypt-staging + kind: ClusterIssuer + dnsNames: + - "example.com" + - "*.example.com" + ``` + +## Serving + +### Did it work? + +After committing the above to the repo, provided the YAML syntax is correct, you should end up with a "Certificate" resource in the `letsencrypt-wildcard-cert` namespace. This doesn't mean that the certificate has been issued by LetsEncrypt yet though - describe the certificate for more details, using `kubectl describe certificate -n letsencrypt-wildcard-cert letsencrypt-wildcard-cert-staging`. 
The `status` field will show you whether the certificate is issued or not: + +```yaml +Status: + Conditions: + Last Transition Time: 2021-11-19T01:09:32Z + Message: Certificate is up to date and has not expired + Observed Generation: 1 + Reason: Ready + Status: True + Type: Ready + Not After: 2022-02-17T00:09:26Z + Not Before: 2021-11-19T00:09:27Z + Renewal Time: 2022-01-18T00:09:26Z + Revision: 1 +``` + +### Troubleshooting + +If your certificate does not become `Ready` within a few minutes [^1], try watching the logs of cert-manager to identify the issue, using `kubectl logs -f -n cert-manager -l app.kubernetes.io/name=cert-manager`. + +### Production Certificate + +Once you know you can happily deploy a staging certificate, it's safe enough to attempt your "prod" certificate. I save this in my flux repo as `/letsencrypt-wildcard-cert/certificate-wildcard-cert-letsencrypt-prod.yaml` + +???+ example "Example certificate requested from LetsEncrypt prod" + + ```yaml + apiVersion: cert-manager.io/v1 + kind: Certificate + metadata: + name: letsencrypt-wildcard-cert-example.com + namespace: letsencrypt-wildcard-cert + spec: + # secretName doesn't have to match the certificate name, but it may as well, for simplicity! + secretName: letsencrypt-wildcard-cert-example.com + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "example.com" + - "*.example.com" + ``` + +Commit the certificate and follow the steps above to confirm that your prod certificate has been issued. + +--8<-- "recipe-footer.md" + +[^1]: This process can take a frustratingly long time, and watching the cert-manager logs at least gives some assurance that it's progressing! diff --git a/manuscript/recipes/kubernetes/kanboard.md b/manuscript/recipes/kubernetes/kanboard.md deleted file mode 100644 index 87e6e30..0000000 --- a/manuscript/recipes/kubernetes/kanboard.md +++ /dev/null @@ -1,261 +0,0 @@ -# Kanboard - -Kanboard is a Kanban tool, developed by [FrΓ©dΓ©ric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipes/miniflux/)_) - -![Kanboard Screenshot](/images/kanboard.png) - -Features include: - -* Visualize your work -* Limit your work in progress to be more efficient -* Customize your boards according to your business activities -* Multiple projects with the ability to drag and drop tasks -* Reports and analytics -* Fast and simple to use -* Access from anywhere with a modern browser -* Plugins and integrations with external services -* Free, open source and self-hosted -* Super simple installation - -## Ingredients - -1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/) -2. A DNS name for your kanboard instance (*kanboard.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress - -## Preparation - -### Prepare traefik for namespace - -When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. 
Update ```values.yml``` to include the *kanboard* namespace, as illustrated below: - -```yaml - -kubernetes: - namespaces: - - kube-system - - nextcloud - - kanboard - - miniflux - -``` - -If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods``` - -### Create data locations - -Although we could simply bind-mount local volumes to a local Kubuernetes cluster, since we're targetting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment. - -```bash -mkdir /var/data/config/kanboard -``` - -### Create namespace - -We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the kanboard stack with the following .yml: - -```bash -cat < /var/data/config/kanboard/namespace.yml -apiVersion: v1 -kind: Namespace -metadata: - name: kanboard -EOF -kubectl create -f /var/data/config/kanboard/namespace.yaml -``` - -### Create persistent volume claim - -Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the kanboard app and plugin data: - -```bash -cat < /var/data/config/kanboard/persistent-volumeclaim.yml -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: kanboard-volumeclaim - namespace: kanboard - annotations: - backup.kubernetes.io/deltas: P1D P7D -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -EOF -kubectl create -f /var/data/config/kanboard/kanboard-volumeclaim.yaml -``` - -!!! question "What's that annotation about?" - The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days. - -### Create ConfigMap - -Kanboard's configuration is all contained within ```config.php```, which needs to be presented to the container. We _could_ maintain ```config.php``` in the persistent volume we created above, but this would require manually accessing the pod every time we wanted to make a change. - -Instead, we'll create ```config.php``` as a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), meaning it "lives" within the Kuberetes cluster and can be **presented** to our pod. When we want to make changes, we simply update the ConfigMap (*delete and recreate, to be accurate*), and relaunch the pod. - -Grab a copy of [config.default.php](https://github.com/kanboard/kanboard/blob/master/config.default.php), save it to ```/var/data/config/kanboard/config.php```, and customize it per [the guide](https://docs.kanboard.org/en/latest/admin_guide/config_file.html). 
- -At the very least, I'd suggest making the following changes: - -```php -define('PLUGIN_INSTALLER', true); // Yes, I want to install plugins using the UI -define('ENABLE_URL_REWRITE', false); // Yes, I want pretty URLs -``` - -Now create the configmap from config.php, by running ```kubectl create configmap -n kanboard kanboard-config --from-file=config.php``` - -## Serving - -Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and a [configmap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), we can create a [deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [service](https://kubernetes.io/docs/concepts/services-networking/service/), and [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the kanboard [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). - -### Create deployment - -Create a deployment to tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Note below that we mount the persistent volume **twice**, to both ```/var/www/app/data``` and ```/var/www/app/plugins```, using the subPath value to differentiate them. This trick avoids us having to provision **two** persistent volumes just for data mounted in 2 separate locations. - ---8<-- "premix-cta.md" - -```bash -cat < /var/data/kanboard/deployment.yml -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - namespace: kanboard - name: app - labels: - app: app -spec: - replicas: 1 - selector: - matchLabels: - app: app - template: - metadata: - labels: - app: app - spec: - containers: - - image: kanboard/kanboard - name: app - volumeMounts: - - name: kanboard-config - mountPath: /var/www/app/config.php - subPath: config.php - - name: kanboard-app - mountPath: /var/www/app/data - subPath: data - - name: kanboard-app - mountPath: /var/www/app/plugins - subPath: plugins - volumes: - - name: kanboard-app - persistentVolumeClaim: - claimName: kanboard-app - - name: kanboard-config - configMap: - name: kanboard-config -EOF -kubectl create -f /var/data/kanboard/deployment.yml -``` - -Check that your deployment is running, with ```kubectl get pods -n kanboard```. After a minute or so, you should see a "Running" pod, as illustrated below: - -```bash -[funkypenguin:~] % kubectl get pods -n kanboard -NAME READY STATUS RESTARTS AGE -app-79f97f7db6-hsmfg 1/1 Running 0 11d -[funkypenguin:~] % -``` - -### Create service - -The service resource "advertises" the availability of TCP port 80 in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements. - -```bash -cat < /var/data/kanboard/service.yml -kind: Service -apiVersion: v1 -metadata: - name: app - namespace: kanboard -spec: - selector: - app: app - ports: - - protocol: TCP - port: 80 - clusterIP: None -EOF -kubectl create -f /var/data/kanboard/service.yml -``` - -Check that your service is deployed, with ```kubectl get services -n kanboard```. 
You should see something like this: - -```bash -[funkypenguin:~] % kubectl get service -n kanboard -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -app ClusterIP None 80/TCP 38d -[funkypenguin:~] % -``` - -### Create ingress - -The ingress resource tells Traefik what to forward inbound requests for *kanboard.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain. - -```bash -cat < /var/data/kanboard/ingress.yml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: app - namespace: kanboard - annotations: - kubernetes.io/ingress.class: traefik -spec: - rules: - - host: kanboard.example.com - http: - paths: - - backend: - serviceName: app - servicePort: 80 -EOF -kubectl create -f /var/data/kanboard/ingress.yml -``` - -Check that your service is deployed, with ```kubectl get ingress -n kanboard```. You should see something like this: - -```bash -[funkypenguin:~] % kubectl get ingress -n kanboard -NAME HOSTS ADDRESS PORTS AGE -app kanboard.funkypenguin.co.nz 80 38d -[funkypenguin:~] % -``` - -### Access Kanboard - -At this point, you should be able to access your instance on your chosen DNS name (*i.e. *) - -### Updating config.php - -Since ```config.php``` is a ConfigMap now, to update it, make your local changes, and then delete and recreate the ConfigMap, by running: - -```bash -kubectl delete configmap -n kanboard kanboard-config -kubectl create configmap -n kanboard kanboard-config --from-file=config.php -``` - -Then, in the absense of any other changes to the deployement definition, force the pod to restart by issuing a "null patch", as follows: - -```bash -kubectl patch -n kanboard deployment app -p "{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"date\":\"`date +'%s'`\"}}}}}" -``` - -### Troubleshooting - -To look at the Kanboard pod's logs, run ```kubectl logs -n kanboard -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/). - -[^1]: The simplest deployment of Kanboard uses the default SQLite database backend, stored on the persistent volume. You can convert this to a "real" database running MySQL or PostgreSQL, and running an an additional database pod and service. Contact me if you'd like further details ;) - ---8<-- "recipe-footer.md" diff --git a/manuscript/recipes/kubernetes/wip.md b/manuscript/recipes/kubernetes/wip.md new file mode 100644 index 0000000..26b4f23 --- /dev/null +++ b/manuscript/recipes/kubernetes/wip.md @@ -0,0 +1,314 @@ +# Miniflux + +Miniflux is a lightweight RSS reader, developed by [FrΓ©dΓ©ric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_) + +![Miniflux Screenshot](/images/miniflux.png) + +I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate: + +* Compatible with the Fever API, read your feeds through existing mobile and desktop clients (_This is the killer feature for me. 
I hardly ever read RSS on my desktop, I typically read on my iPhone or iPad, using [Fiery Feeds](http://cocoacake.net/apps/fiery/) or my new squeeze, [Unread](https://www.goldenhillsoftware.com/unread/)_)
+* Send your bookmarks to Pinboard, Wallabag, Shaarli or Instapaper (_I use this to automatically pin my bookmarks for collection on my [blog](https://www.funkypenguin.co.nz/)_)
+* Feeds can be configured to download a "full" version of the content (_rather than an excerpt_)
+* Use the Bookmarklet to subscribe to a website directly from any browser
+
+!!! abstract "2.0+ is a bit different"
+    [Some things changed](https://docs.miniflux.net/en/latest/migration.html) when Miniflux 2.0 was released. For one thing, the only supported database is now PostgreSQL (_no more SQLite_). External themes are gone, as is PHP (_in favor of golang_). It's been a controversial change, but I'm keen on minimal and single-purpose, so I'm still very happy with the direction of development. The developer has laid out his [opinions](https://docs.miniflux.net/en/latest/opinionated.html) on the decisions he's made in the course of development.
+
+## Ingredients
+
+1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/)
+2. A DNS name for your miniflux instance (*miniflux.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress
+
+## Preparation
+
+### Prepare traefik for namespace
+
+When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *miniflux* namespace, as illustrated below:
+
+```yaml
+kubernetes:
+  namespaces:
+    - kube-system
+    - nextcloud
+    - kanboard
+    - miniflux
+```
+
+If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods```
+
+### Create data locations
+
+Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment.
+
+```bash
+mkdir /var/data/config/miniflux
+```
+
+### Create namespace
+
+We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the miniflux stack with the following .yml:
+
+```bash
+cat <<EOF > /var/data/config/miniflux/namespace.yml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: miniflux
+EOF
+kubectl create -f /var/data/config/miniflux/namespace.yml
+```
+
+### Create persistent volume claim
+
+Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the miniflux postgres database:
+
+```bash
+cat <<EOF > /var/data/config/miniflux/db-persistent-volumeclaim.yml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: miniflux-db
+  namespace: miniflux
+  annotations:
+    backup.kubernetes.io/deltas: P1D P7D
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+kubectl create -f /var/data/config/miniflux/db-persistent-volumeclaim.yml
+```
+
+!!! question "What's that annotation about?"
+    The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days.
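+
+If you want to confirm that the claim was created before carrying on, a quick check looks something like this (_a minimal sketch, assuming your kubectl context already points at the cluster you're deploying to_):
+
+```bash
+# List claims in the miniflux namespace; depending on your StorageClass's
+# volumeBindingMode, the claim may show "Pending" until the db pod first mounts it
+kubectl get pvc -n miniflux
+
+# If anything looks wrong, the events at the bottom of the describe output
+# usually explain why provisioning failed
+kubectl describe pvc miniflux-db -n miniflux
+```
+
+Don't panic if the claim sits in ```Pending``` at this stage; some provisioners only bind a volume once the first pod tries to mount it.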
+
+### Create secrets
+
+It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents. Run the following, replacing ```imtoosexyformyadminpassword```, and the ```mydbpass``` value in **both** postgres-password.secret **and** database-url.secret:
+
+```bash
+echo -n "imtoosexyformyadminpassword" > admin-password.secret
+echo -n "mydbpass" > postgres-password.secret
+echo -n "postgres://miniflux:mydbpass@db/miniflux?sslmode=disable" > database-url.secret
+
+kubectl create secret -n miniflux generic miniflux-credentials \
+  --from-file=admin-password.secret \
+  --from-file=postgres-password.secret \
+  --from-file=database-url.secret
+```
+
+!!! tip "Why use ```echo -n```?"
+    Because. See [my blog post here](https://www.funkypenguin.co.nz/blog/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why!
+
+## Serving
+
+Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), we can create [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [services](https://kubernetes.io/docs/concepts/services-networking/service/), and an [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the miniflux [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
+
+### Create db deployment
+
+Deployments tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Create the db deployment by executing the following. Note that the deployment refers to the secrets created above.
+
+--8<-- "premix-cta.md"
+
+```bash
+cat <<EOF > /var/data/config/miniflux/db-deployment.yml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: miniflux
+  name: db
+  labels:
+    app: db
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: db
+  template:
+    metadata:
+      labels:
+        app: db
+    spec:
+      containers:
+        - image: postgres:11
+          name: db
+          volumeMounts:
+            - name: miniflux-db
+              mountPath: /var/lib/postgresql/data
+          env:
+            - name: POSTGRES_USER
+              value: "miniflux"
+            - name: POSTGRES_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: miniflux-credentials
+                  key: postgres-password.secret
+      volumes:
+        - name: miniflux-db
+          persistentVolumeClaim:
+            claimName: miniflux-db
+EOF
+kubectl create -f /var/data/config/miniflux/db-deployment.yml
+```
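+
+Since both the db and app deployments read their credentials from this secret, it's worth a quick sanity-check before continuing (_a minimal sketch, assuming the secret was created in the miniflux namespace as above; the trailing-newline check relates to the ```echo -n``` tip earlier_):
+
+```bash
+# Confirm the secret exists and contains the three expected keys (sizes only, no values)
+kubectl describe secret miniflux-credentials -n miniflux
+
+# Decode one value and inspect the final characters; a trailing "\n" here is
+# exactly the invisible-newline problem described in the tip above
+kubectl get secret miniflux-credentials -n miniflux \
+  -o jsonpath='{.data.postgres-password\.secret}' | base64 --decode | od -c | tail -n 2
+```
+
+If you spot a stray newline, recreate the offending file with ```echo -n``` and re-create the secret before moving on.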
+
+### Create app deployment
+
+Create the app deployment by executing the following. Again, note that the deployment refers to the secrets created above.
+
+```bash
+cat <<EOF > /var/data/config/miniflux/app-deployment.yml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: miniflux
+  name: app
+  labels:
+    app: app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: app
+  template:
+    metadata:
+      labels:
+        app: app
+    spec:
+      containers:
+        - image: miniflux/miniflux
+          name: app
+          env:
+            # This is necessary for miniflux to update the db schema, even on an empty DB
+            - name: CREATE_ADMIN
+              value: "1"
+            - name: RUN_MIGRATIONS
+              value: "1"
+            - name: ADMIN_USERNAME
+              value: "admin"
+            - name: ADMIN_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: miniflux-credentials
+                  key: admin-password.secret
+            - name: DATABASE_URL
+              valueFrom:
+                secretKeyRef:
+                  name: miniflux-credentials
+                  key: database-url.secret
+EOF
+kubectl create -f /var/data/config/miniflux/app-deployment.yml
+```
+
+### Check pods
+
+Check that your deployments are running, with ```kubectl get pods -n miniflux```. After a minute or so, you should see 2 "Running" pods, as illustrated below:
+
+```bash
+[funkypenguin:~] % kubectl get pods -n miniflux
+NAME                   READY     STATUS    RESTARTS   AGE
+app-667c667b75-5jjm9   1/1       Running   0          4d
+db-fcd47b88f-9vvqt     1/1       Running   0          4d
+[funkypenguin:~] %
+```
+
+### Create db service
+
+The db service resource "advertises" the availability of PostgreSQL's port (TCP 5432) in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements.
+
+```bash
+cat <<EOF > /var/data/config/miniflux/db-service.yml
+kind: Service
+apiVersion: v1
+metadata:
+  name: db
+  namespace: miniflux
+spec:
+  selector:
+    app: db
+  ports:
+    - protocol: TCP
+      port: 5432
+  clusterIP: None
+EOF
+kubectl create -f /var/data/config/miniflux/db-service.yml
+```
+
+### Create app service
+
+The app service resource "advertises" the availability of miniflux's HTTP listener port (TCP 8080) in your pod. This is the service which will be referred to by the ingress (below), so that Traefik can route incoming traffic to the miniflux app.
+
+```bash
+cat <<EOF > /var/data/config/miniflux/app-service.yml
+kind: Service
+apiVersion: v1
+metadata:
+  name: app
+  namespace: miniflux
+spec:
+  selector:
+    app: app
+  ports:
+    - protocol: TCP
+      port: 8080
+  clusterIP: None
+EOF
+kubectl create -f /var/data/config/miniflux/app-service.yml
+```
+
+### Check services
+
+Check that your services are deployed, with ```kubectl get services -n miniflux```. You should see something like this:
+
+```bash
+[funkypenguin:~] % kubectl get services -n miniflux
+NAME   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
+app    ClusterIP   None         <none>        8080/TCP   55d
+db     ClusterIP   None         <none>        5432/TCP   55d
+[funkypenguin:~] %
+```
+
+### Create ingress
+
+The ingress resource tells Traefik to forward inbound requests for *miniflux.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain.
+
+```bash
+cat <<EOF > /var/data/config/miniflux/ingress.yml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: app
+  namespace: miniflux
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+    - host: miniflux.example.com
+      http:
+        paths:
+          - backend:
+              serviceName: app
+              servicePort: 8080
+EOF
+kubectl create -f /var/data/config/miniflux/ingress.yml
+```
+
+Check that your ingress is deployed, with ```kubectl get ingress -n miniflux```.
You should see something like this: + +```bash +[funkypenguin:~] 130 % kubectl get ingress -n miniflux +NAME HOSTS ADDRESS PORTS AGE +app miniflux.funkypenguin.co.nz 80 55d +[funkypenguin:~] % +``` + +### Access Miniflux + +At this point, you should be able to access your instance on your chosen DNS name (*i.e. *) + +### Troubleshooting + +To look at the Miniflux pod's logs, run ```kubectl logs -n miniflux -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/). + +--8<-- "recipe-footer.md" diff --git a/mkdocs-insiders.yml b/mkdocs-insiders.yml index f383195..8e4a01c 100644 --- a/mkdocs-insiders.yml +++ b/mkdocs-insiders.yml @@ -2,5 +2,5 @@ # that don't degrade for the open-source version INHERIT: mkdocs.yml # disabled for now, since I'm not convinced social cards are better than default thumbnails -plugins: - social: {} +# plugins: +# social: {} diff --git a/mkdocs.yml b/mkdocs.yml index 67ee0aa..61d45f3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -142,19 +142,88 @@ nav: - Networks: reference/networks.md - OpenVPN : reference/openvpn.md - Troubleshooting: reference/troubleshooting.md - - Kubernetes: + - β›΄ Kubernetes: - Preparation: - kubernetes/index.md - - Design: kubernetes/design.md - - Cluster: kubernetes/cluster.md - - DIY Cluster: kubernetes/diycluster.md - - Load Balancer: kubernetes/loadbalancer.md - - Snapshots: kubernetes/snapshots.md - - Helm: kubernetes/helm.md - - Traefik: kubernetes/traefik.md - # - Chef's Favorites: - # - Istio: recipes/kubernetes/istio.md + - Introduction: kubernetes/index.md + - Cluster: + - kubernetes/cluster/index.md + - Digital Ocean: kubernetes/cluster/digitalocean.md + # - Bare Metal: kubernetes/cluster/baremetal.md + # - Home Lab: kubernetes/cluster/baremetal.md + - k3s: kubernetes/cluster/k3s.md + # - The Hard Way: kubernetes/cluster/the-hard-way.md + - Deployment: + - kubernetes/deployment/index.md + # - YAML: kubernetes/wip.md + # - Helm: kubernetes/wip.md + # - GitHub Actions: kubernetes/wip.md + - Flux: + - Install: kubernetes/deployment/flux/install.md + - Design: kubernetes/deployment/flux/design.md + - Operate: kubernetes/deployment/flux/operate.md + - Essentials: + - Load Balancer: + - kubernetes/loadbalancer/index.md + - k3s: kubernetes/loadbalancer/k3s.md + - MetalLB: + - kubernetes/loadbalancer/metallb/index.md + - pfSense: kubernetes/loadbalancer/metallb/pfsense.md + - Sealed Secrets: kubernetes/sealed-secrets.md + - External DNS: kubernetes/external-dns.md + - SSL Certificates: + - kubernetes/ssl-certificates/index.md + - Cert-Manager: kubernetes/ssl-certificates/cert-manager.md + - LetsEncrypt Issuers: kubernetes/ssl-certificates/letsencrypt-issuers.md + - Wildcard Certificate: kubernetes/ssl-certificates/letsencrypt-wildcard.md + - Secret Replicator: kubernetes/ssl-certificates/secret-replicator.md + - Ingress: + - kubernetes/ingress/index.md + - Traefik: + - kubernetes/ingress/traefik/index.md + # - Dashboard: kubernetes/ingress/traefik/dashboard.md + - Nginx: kubernetes/ingress/nginx.md + - Persistence: + - kubernetes/persistence/index.md + - Local Path Provisioner: kubernetes/persistence/local-path-provisioner.md + - TopoLVM: kubernetes/persistence/topolvm.md + # - Rook Ceph: kubernetes/persistence/rook-ceph.md + # - OpenEBS: kubernetes/persistence/openebs.md + # - LongHorn: kubernetes/persistence/longhorn.md + # - Backup: + # - kubernetes/backup/index.md + # - kubernetes/wip.md + + # - Monitoring: + # - kubernetes/monitoring/index.md + # - Prometheus: kubernetes/wip.md + # - 
Grafana: kubernetes/wip.md + # - AlertManager: kubernetes/wip.md + # - Goldilocks: kubernetes/wip.md + # - Reloader: kubernetes/wip.md + # - Dashboard: kubernetes/wip.md + # - Kured: kubernetes/wip.md + # - KeyCloak: kubernetes/wip.md + # - Recipes: + # - GitHub Actions Runners: kubernetes/wip.md + # - Cilium: kubernetes/wip.md + # - Concourse: kubernetes/wip.md + # - Flagger: kubernetes/wip.md + # - Flagger: kubernetes/wip.md + # - Flux: recipes/kubernetes/wip.md + # - FoundationDB: kubernetes/wip.md + # - Istio: recipes/kubernetes/wip.md + # - Jaeger: kubernetes/wip.md + # - Kiali: kubernetes/wip.md + # - Minio: kubernetes/wip.md + # - NGINX Ingress: kubernetes/wip.md + # - Polaris: kubernetes/wip.md + # - Portainer: kubernetes/wip.md + # - Prometheus: kubernetes/wip.md + # - Traefik: kubernetes/wip.md + # - Vault: kubernetes/wip.md + # - Webook Receiver: kubernetes/wip.md - πŸš€ Get Premix!: - premix/index.md - Ansible: @@ -185,11 +254,11 @@ theme: - navigation.tabs.sticky - navigation.instant - navigation.sections - - navigation.tracking - navigation.indexes - navigation.top - search.suggest - search.share + - content.code.annotate icon: repo: 'fontawesome/brands/github' palette: @@ -297,7 +366,11 @@ markdown_extensions: repo: geek-cookbook - pymdownx.mark - pymdownx.smartsymbols - - pymdownx.superfences + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format - pymdownx.tasklist: custom_checkbox: true - pymdownx.tilde diff --git a/overrides/main.html b/overrides/main.html index c7f5f0b..3fabc07 100644 --- a/overrides/main.html +++ b/overrides/main.html @@ -21,7 +21,7 @@ {% endblock %} {% block analytics %} - + {% endblock %} diff --git a/requirements.txt b/requirements.txt index 054d410..09f4b18 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ pymdown-extensions>=6.0 Markdown>=3.0.1 mkdocs-minify-plugin>=0.2 mkdocs-autolinks-plugin>=0.2.0 -mkdocs-htmlproofer-plugin>=0.0.3 +# mkdocs-htmlproofer-plugin>=0.0.3 mkdocs-git-revision-date-localized-plugin>=0.4.8 mkdocs-macros-plugin -mkdocs-material +mkdocs-material \ No newline at end of file diff --git a/scripts/recipe-footer.md b/scripts/recipe-footer.md index 4b04870..119edb8 100644 --- a/scripts/recipe-footer.md +++ b/scripts/recipe-footer.md @@ -9,7 +9,7 @@ Did you receive excellent service? Want to make your waiter happy? (_..and suppo ## Flirt with waiter (subscribe) πŸ’Œ -Want to know now when this recipe gets updated, or when future recipes are added? Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.atom), or leave your email address below, and we'll keep you updated. (*double-opt-in, no monkey business, no spam) +Want to know now when this recipe gets updated, or when future recipes are added? Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.atom), or leave your email address below, and we'll keep you updated.

    diff --git a/scripts/serve-insiders.sh b/scripts/serve-insiders.sh index a322855..b50c3e7 100755 --- a/scripts/serve-insiders.sh +++ b/scripts/serve-insiders.sh @@ -1,3 +1,4 @@ #!/bin/bash -docker build --build-arg FROM_SOURCE=ghcr.io/geek-cookbook/mkdocs-material-insiders . -t funkypenguin/mkdocs-material +# docker pull ghcr.io/geek-cookbook/mkdocs-material-insiders +docker build --build-arg FROM_SOURCE=funkypenguin/mkdocs-material-insiders . -t funkypenguin/mkdocs-material docker run --rm --name mkdocs-material -it -p 8123:8000 -v ${PWD}:/docs funkypenguin/mkdocs-material serve -f mkdocs-insiders.yml --dev-addr 0.0.0.0:8000 --dirtyreload