Refresh Kubernetes Guide 💪
@@ -13,5 +13,6 @@
|
||||
|
||||
# We use fenced code blocks, but this test conflicts with the admonitions plugin we use, which relies
|
||||
# on indentation (which is then falsely detected as a code block)
|
||||
"MD038": false
|
||||
"MD046": false
|
||||
|
||||
|
||||
6
_snippets/kubernetes-why-full-values-in-configmap.md
Normal file
@@ -0,0 +1,6 @@
|
||||
!!! question "That's a lot of unnecessary text!"
|
||||
> Why not just paste in the subset of values I want to change?
|
||||
|
||||
You know what's harder than working out which values from a 2000-line `values.yaml` to change?
|
||||
|
||||
Answer: Working out what values to change when the upstream helm chart has refactored or added options! By pasting in the entirety of the upstream chart's `values.yaml`, when it comes time to perform upgrades, you can just duplicate your ConfigMap YAML, paste the new values into one of the copies, and compare them side by side to ensure your original values/decisions persist in the new chart.
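To make that workflow concrete, here's a rough sketch of the upgrade-time comparison (*filenames are purely illustrative*):

```bash
# Duplicate the ConfigMap holding your current (customised) chart values...
cp configmap-myapp-helm-chart-value-overrides.yaml configmap-myapp-new-chart-values.yaml

# ...paste the new upstream values.yaml into the copy, then compare the two
# side-by-side to spot refactored/added options and re-apply your own choices
diff -u configmap-myapp-helm-chart-value-overrides.yaml configmap-myapp-new-chart-values.yaml
```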
|
||||
2
_snippets/kubernetes-why-not-config-in-helmrelease.md
Normal file
@@ -0,0 +1,2 @@
|
||||
!!! question "Why not just put config in the HelmRelease?"
|
||||
While it's true that we could embed values directly into the HelmRelease YAML, this becomes unwieldy with large helm charts. It's also simpler (less likely to result in error) if changes to **HelmReleases**, which affect **deployment** of the chart, are defined in separate files from changes to helm chart **values**, which affect **operation** of the chart.
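As a rough sketch (*names are illustrative, and the API version may differ with your flux version*), the HelmRelease then only describes **deployment**, and pulls its **values** from a ConfigMap:

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: podinfo
  namespace: podinfo
spec:
  interval: 15m
  chart:
    spec:
      chart: podinfo
      sourceRef:
        kind: HelmRepository
        name: podinfo
        namespace: flux-system
  valuesFrom:
    # The chart values live in their own ConfigMap, in their own file
    - kind: ConfigMap
      name: podinfo-helm-chart-value-overrides
      valuesKey: values.yaml
```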
|
||||
@@ -4,11 +4,17 @@
|
||||
|
||||
### Tip your waiter (sponsor) 👏
|
||||
|
||||
Did you receive excellent service? Want to make your waiter happy? (_..and support development of current and future recipes!_) Sponsor me on [Github][github_sponsor] / [Patreon][patreon], or see the [contribute](/community/support/) page for more (_free or paid)_ ways to say thank you! 👏
|
||||
Did you receive excellent service? Want to compliment the chef? (_..and support development of current and future recipes!_) Sponsor me on [Github][github_sponsor] / [Patreon][patreon], or see the [contribute](/community/support/) page for more (_free or paid_) ways to say thank you! 👏
|
||||
|
||||
### Employ your chef (engage) 🤝
|
||||
|
||||
Is this too much of a geeky PITA? Do you just want results, stat? [I do this for a living](https://www.funkypenguin.co.nz/about/) - I provide consulting and engineering expertise to businesses needing short-term, short-notice support in the cloud-native space, including AWS/Azure/GKE, Kubernetes, CI/CD and automation.
|
||||
|
||||
Learn more about working with me [here](https://www.funkypenguin.co.nz/work-with-me/).
|
||||
|
||||
### Flirt with waiter (subscribe) 💌
|
||||
|
||||
Want to know now when this recipe gets updated, or when future recipes are added? Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.rss), or leave your email address below, and we'll keep you updated. (*double-opt-in, no monkey business, no spam)
|
||||
Want to know now when this recipe gets updated, or when future recipes are added? Subscribe to the [RSS feed](https://mastodon.social/@geekcookbook_changes.rss), or leave your email address below, and we'll keep you updated.
|
||||
|
||||
--8<-- "convertkit-subscribe-form.html"
|
||||
|
||||
|
||||
452
manuscript/images/cert-manager.svg
Normal file
@@ -0,0 +1,452 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
overflow="hidden"
|
||||
viewBox="0 0 1088 624"
|
||||
version="1.1"
|
||||
id="svg391"
|
||||
sodipodi:docname="high-level-overview-test.svg"
|
||||
inkscape:version="1.1 (c68e22c387, 2021-05-23)"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
<sodipodi:namedview
|
||||
id="namedview393"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pagecheckerboard="0"
|
||||
showgrid="false"
|
||||
inkscape:zoom="1.3141026"
|
||||
inkscape:cx="682.97559"
|
||||
inkscape:cy="311.99999"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1028"
|
||||
inkscape:window-x="-6"
|
||||
inkscape:window-y="-6"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="g389" />
|
||||
<defs
|
||||
id="defs241">
|
||||
<clipPath
|
||||
id="clip0">
|
||||
<path
|
||||
d="M0 0 1088 0 1088 624 0 624Z"
|
||||
fill-rule="evenodd"
|
||||
clip-rule="evenodd"
|
||||
id="path220" />
|
||||
</clipPath>
|
||||
</defs>
|
||||
<g
|
||||
clip-path="url(#clip0)"
|
||||
id="g389">
|
||||
<path
|
||||
style="color:#000000;fill:#ffffff;-inkscape-stroke:none"
|
||||
d="M 0,0 H 1088 V 624 H 0 Z"
|
||||
id="rect243" />
|
||||
<g
|
||||
id="rect245">
|
||||
<path
|
||||
style="color:#000000;fill:#ffd966;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 147.5,111.5 h 211 v 56 h -211 z"
|
||||
id="path645" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 147.16602,111.16602 V 111.5 167.83398 h 211.66796 v -56.66796 z m 0.66796,0.66796 h 210.33204 v 55.33204 H 147.83398 Z"
|
||||
id="path647" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(186.804,146)"
|
||||
id="text251">letsencrypt<tspan
|
||||
x="90"
|
||||
y="0"
|
||||
id="tspan247">-</tspan><tspan
|
||||
x="96.166702"
|
||||
y="0"
|
||||
id="tspan249">staging</tspan></text>
|
||||
<g
|
||||
id="rect257">
|
||||
<path
|
||||
style="color:#000000;fill:#ffd966;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 267.5,26.500099 h 211 v 58 h -211 z"
|
||||
id="path637" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 267.16602,26.166016 V 26.5 84.833984 H 478.83398 V 26.166016 Z m 0.66796,0.667968 H 478.16602 V 84.166016 H 267.83398 Z"
|
||||
id="path639" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(318.799,63)"
|
||||
id="text263">letsencrypt<tspan
|
||||
x="90"
|
||||
y="0"
|
||||
id="tspan259">-</tspan><tspan
|
||||
x="96.166702"
|
||||
y="0"
|
||||
id="tspan261">prod</tspan></text>
|
||||
<g
|
||||
id="rect269">
|
||||
<path
|
||||
style="color:#000000;fill:#ffd966;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 867.5,111.5 h 211 v 57 h -211 z"
|
||||
id="path629" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 867.16602,111.16602 V 111.5 168.83398 H 1078.834 v -57.66796 z m 0.66796,0.66796 H 1078.166 v 56.33204 H 867.83398 Z"
|
||||
id="path631" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(910.161,146)"
|
||||
id="text283">venafi<tspan
|
||||
x="49.666599"
|
||||
y="0"
|
||||
id="tspan271">-</tspan><tspan
|
||||
x="55.833302"
|
||||
y="0"
|
||||
id="tspan273">as</tspan><tspan
|
||||
x="75.5"
|
||||
y="0"
|
||||
id="tspan275">-</tspan><tspan
|
||||
x="81.666603"
|
||||
y="0"
|
||||
id="tspan277">a</tspan><tspan
|
||||
x="92"
|
||||
y="0"
|
||||
id="tspan279">-</tspan><tspan
|
||||
x="98.166603"
|
||||
y="0"
|
||||
id="tspan281">service</tspan></text>
|
||||
<g
|
||||
id="rect289">
|
||||
<path
|
||||
style="color:#000000;fill:#ffd966;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 505.5,66.500099 h 211 V 122.5001 h -211 z"
|
||||
id="path621" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 505.16602,66.166016 V 66.5 122.83398 H 716.83398 V 66.166016 Z m 0.66796,0.667968 H 716.16602 V 122.16602 H 505.83398 Z"
|
||||
id="path623" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(564.181,101)"
|
||||
id="text295">hashicorp<tspan
|
||||
x="80.666603"
|
||||
y="0"
|
||||
id="tspan291">-</tspan><tspan
|
||||
x="86.833298"
|
||||
y="0"
|
||||
id="tspan293">vault</tspan></text>
|
||||
<g
|
||||
id="rect301">
|
||||
<path
|
||||
style="color:#000000;fill:#ffd966;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 744.5,26.500099 h 211 v 57 h -211 z"
|
||||
id="path613" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 744.16602,26.166016 V 26.5 83.833984 H 955.83398 V 26.166016 Z m 0.66796,0.667968 H 955.16602 V 83.166016 H 744.83398 Z"
|
||||
id="path615" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(822.831,61)"
|
||||
id="text307">venafi<tspan
|
||||
x="49.666599"
|
||||
y="0"
|
||||
id="tspan303">-</tspan><tspan
|
||||
x="55.833302"
|
||||
y="0"
|
||||
id="tspan305">tpp</tspan></text>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="24px"
|
||||
transform="translate(21.6114,97)"
|
||||
id="text313">Issuers</text>
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="M 0.333333,-2.69038e-7 0.333433,123.41 H -0.333234 L -0.333333,2.69038e-7 Z M 4.0001,122.077 l -3.999995013,8 -4.000004987,-8 z"
|
||||
id="path315"
|
||||
transform="matrix(1,0,0,-1,611.5,252.577)" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="M 0.192828,-0.271898 233.304,165.048 232.918,165.592 -0.192828,0.271898 Z M 234.338,161.287 l 4.211,7.89 -8.839,-1.365 z"
|
||||
id="path317"
|
||||
transform="matrix(1,0,0,-1,611.5,252.677)" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="M 0.0756516,-0.324635 355.464,82.4935 355.312,83.1428 -0.0756516,0.324635 Z M 354.997,78.6199 l 6.883,5.7112 -8.699,2.08 z"
|
||||
id="path319"
|
||||
transform="matrix(1,0,0,-1,611.5,252.831)" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="M 610.856,252.95 377.745,87.6287 378.131,87.0849 611.242,252.406 Z M 376.712,91.3907 372.5,83.5001 l 8.84,1.3651 z"
|
||||
id="path321" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="m 611.615,252.724 -351.707,-83.363 0.154,-0.648 351.707,83.363 z M 260.362,173.237 253.5,167.5 l 8.707,-2.047 z"
|
||||
id="path323" />
|
||||
<g
|
||||
id="path325">
|
||||
<path
|
||||
style="color:#000000;fill:#6aa84f;fill-rule:evenodd;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 505.5,262 c 0,-5.247 4.253,-9.5 9.5,-9.5 h 192 c 5.247,0 9.5,4.253 9.5,9.5 v 38 c 0,5.247 -4.253,9.5 -9.5,9.5 H 515 c -5.247,0 -9.5,-4.253 -9.5,-9.5 z"
|
||||
id="path575" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;fill-rule:evenodd;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 515,252.16602 c -5.42712,0 -9.83398,4.40686 -9.83398,9.83398 v 38 c 0,5.42712 4.40686,9.83398 9.83398,9.83398 h 192 c 5.42712,0 9.83398,-4.40686 9.83398,-9.83398 v -38 c 0,-5.42712 -4.40686,-9.83398 -9.83398,-9.83398 z m 0,0.66796 h 192 c 5.06687,0 9.16602,4.09915 9.16602,9.16602 v 38 c 0,5.06687 -4.09915,9.16602 -9.16602,9.16602 H 515 c -5.06687,0 -9.16602,-4.09915 -9.16602,-9.16602 v -38 c 0,-5.06687 4.09915,-9.16602 9.16602,-9.16602 z"
|
||||
id="path577" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="21px"
|
||||
transform="translate(547.233,289)"
|
||||
id="text337">cert<tspan
|
||||
x="35.666599"
|
||||
y="0"
|
||||
id="tspan327">-</tspan><tspan
|
||||
x="42.833302"
|
||||
y="0"
|
||||
id="tspan329">manager</tspan><tspan
|
||||
font-size="24px"
|
||||
x="-344.64801"
|
||||
y="153"
|
||||
id="tspan331">Certificates</tspan><tspan
|
||||
font-size="24px"
|
||||
x="-345.30099"
|
||||
y="266"
|
||||
id="tspan333">Kubernetes</tspan><tspan
|
||||
font-size="24px"
|
||||
x="-324.634"
|
||||
y="295"
|
||||
id="tspan335">Secrets</tspan></text>
|
||||
<path
|
||||
style="color:#000000;fill:#f9cb9c;fill-rule:evenodd;-inkscape-stroke:none"
|
||||
d="m 570.5,532.063 c 0,2.519 -2.043,4.562 -4.562,4.562 v -4.562 c 0,1.259 -1.022,2.281 -2.282,2.281 -1.26,0 -2.281,-1.022 -2.281,-2.281 v 4.562 H 398.063 c -2.52,0 -4.563,2.043 -4.563,4.563 v 54.75 c 0,2.519 2.043,4.562 4.563,4.562 2.519,0 4.562,-2.043 4.562,-4.562 v -4.563 h 163.313 c 2.519,0 4.562,-2.043 4.562,-4.562 z M 398.063,545.75 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-1.26 -1.021,-2.282 -2.281,-2.282 -1.26,0 -2.281,1.022 -2.281,2.282 z"
|
||||
id="path339" />
|
||||
<path
|
||||
style="color:#000000;fill:#c8a37d;fill-rule:evenodd;-inkscape-stroke:none"
|
||||
d="m 398.063,545.75 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-1.26 -1.021,-2.282 -2.281,-2.282 -1.26,0 -2.281,1.022 -2.281,2.282 z m 167.875,-9.125 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-2.52 -2.043,-4.563 -4.562,-4.563 -2.52,0 -4.563,2.043 -4.563,4.563 0,1.26 1.021,2.281 2.281,2.281 1.26,0 2.282,-1.021 2.282,-2.281 z"
|
||||
id="path341" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;fill-rule:evenodd;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 565.9375,527.16602 c -2.69923,0 -4.89648,2.19632 -4.89648,4.89648 v 4.22852 H 398.0625 c -2.70016,0 -4.89648,2.19632 -4.89648,4.89648 v 54.75 c 0,2.69923 2.19632,4.89648 4.89648,4.89648 2.69923,0 4.89648,-2.19725 4.89648,-4.89648 v -4.22852 H 565.9375 c 2.69923,0 4.89648,-2.19725 4.89648,-4.89648 v -54.75 c 0,-2.70016 -2.19632,-4.89648 -4.89648,-4.89648 z m 0,0.66796 c 2.33984,0 4.22852,1.88868 4.22852,4.22852 0,2.22036 -1.72083,3.98395 -3.89454,4.16211 v -4.16211 h -0.66601 c 0,1.07861 -0.86947,1.94727 -1.94922,1.94727 -1.07975,0 -1.94727,-0.86848 -1.94727,-1.94727 0,-2.33984 1.88975,-4.22852 4.22852,-4.22852 z m -0.33203,5.83204 v 2.625 h -3.89649 v -2.6211 c 0.48006,0.58593 1.13354,1.00781 1.94727,1.00781 0.8153,0 1.46892,-0.42386 1.94922,-1.01171 z m 4.56055,0.71875 v 52.42773 c 0,2.33877 -1.88975,4.22852 -4.22852,4.22852 H 402.95898 V 541.1875 c 0,-1.44007 -1.17498,-2.61523 -2.61523,-2.61523 -1.44025,0 -2.61328,1.17516 -2.61328,2.61523 v 4.16211 c -2.17556,-0.17723 -3.89649,-1.94108 -3.89649,-4.16211 0,-2.33984 1.88868,-4.22852 4.22852,-4.22852 h 163.3125 0.33398 4.22852 c 1.8433,0 3.39365,-1.06506 4.22852,-2.57421 z m -169.82227,4.85546 c 1.07975,0 1.94727,0.86734 1.94727,1.94727 0,2.22036 -1.72083,3.98395 -3.89454,4.16211 v -4.16211 c 0,-1.07993 0.86752,-1.94727 1.94727,-1.94727 z m -6.50977,4.26954 c 0.83496,1.50915 2.38458,2.57421 4.22852,2.57421 1.8433,0 3.39365,-1.06506 4.22852,-2.57421 v 47.53125 0.33398 4.5625 c 0,2.33877 -1.88975,4.22852 -4.22852,4.22852 -2.33984,0 -4.22852,-1.88975 -4.22852,-4.22852 z"
|
||||
id="path343" />
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(423.871,570)"
|
||||
id="text345">signed keypair</text>
|
||||
<g
|
||||
id="path347">
|
||||
<path
|
||||
style="color:#000000;fill:#6d9eeb;fill-rule:evenodd;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 383.834,388.5 H 595.5 v 76.666 c 0,8.469 -6.865,15.334 -15.334,15.334 H 368.5 v -76.666 c 0,-8.469 6.865,-15.334 15.334,-15.334 z"
|
||||
id="path549" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;fill-rule:evenodd;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 383.83398,388.16602 c -8.64912,0 -15.66796,7.01884 -15.66796,15.66796 v 77 h 212 c 8.64912,0 15.66796,-7.01884 15.66796,-15.66796 v -77 z m 0,0.66796 h 211.33204 v 76.33204 c 0,8.28885 -6.71115,15 -15,15 H 368.83398 v -76.33204 c 0,-8.28885 6.71115,-15 15,-15 z"
|
||||
id="path551" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="21px"
|
||||
transform="translate(425.756,433)"
|
||||
id="text357">foo.bar.com<tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="-5.4200101"
|
||||
y="20"
|
||||
id="tspan349">Issuer:</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="47.080002"
|
||||
y="20"
|
||||
id="tspan351">venafi</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="89.580002"
|
||||
y="20"
|
||||
id="tspan353">-</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="94.9133"
|
||||
y="20"
|
||||
id="tspan355">tpp</tspan></text>
|
||||
<g
|
||||
id="path359">
|
||||
<path
|
||||
style="color:#000000;fill:#6d9eeb;fill-rule:evenodd;stroke-width:0.666667;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="M 642.834,388.5 H 853.5 v 76.666 c 0,8.469 -6.865,15.334 -15.334,15.334 H 627.5 v -76.666 c 0,-8.469 6.865,-15.334 15.334,-15.334 z"
|
||||
id="path541" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;fill-rule:evenodd;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 642.83398,388.16602 c -8.64912,0 -15.66796,7.01884 -15.66796,15.66796 v 77 h 211 c 8.64912,0 15.66796,-7.01884 15.66796,-15.66796 v -77 z m 0,0.66796 h 210.33204 v 76.33204 c 0,8.28885 -6.71115,15 -15,15 H 627.83398 v -76.33204 c 0,-8.28885 6.71115,-15 15,-15 z"
|
||||
id="path543" />
|
||||
</g>
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="21px"
|
||||
transform="translate(676.661,420)"
|
||||
id="text371">example.com<tspan
|
||||
x="-25.4067"
|
||||
y="25"
|
||||
id="tspan361">www.example.com</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="-20"
|
||||
y="46"
|
||||
id="tspan363">Issuer:</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="32.5"
|
||||
y="46"
|
||||
id="tspan365">letsencrypt</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="109.667"
|
||||
y="46"
|
||||
id="tspan367">-</tspan><tspan
|
||||
font-style="italic"
|
||||
font-size="16px"
|
||||
x="115"
|
||||
y="46"
|
||||
id="tspan369">prod</tspan></text>
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="M 0.174994,-0.283704 123.648,75.8769 123.298,76.4443 -0.174994,0.283704 Z M 124.439,72.0563 l 4.709,7.6043 -8.909,-0.7954 z"
|
||||
id="path373"
|
||||
transform="matrix(-1,-8.74228e-8,-8.74228e-8,1,611.648,309.5)" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="m 611.674,309.216 123.66,75.582 -0.348,0.569 -123.66,-75.583 z m 124.435,71.758 4.74,7.585 -8.912,-0.759 z"
|
||||
id="path375" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="m 482.833,480.5 v 48.939 h -0.666 V 480.5 Z m 3.667,47.606 -4,8 -4,-8 z"
|
||||
id="path377" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;-inkscape-stroke:none"
|
||||
d="m 740.833,480.5 v 48.939 h -0.666 V 480.5 Z m 3.667,47.606 -4,8 -4,-8 z"
|
||||
id="path379" />
|
||||
<path
|
||||
style="color:#000000;fill:#f9cb9c;fill-rule:evenodd;-inkscape-stroke:none"
|
||||
d="m 829.5,532.063 c 0,2.519 -2.043,4.562 -4.562,4.562 v -4.562 c 0,1.259 -1.022,2.281 -2.282,2.281 -1.26,0 -2.281,-1.022 -2.281,-2.281 v 4.562 H 657.063 c -2.52,0 -4.563,2.043 -4.563,4.563 v 54.75 c 0,2.519 2.043,4.562 4.563,4.562 2.519,0 4.562,-2.043 4.562,-4.562 v -4.563 h 163.313 c 2.519,0 4.562,-2.043 4.562,-4.562 z M 657.063,545.75 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-1.26 -1.021,-2.282 -2.281,-2.282 -1.26,0 -2.281,1.022 -2.281,2.282 z"
|
||||
id="path381" />
|
||||
<path
|
||||
style="color:#000000;fill:#c8a37d;fill-rule:evenodd;-inkscape-stroke:none"
|
||||
d="m 657.063,545.75 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-1.26 -1.021,-2.282 -2.281,-2.282 -1.26,0 -2.281,1.022 -2.281,2.282 z m 167.875,-9.125 c 2.519,0 4.562,-2.043 4.562,-4.562 0,-2.52 -2.043,-4.563 -4.562,-4.563 -2.52,0 -4.563,2.043 -4.563,4.563 0,1.26 1.021,2.281 2.281,2.281 1.26,0 2.282,-1.021 2.282,-2.281 z"
|
||||
id="path383" />
|
||||
<path
|
||||
style="color:#000000;fill:#000000;fill-rule:evenodd;stroke-miterlimit:8;-inkscape-stroke:none"
|
||||
d="m 824.9375,527.16602 c -2.69923,0 -4.89648,2.19632 -4.89648,4.89648 v 4.22852 H 657.0625 c -2.70016,0 -4.89648,2.19632 -4.89648,4.89648 v 54.75 c 0,2.69923 2.19632,4.89648 4.89648,4.89648 2.69923,0 4.89648,-2.19725 4.89648,-4.89648 v -4.22852 H 824.9375 c 2.69923,0 4.89648,-2.19725 4.89648,-4.89648 v -54.75 c 0,-2.70016 -2.19632,-4.89648 -4.89648,-4.89648 z m 0,0.66796 c 2.33984,0 4.22852,1.88868 4.22852,4.22852 0,2.22036 -1.72083,3.98395 -3.89454,4.16211 v -4.16211 h -0.66601 c 0,1.07861 -0.86947,1.94727 -1.94922,1.94727 -1.07975,0 -1.94727,-0.86848 -1.94727,-1.94727 0,-2.33984 1.88975,-4.22852 4.22852,-4.22852 z m -0.33203,5.83204 v 2.625 h -3.89649 v -2.6211 c 0.48006,0.58593 1.13354,1.00781 1.94727,1.00781 0.8153,0 1.46892,-0.42386 1.94922,-1.01171 z m 4.56055,0.71875 v 52.42773 c 0,2.33877 -1.88975,4.22852 -4.22852,4.22852 H 661.95898 V 541.1875 c 0,-1.44007 -1.17498,-2.61523 -2.61523,-2.61523 -1.44025,0 -2.61328,1.17516 -2.61328,2.61523 v 4.16211 c -2.17556,-0.17723 -3.89649,-1.94108 -3.89649,-4.16211 0,-2.33984 1.88868,-4.22852 4.22852,-4.22852 h 163.3125 0.33398 4.22852 c 1.8433,0 3.39365,-1.06506 4.22852,-2.57421 z m -169.82227,4.85546 c 1.07975,0 1.94727,0.86734 1.94727,1.94727 0,2.22036 -1.72083,3.98395 -3.89454,4.16211 v -4.16211 c 0,-1.07993 0.86752,-1.94727 1.94727,-1.94727 z m -6.50977,4.26954 c 0.83496,1.50915 2.38458,2.57421 4.22852,2.57421 1.8433,0 3.39365,-1.06506 4.22852,-2.57421 v 47.53125 0.33398 4.5625 c 0,2.33877 -1.88975,4.22852 -4.22852,4.22852 -2.33984,0 -4.22852,-1.88975 -4.22852,-4.22852 z"
|
||||
id="path385" />
|
||||
<text
|
||||
font-family="Arial, Arial_MSFontService, sans-serif"
|
||||
font-weight="400"
|
||||
font-size="19px"
|
||||
transform="translate(682.367,570)"
|
||||
id="text387">signed keypair</text>
|
||||
<g
|
||||
id="g1991"
|
||||
transform="matrix(0.8280038,0,0,0.84377522,155.50129,124.76081)">
|
||||
<path
|
||||
d="m 22.7,17.21 h -3.83 v -1.975 c 0,-1.572 -1.3,-2.86 -2.86,-2.86 -1.56,0 -2.86,1.3 -2.86,2.86 V 17.21 H 9.33 v -1.975 c 0,-3.708 3.023,-6.7 6.7,-6.7 3.708,0 6.7,3.023 6.7,6.7 z"
|
||||
fill="#ffa400"
|
||||
id="path1976" />
|
||||
<path
|
||||
d="M 24.282,17.21 H 7.758 a 1.27,1.27 0 0 0 -1.29,1.29 v 12.2 a 1.27,1.27 0 0 0 1.29,1.3 h 16.524 a 1.27,1.27 0 0 0 1.29,-1.29 V 18.5 c -0.04,-0.725 -0.605,-1.3 -1.3,-1.3 z m -7.456,8.02 v 1.652 c 0,0.443 -0.363,0.846 -0.846,0.846 -0.443,0 -0.846,-0.363 -0.846,-0.846 V 25.23 c -0.524,-0.282 -0.846,-0.846 -0.846,-1.49 0,-0.927 0.766,-1.693 1.693,-1.693 0.927,0 1.693,0.766 1.693,1.693 0.04,0.645 -0.322,1.21 -0.846,1.49 z"
|
||||
fill="#003a70"
|
||||
id="path1978" />
|
||||
<path
|
||||
d="m 6.066,15.395 h -4 A 1.17,1.17 0 0 1 0.897,14.226 1.17,1.17 0 0 1 2.066,13.057 h 4 a 1.17,1.17 0 0 1 1.169,1.169 1.17,1.17 0 0 1 -1.169,1.169 z M 8.886,9.108 A 1.03,1.03 0 0 1 8.161,8.826 L 5.017,6.246 C 4.533,5.843 4.453,5.118 4.857,4.594 5.26,4.11 5.985,4.03 6.509,4.434 l 3.144,2.58 c 0.484,0.403 0.564,1.128 0.16,1.652 C 9.531,8.948 9.208,9.109 8.886,9.109 Z M 16.02,6.368 A 1.17,1.17 0 0 1 14.851,5.199 V 1.17 A 1.17,1.17 0 0 1 16.02,0 1.17,1.17 0 0 1 17.189,1.169 V 5.2 A 1.17,1.17 0 0 1 16.02,6.369 Z m 7.093,2.74 c -0.322,0 -0.685,-0.16 -0.887,-0.443 -0.403,-0.484 -0.322,-1.25 0.16,-1.652 l 3.144,-2.58 c 0.484,-0.403 1.25,-0.322 1.652,0.16 0.402,0.482 0.322,1.25 -0.16,1.652 l -3.144,2.58 a 1.13,1.13 0 0 1 -0.766,0.282 z m 6.81,6.287 h -4.03 a 1.17,1.17 0 0 1 -1.169,-1.169 1.17,1.17 0 0 1 1.169,-1.169 h 4.03 a 1.17,1.17 0 0 1 1.169,1.169 1.17,1.17 0 0 1 -1.169,1.169 z"
|
||||
fill="#ffa400"
|
||||
id="path1980" />
|
||||
</g>
|
||||
<g
|
||||
id="g2012"
|
||||
transform="matrix(0.23808024,0,0,0.23808024,784.86702,38.349964)">
|
||||
<circle
|
||||
class="st0"
|
||||
cx="72"
|
||||
cy="72"
|
||||
r="63"
|
||||
id="circle2008" />
|
||||
|
||||
<path
|
||||
class="st1"
|
||||
d="m 80.9,43 6.5,6.1 -11.9,28.3 C 74,81 72.8,84.4 72,87.8 71.3,84.4 70.1,81 68.6,77.4 L 54.1,43 H 43.2 L 72,110.1 100.8,43 Z"
|
||||
id="path2010" />
|
||||
|
||||
</g>
|
||||
<g
|
||||
id="g2012-0"
|
||||
transform="matrix(0.23808024,0,0,0.23808024,872.72161,122.87773)">
|
||||
<circle
|
||||
class="st0"
|
||||
cx="72"
|
||||
cy="72"
|
||||
r="63"
|
||||
id="circle2008-5" />
|
||||
<path
|
||||
class="st1"
|
||||
d="m 80.9,43 6.5,6.1 -11.9,28.3 C 74,81 72.8,84.4 72,87.8 71.3,84.4 70.1,81 68.6,77.4 L 54.1,43 H 43.2 L 72,110.1 100.8,43 Z"
|
||||
id="path2010-5" />
|
||||
</g>
|
||||
<g
|
||||
id="Logo"
|
||||
transform="matrix(0.2339657,0,0,0.22979822,531.02362,81.927566)">
|
||||
<polygon
|
||||
points="16.73,35.35 44.54,19.3 44.54,0 0,25.69 0,25.71 0,87.41 16.73,97.07 "
|
||||
id="polygon2041" />
|
||||
<polygon
|
||||
points="62.32,0 62.32,49.15 44.54,49.15 44.54,30.81 27.8,40.47 27.8,103.44 44.54,113.12 44.54,64.11 62.32,64.11 62.32,82.33 79.05,72.67 79.05,9.66 "
|
||||
id="polygon2043" />
|
||||
<polygon
|
||||
points="90.12,77.79 62.32,93.84 62.32,113.14 106.86,87.45 106.86,87.43 106.86,25.73 90.12,16.07 "
|
||||
id="polygon2045" />
|
||||
</g>
|
||||
<g
|
||||
id="g1991-5"
|
||||
transform="matrix(0.8280038,0,0,0.84377522,287.34185,40.528799)">
|
||||
<path
|
||||
d="m 22.7,17.21 h -3.83 v -1.975 c 0,-1.572 -1.3,-2.86 -2.86,-2.86 -1.56,0 -2.86,1.3 -2.86,2.86 V 17.21 H 9.33 v -1.975 c 0,-3.708 3.023,-6.7 6.7,-6.7 3.708,0 6.7,3.023 6.7,6.7 z"
|
||||
fill="#ffa400"
|
||||
id="path1976-4" />
|
||||
<path
|
||||
d="M 24.282,17.21 H 7.758 a 1.27,1.27 0 0 0 -1.29,1.29 v 12.2 a 1.27,1.27 0 0 0 1.29,1.3 h 16.524 a 1.27,1.27 0 0 0 1.29,-1.29 V 18.5 c -0.04,-0.725 -0.605,-1.3 -1.3,-1.3 z m -7.456,8.02 v 1.652 c 0,0.443 -0.363,0.846 -0.846,0.846 -0.443,0 -0.846,-0.363 -0.846,-0.846 V 25.23 c -0.524,-0.282 -0.846,-0.846 -0.846,-1.49 0,-0.927 0.766,-1.693 1.693,-1.693 0.927,0 1.693,0.766 1.693,1.693 0.04,0.645 -0.322,1.21 -0.846,1.49 z"
|
||||
fill="#003a70"
|
||||
id="path1978-1" />
|
||||
<path
|
||||
d="m 6.066,15.395 h -4 A 1.17,1.17 0 0 1 0.897,14.226 1.17,1.17 0 0 1 2.066,13.057 h 4 a 1.17,1.17 0 0 1 1.169,1.169 1.17,1.17 0 0 1 -1.169,1.169 z M 8.886,9.108 A 1.03,1.03 0 0 1 8.161,8.826 L 5.017,6.246 C 4.533,5.843 4.453,5.118 4.857,4.594 5.26,4.11 5.985,4.03 6.509,4.434 l 3.144,2.58 c 0.484,0.403 0.564,1.128 0.16,1.652 C 9.531,8.948 9.208,9.109 8.886,9.109 Z M 16.02,6.368 A 1.17,1.17 0 0 1 14.851,5.199 V 1.17 A 1.17,1.17 0 0 1 16.02,0 1.17,1.17 0 0 1 17.189,1.169 V 5.2 A 1.17,1.17 0 0 1 16.02,6.369 Z m 7.093,2.74 c -0.322,0 -0.685,-0.16 -0.887,-0.443 -0.403,-0.484 -0.322,-1.25 0.16,-1.652 l 3.144,-2.58 c 0.484,-0.403 1.25,-0.322 1.652,0.16 0.402,0.482 0.322,1.25 -0.16,1.652 l -3.144,2.58 a 1.13,1.13 0 0 1 -0.766,0.282 z m 6.81,6.287 h -4.03 a 1.17,1.17 0 0 1 -1.169,-1.169 1.17,1.17 0 0 1 1.169,-1.169 h 4.03 a 1.17,1.17 0 0 1 1.169,1.169 1.17,1.17 0 0 1 -1.169,1.169 z"
|
||||
fill="#ffa400"
|
||||
id="path1980-3" />
|
||||
</g>
|
||||
</g>
|
||||
<style
|
||||
type="text/css"
|
||||
id="style2006">
|
||||
.st0{fill-rule:evenodd;clip-rule:evenodd;fill:#FCB116;}
|
||||
.st1{fill:#262626;}
|
||||
</style>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 23 KiB |
BIN
manuscript/images/external-dns.png
Normal file
|
After Width: | Height: | Size: 251 KiB |
BIN
manuscript/images/flux_github_token.png
Normal file
|
After Width: | Height: | Size: 121 KiB |
BIN
manuscript/images/ingress.jpg
Normal file
|
After Width: | Height: | Size: 92 KiB |
BIN
manuscript/images/metallb-pfsense-00.png
Normal file
|
After Width: | Height: | Size: 67 KiB |
BIN
manuscript/images/metallb-pfsense-01.png
Normal file
|
After Width: | Height: | Size: 58 KiB |
BIN
manuscript/images/metallb-pfsense-02.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
BIN
manuscript/images/metallb-pfsense-03.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
manuscript/images/metallb-pfsense-04.png
Normal file
|
After Width: | Height: | Size: 128 KiB |
BIN
manuscript/images/metallb-pfsense-05.png
Normal file
|
After Width: | Height: | Size: 109 KiB |
BIN
manuscript/images/sealed-secrets.png
Normal file
|
After Width: | Height: | Size: 35 KiB |
BIN
manuscript/images/traefik-dashboard.png
Normal file
|
After Width: | Height: | Size: 67 KiB |
@@ -1,3 +1,6 @@
|
||||
---
|
||||
description: Creating a Kubernetes cluster on DigitalOcean
|
||||
---
|
||||
# Kubernetes on DigitalOcean
|
||||
|
||||
IMO, the easiest Kubernetes cloud provider to experiment with is [DigitalOcean](https://m.do.co/c/e33b78ad621b) (_this is a referral link_). I've included instructions below to start a basic cluster.
|
||||
@@ -39,7 +42,7 @@ DigitalOcean will provide you with a "kubeconfig" file to use to access your clu
|
||||
|
||||
## Release the kubectl!
|
||||
|
||||
Save your kubeconfig file somewhere, and test it our by running ```kubectl --kubeconfig=<PATH TO KUBECONFIG> get nodes```
|
||||
Save your kubeconfig file somewhere, and test it out by running ```kubectl --kubeconfig=<PATH TO KUBECONFIG> get nodes``` [^1]
|
||||
|
||||
Example output:
|
||||
|
||||
@@ -69,18 +72,6 @@ festive-merkle-8n9e Ready <none> 58s v1.13.1
|
||||
|
||||
That's it. You have a beautiful new kubernetes cluster ready for some action!
|
||||
|
||||
## Move on..
|
||||
|
||||
Still with me? Good. Move on to creating your own external load balancer..
|
||||
|
||||
* [Start](/kubernetes/) - Why Kubernetes?
|
||||
* [Design](/kubernetes/design/) - How does it fit together?
|
||||
* Cluster (this page) - Setup a basic cluster
|
||||
* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access
|
||||
* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data
|
||||
* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks
|
||||
* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm
|
||||
|
||||
[^1]: Ok, yes, there's not much you can do with your cluster _yet_. But stay tuned, more Kubernetes fun to come!
|
||||
[^1]: Do you live in the CLI? Install the kubectl autocompletion for [bash](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-bash-linux/) or [zsh](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-zsh/) to make your life much easier!
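For example, enabling bash completion might look something like this (*see the linked docs for zsh*):

```bash
# Enable kubectl completion in the current shell...
source <(kubectl completion bash)

# ...and make it permanent
echo 'source <(kubectl completion bash)' >> ~/.bashrc
```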
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
64
manuscript/kubernetes/cluster/index.md
Normal file
@@ -0,0 +1,64 @@
|
||||
---
|
||||
description: Choosing HOW to deploy Kubernetes
|
||||
---
|
||||
# Kubernetes Cluster
|
||||
|
||||
There are an ever-increasing number of ways to deploy and run Kubernetes. The primary distinction to be aware of is whether or not to fork out for a managed Kubernetes instance. Managed instances have some advantages, which I'll detail below, but these come at additional cost.
|
||||
|
||||
## Managed (Cloud Provider)
|
||||
|
||||
### Popular Options
|
||||
|
||||
Popular options are:
|
||||
|
||||
* [DigitalOcean](/kubernetes/cluster/digitalocean/)
|
||||
* Google Kubernetes Engine (GKE)
|
||||
* Amazon Elastic Kubernetes Service (EKS)
|
||||
* Azure Kubernetes Service (AKS)
|
||||
|
||||
### Upgrades
|
||||
|
||||
A managed Kubernetes provider will typically offer a way to migrate to pre-tested and trusted versions of Kubernetes as they're released. This [doesn't mean that upgrades will be trouble-free](https://www.digitalocean.com/community/tech_talks/20-000-upgrades-later-lessons-from-a-year-of-managed-kubernetes-upgrades), but they're likely to be less of a PITA. With Kubernetes' 4-month release cadence, you'll want to keep an eye on updates, and avoid becoming too out-of-date.
|
||||
|
||||
### Horizontal Scaling
|
||||
|
||||
One of the key drawcards for Kubernetes is horizontal scaling. You want to be able to expand/contract your cluster as your workloads change, even if just for one day a month. Doing this on your own hardware is.. awkward.
|
||||
|
||||
### Load Balancing
|
||||
|
||||
Even if you had enough hardware capacity to handle any unexpected scaling requirements, ensuring that traffic can reliably reach your cluster is a complicated problem. You need to present a "virtual" IP for external traffic to ingress the cluster on. There are popular solutions to provide LoadBalancer services to a self-managed cluster (*i.e., [MetalLB](/kubernetes/load-balancer/metallb/)*), but they do represent extra complexity, and won't necessarily be resilient to outages outside of the cluster (*network devices, power, etc*).
|
||||
|
||||
### Storage
|
||||
|
||||
Cloud providers make it easy to connect their storage solutions to your cluster, but you'll pay as you scale, and in most cases, I/O on cloud block storage is throttled along with your provisioned size. (*So a 1Gi volume will have terrible IOPS compared to a 100Gi volume*)
|
||||
|
||||
### Services
|
||||
|
||||
Some things just "work better" in a cloud provider environment. For example, to run a highly available Postgres instance on Kubernetes requires at least 3 nodes, and 3 x storage, plus manual failover/failback in the event of an actual issue. This can represent a huge cost if you simply need a PostgreSQL database to provide (*for example*) a backend to an authentication service like [KeyCloak](/recipes/kubernetes/keycloak/). Cloud providers will have a range of managed database solutions which will cost far less than do-it-yourselfing, and integrate easily and securely into their kubernetes offerings.
|
||||
|
||||
### Summary
|
||||
|
||||
Go with a managed provider if you want your infrastructure to be resilient to your own hardware/connectivity issues. I.e., there's a material impact to a power/network/hardware outage, and the cost of the managed provider is less than the cost of an outage.
|
||||
|
||||
## DIY (Cloud Provider, Bare Metal, VMs)
|
||||
|
||||
### Popular Options
|
||||
|
||||
Popular options are:
|
||||
|
||||
* Rancher's K3s
|
||||
* Canonical's Charmed Kubernetes (Ubuntu)
|
||||
|
||||
### Flexible
|
||||
|
||||
With self-hosted Kubernetes, you're free to mix/match your configuration as you see fit. You can run a single k3s node on a Raspberry Pi, or a fully HA Pi-cluster, or a handful of combined master/worker nodes on a bunch of Proxmox VMs, or on plain bare-metal.
|
||||
|
||||
### Education
|
||||
|
||||
You'll learn more about how to care for and feed your cluster if you build it yourself. But you'll definitely spend more time on it, and it won't always be when you expect!
|
||||
|
||||
### Summary
|
||||
|
||||
Go with a self-hosted cluster if you want to learn more, you'd rather spend time than money, or you've already got significant investment in local infrastructure and technical skillz.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
139
manuscript/kubernetes/cluster/k3s.md
Normal file
@@ -0,0 +1,139 @@
|
||||
---
|
||||
description: Creating a Kubernetes cluster on k3s
|
||||
---
|
||||
# Deploy your cluster on k3s
|
||||
|
||||
If you're wanting to self-host your cluster, the simplest and most widely-supported approach is Rancher's [k3s](https://k3s.io/).
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [ ] One or more "modern" Linux hosts to serve as cluster masters. (*Using an odd number of masters is required for HA*). Additional steps are required for [Raspbian Buster](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster), [Alpine](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup), or [RHEL/CentOS](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-red-hat-centos-enterprise-linux).
|
||||
|
||||
Optional:
|
||||
|
||||
* [ ] Additional hosts to serve as cluster agents (*assuming that not everybody gets to be a master!*)
|
||||
|
||||
## Preparation
|
||||
|
||||
Ensure you have sudo access to your nodes, and that each node meets the [installation requirements](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/).
|
||||
|
||||
## Deploy k3s (one node only ever)
|
||||
|
||||
If you only want a single-node k3s cluster, then simply run the following to do the deployment:
|
||||
|
||||
```bash
|
||||
MYSECRET=iambatman
|
||||
curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \
|
||||
sh -s - --disable traefik server
|
||||
```
|
||||
|
||||
!!! question "Why no traefik?"
|
||||
k3s comes with the traefik ingress "built-in", so why not deploy it? Because we'd rather deploy it **later** (*if we even want it*), using the same [deployment strategy](/kubernetes/deployment/flux/) which we use with all of our other services, so that we can easily update/configure it.
|
||||
|
||||
## Deploy k3s (mooar nodes!)
|
||||
|
||||
### Deploy first master
|
||||
|
||||
You may only have one node now, but it's a good idea to prepare for future expansion by bootstrapping k3s in "embedded etcd" multi-master HA mode. Pick a secret to use for your server token, and run the following:
|
||||
|
||||
```bash
|
||||
MYSECRET=iambatman
|
||||
curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \
|
||||
sh -s - --disable traefik --disable servicelb server --cluster-init
|
||||
```
|
||||
|
||||
!!! question "y no servicelb?"
|
||||
K3s includes a [rudimentary load balancer](/kubernetes/loadbalancer/k3s/) which utilizes host ports to make a given port available on all nodes. If you plan to deploy one, and only one, k3s node, then this is a viable configuration, and you can leave out the `--disable servicelb` text above. If you plan for more nodes and HA though, then you're better off deploying [MetalLB](/kubernetes/loadbalancer/metallb/) to do "real" loadbalancing.
|
||||
|
||||
You should see output which looks something like this:
|
||||
|
||||
```bash
|
||||
root@shredder:~# curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \
|
||||
> sh -s - --disable traefik server --cluster-init
|
||||
% Total % Received % Xferd Average Speed Time Time Time Current
|
||||
Dload Upload Total Spent Left Speed
|
||||
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
|
||||
100 27318 100 27318 0 0 144k 0 --:--:-- --:--:-- --:--:-- 144k
|
||||
[INFO] Finding release for channel stable
|
||||
[INFO] Using v1.21.5+k3s2 as release
|
||||
[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.21.5+k3s2/sha256sum-amd64.txt
|
||||
[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.21.5+k3s2/k3s
|
||||
[INFO] Verifying binary download
|
||||
[INFO] Installing k3s to /usr/local/bin/k3s
|
||||
[INFO] Skipping installation of SELinux RPM
|
||||
[INFO] Creating /usr/local/bin/kubectl symlink to k3s
|
||||
[INFO] Creating /usr/local/bin/crictl symlink to k3s
|
||||
[INFO] Creating /usr/local/bin/ctr symlink to k3s
|
||||
[INFO] Creating killall script /usr/local/bin/k3s-killall.sh
|
||||
[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh
|
||||
[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env
|
||||
[INFO] systemd: Creating service file /etc/systemd/system/k3s.service
|
||||
[INFO] systemd: Enabling k3s unit
|
||||
Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
|
||||
[INFO] systemd: Starting k3s
|
||||
root@shredder:~#
|
||||
```
|
||||
|
||||
Provided the last line of output says `Starting k3s` and not something more troublesome-sounding.. you have a cluster! Run `k3s kubectl get nodes -o wide` to confirm this, which has the useful side-effect of printing out your first master's IP address (*which we'll need for the next step*)
|
||||
|
||||
```bash
|
||||
root@shredder:~# k3s kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
shredder Ready control-plane,etcd,master 83s v1.21.5+k3s2 192.168.39.201 <none> Ubuntu 20.04.3 LTS 5.4.0-70-generic containerd://1.4.11-k3s1
|
||||
root@shredder:~#
|
||||
```
|
||||
|
||||
!!! tip "^Z undo undo ..."
|
||||
Oops! Did you mess something up? Just run `k3s-uninstall.sh` to wipe all traces of K3s, and start over!
|
||||
|
||||
### Deploy other masters (optional)
|
||||
|
||||
Now that the first master is deployed, add additional masters (*remember to keep the total number of masters to an odd number*) by referencing the secret, and the IP address of the first master, on all the others:
|
||||
|
||||
```bash
|
||||
MYSECRET=iambatman
|
||||
curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \
|
||||
sh -s - server --disable servicelb --server https://<IP OF FIRST MASTER>:6443
|
||||
```
|
||||
|
||||
Run `k3s kubectl get nodes` to see your new master node make friends with the others:
|
||||
|
||||
```bash
|
||||
root@shredder:~# k3s kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
bebop Ready control-plane,etcd,master 4m13s v1.21.5+k3s2
|
||||
rocksteady Ready control-plane,etcd,master 4m42s v1.21.5+k3s2
|
||||
shredder Ready control-plane,etcd,master 8m54s v1.21.5+k3s2
|
||||
root@shredder:~#
|
||||
```
|
||||
|
||||
### Deploy agents (optional)
|
||||
|
||||
If you have more nodes which you want _not_ to be considered masters, then run the following on each. Note that the command syntax differs slightly from the masters (*which is why k3s deploys this as k3s-agent instead*)
|
||||
|
||||
```bash
|
||||
MYSECRET=iambatman
|
||||
curl -fL https://get.k3s.io | K3S_TOKEN=${MYSECRET} \
|
||||
K3S_URL=https://<IP OF FIRST MASTER>:6443 \
|
||||
sh -s -
|
||||
```
|
||||
|
||||
!!! question "y no kubectl on agent?"
|
||||
If you tried to run `k3s kubectl` on an agent, you'll notice that it returns an error about `localhost:8080` being refused. This is **normal**, and it happens because agents aren't necessarily "trusted" to the same degree that masters are, and so the cluster admin credentials are **not** saved to the filesystem, as they are with masters.
|
||||
|
||||
!!! tip "^Z undo undo ..."
|
||||
Oops! Did you mess something up? Just run `k3s-agent-uninstall.sh` to wipe all traces of K3s agent, and start over!
|
||||
|
||||
## Release the kubectl!
|
||||
|
||||
k3s will have saved your kubeconfig file on the masters to `/etc/rancher/k3s/k3s.yaml`. This file contains the necessary config and certificates to administer your cluster, and should be treated with the same respect and security as your root password. To interact with the cluster, you need to tell the kubectl command where to find this `KUBECONFIG` file. There are a few ways to do this...
|
||||
|
||||
1. Prefix your `kubectl` commands with `k3s`. i.e., `kubectl cluster-info` becomes `k3s kubectl cluster-info`
|
||||
2. Update your environment variables in your shell to set `KUBECONFIG` to `/etc/rancher/k3s/k3s.yaml`
|
||||
3. Copy `/etc/rancher/k3s/k3s.yaml` to `~/.kube/config`, which is the default location `kubectl` will look in (*options 2 and 3 are sketched below*)
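For example, options 2 and 3 might look something like this (*a sketch, assuming a root shell on a master node*):

```bash
# Option 2: point kubectl at the k3s kubeconfig for this shell session
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
kubectl cluster-info

# Option 3: copy the kubeconfig to kubectl's default location
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
```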
|
||||
|
||||
Examine your beautiful new cluster by running `kubectl cluster-info` [^1]
|
||||
|
||||
[^1]: Do you live in the CLI? Install the kubectl autocompletion for [bash](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-bash-linux/) or [zsh](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-zsh/) to make your life much easier!
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
65
manuscript/kubernetes/deployment/flux/design.md
Normal file
@@ -0,0 +1,65 @@
|
||||
---
|
||||
description: Kubernetes Flux deployment strategy - Design
|
||||
---
|
||||
# Design
|
||||
|
||||
!!! question "Shouldn't a design **precede** installation instructions?"
|
||||
In this case, I felt that an [installation](/kubernetes/deployment/flux/install/) and a practical demonstration upfront would help readers to understand the flux design, and make it simpler to then explain how to [operate](/kubernetes/deployment/flux/operate/) flux themselves! 💪
|
||||
|
||||
Flux is powerful and flexible enough to fit many use-cases. After some experience and dead-ends, I've worked out a way to deploy Flux with enough flexibility, but also enough structure, to make it an almost-invisible part of how my cluster "just works" on an ongoing basis..
|
||||
|
||||
## Diagram
|
||||
|
||||
Consider this entity relationship diagram[^1]:
|
||||
|
||||
``` mermaid
|
||||
erDiagram
|
||||
repo-path-flux-system ||..|{ app-namespace : "contains yaml for"
|
||||
repo-path-flux-system ||..|{ app-kustomization : "contains yaml for"
|
||||
repo-path-flux-system ||..|{ helmrepositories : "contains yaml for"
|
||||
|
||||
app-kustomization ||..|| repo-path-app : "points flux at"
|
||||
|
||||
flux-system-kustomization ||..|| repo-path-flux-system : "points flux at"
|
||||
|
||||
repo-path-app ||..|{ app-helmreleases: "contains yaml for"
|
||||
repo-path-app ||..|{ app-configmap: "contains yaml for"
|
||||
repo-path-app ||..|o app-sealed-secrets: "contains yaml for"
|
||||
|
||||
app-configmap ||..|| app-helmreleases : configures
|
||||
helmrepositories ||..|| app-helmreleases : "host charts for"
|
||||
|
||||
app-helmreleases ||..|{ app-containers : deploys
|
||||
app-containers }|..|o app-sealed-secrets : references
|
||||
```
|
||||
|
||||
## Explanation
|
||||
|
||||
And here's what it all means, starting from the top...
|
||||
|
||||
1. The flux-system **Kustomization** tells flux to look in the repo in `/flux-system`, and apply any YAMLs it finds (*with optional kustomize templating, if you're an uber-ninja!*).
|
||||
2. Within `/flux-system`, we've defined (for convenience), 3 subfolders, containing YAML for:
|
||||
1. `namespaces` : Any other **Namespaces** we want to deploy for our apps
|
||||
2. `helmrepositories` : Any **HelmRepositories** we later want to pull helm charts from
|
||||
3. `kustomizations` : Any **Kustomizations** we need, to tell flux to import YAMLs from **elsewhere** in the repository
|
||||
3. In turn, each app's **Kustomization** (*which we just defined above*) tells flux to look in the repo in the `/<app name>` path, and apply any YAMLs it finds (*with optional kustomize templating, if you're an uber-ninja!*).
|
||||
4. Within the `/<app name>` path, we define **at least** the following:
|
||||
1. A **HelmRelease** for the app, telling flux which version of what chart to apply from which **HelmRepository**
|
||||
2. A **ConfigMap** for the HelmRelease, which contains all the custom (*and default!*) values for the chart
|
||||
5. Of course, we can also put any **other** YAML into the `/<app name>` path in the repo, which may include additional ConfigMaps, SealedSecrets (*for safely storing secrets in a repo*), Ingresses, etc.
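To make this concrete, here's a minimal sketch of what an app's **Kustomization** might look like (*names, paths and API version are illustrative, and may differ from your flux version*):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: podinfo
  namespace: flux-system
spec:
  interval: 15m
  path: ./podinfo   # the /<app name> path in the repo
  prune: true       # remove objects which disappear from the repo
  sourceRef:
    kind: GitRepository
    name: flux-system
```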
|
||||
|
||||
!!! question "That seems overly complex!"
|
||||
> "Why not just stick all the YAML into one folder and let flux reconcile it all-at-once?"
|
||||
|
||||
Several reasons:
|
||||
|
||||
* We need to be able to deploy multiple copies of the same helm chart into different namespaces. Imagine if you wanted to deploy a "postgres" helm chart into a namespace for KeyCloak, plus another one for NextCloud. Putting each HelmRelease resource into its own namespace allows us to do this, while sourcing them all from a common HelmRepository
|
||||
* As your cluster grows in complexity, you end up with dependency issues, and sometimes you need one chart deployed first, in order to create CRDs which are depended upon by a second chart (*like Prometheus' ServiceMonitor*). Isolating apps to a kustomization-per-app means you can implement dependencies and health checks to allow a complex cluster design without chicken vs egg problems!
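To illustrate that last point, a kustomization-per-app design lets you express dependencies and health checks roughly like this (*a sketch only; names are hypothetical*):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: my-app
  namespace: flux-system
spec:
  interval: 15m
  path: ./my-app
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  dependsOn:
    # Don't reconcile my-app until the chart providing its CRDs is ready
    - name: kube-prometheus-stack
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment
      name: my-app
      namespace: my-app
```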
|
||||
|
||||
## Got it?
|
||||
|
||||
Good! I describe how to put this design into action on the [next page](/kubernetes/deployment/flux/operate/)...
|
||||
|
||||
[^1]: ERDs are fancy diagrams for nERDs which [represent cardinality between entities](https://en.wikipedia.org/wiki/Entity%E2%80%93relationship_model#Crow's_foot_notation) scribbled using the foot of a crow 🐓
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
134
manuscript/kubernetes/deployment/flux/install.md
Normal file
@@ -0,0 +1,134 @@
|
||||
---
|
||||
description: Kubernetes Flux deployment strategy - Installation
|
||||
---
|
||||
|
||||
# Flux Installation
|
||||
|
||||
[Flux](https://fluxcd.io/) is a set of continuous and progressive delivery solutions for Kubernetes that are open and extensible.
|
||||
|
||||
Using flux to manage deployments into the cluster means:
|
||||
|
||||
1. All change is version-controlled (*i.e. "GitOps"*)
|
||||
2. It's not necessary to expose the cluster API (*i.e., which would otherwise be the case if you were using CI*)
|
||||
3. Deployments can be paused, rolled back, examined, and debugged using Kubernetes primitives and tooling
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] [Install the flux CLI tools](https://fluxcd.io/docs/installation/#install-the-flux-cli) on a host which has access to your cluster's apiserver.
|
||||
* [x] Create a GitHub [personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line) that can create repositories by checking all permissions under repo.
|
||||
* [x] Create a private GitHub repository dedicated to your flux deployments
|
||||
|
||||
## Basics
|
||||
|
||||
Here's a simplified way to think about the various flux components..
|
||||
|
||||
1. You need a source for flux to look at. This is usually a Git repository, although it can also be a helm repository or an S3 bucket. A source defines the entire repo (*not a path or a folder structure*).
|
||||
2. Within your source, you define one or more kustomizations. Each kustomization is a _location_ on your source (*i.e., myrepo/nginx*) containing YAML files to be applied directly to the API server.
|
||||
3. The YAML files inside the kustomization include:
|
||||
1. HelmRepositories (*think of these as the repos you'd add to helm with `helm repo`*)
|
||||
2. HelmReleases (*these are charts which live in HelmRepositories*)
|
||||
3. Any other valid Kubernetes YAML manifests (*i.e., ConfigMaps, etc.*)
|
||||
|
||||
## Preparation
|
||||
|
||||
### Install flux CLI
|
||||
|
||||
This section is a [direct copy of the official docs](https://fluxcd.io/docs/installation/#install-the-flux-cli), to save you having to open another tab..
|
||||
|
||||
=== "Homebrew (macOS/Linux)"
|
||||
|
||||
With [Homebrew](https://brew.sh/) for macOS and Linux:
|
||||
|
||||
```bash
|
||||
brew install fluxcd/tap/flux
|
||||
```
|
||||
|
||||
=== "Bash (macOS/Linux)"
|
||||
|
||||
With Bash for macOS and Linux:
|
||||
|
||||
```bash
|
||||
curl -s https://fluxcd.io/install.sh | sudo bash
|
||||
```
|
||||
|
||||
=== "Chocolatey"
|
||||
|
||||
With [Chocolatey](https://chocolatey.org/) for Windows:
|
||||
|
||||
```bash
|
||||
choco install flux
|
||||
```
|
||||
|
||||
### Create GitHub Token
|
||||
|
||||
Create a GitHub [personal access token](https://github.com/settings/tokens) that can create repositories by checking all permissions under repo. (*we'll use the token in the bootstrapping step below*)
|
||||
|
||||
### Create GitHub Repo
|
||||
|
||||
Now we'll create a repo for flux - it can (*and probably should!*) be private. I've created a [template repo to get you started](https://github.com/geek-cookbook/template-flux/generate), but you could simply start with a blank repo too.[^1]
|
||||
|
||||
### Bootstrap Flux
|
||||
|
||||
Having prepared all of the above, we're now ready to deploy flux. Before we start, take a look at all the running pods in the cluster, with `kubectl get pods -A`. You should see something like this...
|
||||
|
||||
```bash
|
||||
root@shredder:~# k3s kubectl get pods -A
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system coredns-7448499f4d-qfszx 1/1 Running 0 6m32s
|
||||
kube-system local-path-provisioner-5ff76fc89d-rqh52 1/1 Running 0 6m32s
|
||||
kube-system metrics-server-86cbb8457f-25688 1/1 Running 0 6m32s
|
||||
```
|
||||
|
||||
Now, run a customized version of the following:
|
||||
|
||||
```bash
|
||||
# Export the personal access token created above, so the flux CLI can use it
export GITHUB_TOKEN=<your-token>
|
||||
flux bootstrap github \
|
||||
--owner=my-github-username \
|
||||
--repository=my-github-username/my-repository \
|
||||
--personal
|
||||
```
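If you'd like to sanity-check things, the flux CLI includes a `check` subcommand (*run the `--pre` variant before bootstrapping, and the plain variant afterwards*):

```bash
flux check --pre   # verify kubectl / Kubernetes version prerequisites
flux check         # verify the flux controllers once bootstrapped
```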
|
||||
|
||||
Once the flux bootstrap has completed without errors, list the pods in the cluster again, with `kubectl get pods -A`. This time, you'll see something like this:
|
||||
|
||||
```bash
|
||||
root@shredder:~# k3s kubectl get pods -A
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
flux-system helm-controller-f7c5b6c56-nk7rm 1/1 Running 0 5m48s
|
||||
flux-system kustomize-controller-55db56f44f-4kqs2 1/1 Running 0 5m48s
|
||||
flux-system notification-controller-77f68bf8f4-9zlw9 1/1 Running 0 5m48s
|
||||
flux-system source-controller-8457664f8f-8qhhm 1/1 Running 0 5m48s
|
||||
kube-system coredns-7448499f4d-qfszx 1/1 Running 0 15m
|
||||
kube-system local-path-provisioner-5ff76fc89d-rqh52 1/1 Running 0 15m
|
||||
kube-system metrics-server-86cbb8457f-25688 1/1 Running 0 15m
|
||||
traefik svclb-traefik-ppvhr 2/2 Running 0 5m31s
|
||||
traefik traefik-f48b94477-d476p 1/1 Running 0 5m31s
|
||||
root@shredder:~#
|
||||
```
|
||||
|
||||
### What just happened?
|
||||
|
||||
Flux installed its controllers into the `flux-system` namespace, and created two new objects:
|
||||
|
||||
1. A **GitRepository** called `flux-system`, pointing to your GitHub repo.
|
||||
2. A **Kustomization** called `flux-system`, pointing to the `flux-system` directory in the above repo.
|
||||
|
||||
If you used my template repo, some extra things also happened..
|
||||
|
||||
1. I'd pre-populated the `flux-system` directory in the template repo with 3 folders:
|
||||
1. [helmrepositories](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/helmrepositories), for storing repositories used for deploying helm charts
|
||||
2. [kustomizations](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/kustomizations), for storing additional kustomizations *(which in turn can reference other paths in the repo*)
|
||||
3. [namespaces](https://github.com/geek-cookbook/template-flux/tree/main/flux-system/namespaces), for storing namespace manifests (*since these need to exist before we can deploy helmreleases into them*)
|
||||
2. Because the `flux-system` Kustomization includes everything **recursively** under the `flux-system` path in the repo, all of the above were **also** applied to the cluster
|
||||
3. I'd pre-prepared a [Namespace](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/namespaces/namespace-podinfo.yaml), [HelmRepository](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/helmrepositories/helmrepository-podinfo.yaml), and [Kustomization](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/kustomizations/kustomization-podinfo.yaml) for "podinfo", a simple example application, so these were applied to the cluster
|
||||
4. The kustomization we added for podinfo refers to the `/podinfo` path in the repo, so everything in **this** folder was **also** applied to the cluster
|
||||
5. In the `/podinfo` path of the repo is a [HelmRelease](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/helmrelease-podinfo.yaml) (*an object describing how to deploy a helm chart*), and a [ConfigMap](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/configmap-pofinfo-helm-chart-value-overrides-configmap.yaml) (*which contains the `values.yaml` for the podinfo helm chart*)
|
||||
6. Flux recognized the podinfo **HelmRelease**, applied it along with the values in the **ConfigMap**, and consequently we have podinfo deployed from the latest helm chart, into the cluster, and managed by Flux! 💪
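If you'd like to poke at what flux just created, something like the following should show the objects described above (*output will vary*):

```bash
flux get sources git          # the flux-system GitRepository
flux get kustomizations       # the flux-system and podinfo Kustomizations
flux get helmreleases -A      # the podinfo HelmRelease
kubectl get pods -n podinfo   # the pods deployed by the podinfo chart
```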
|
||||
|
||||
## Wait, but why?
|
||||
|
||||
That's best explained on the [next page](/kubernetes/deployment/flux/design/), describing the design we're using...
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: The [template repo](https://github.com/geek-cookbook/template-flux/) also "bootstraps" a simple example re how to [operate flux](/kubernetes/deployment/flux/operate/), by deploying the podinfo helm chart.
|
||||
158
manuscript/kubernetes/deployment/flux/operate.md
Normal file
@@ -0,0 +1,158 @@
|
||||
---
|
||||
description: Kubernetes Flux deployment strategy - Operation
|
||||
---
|
||||
|
||||
# Operation
|
||||
|
||||
Having described [how to install flux](/kubernetes/deployment/flux/install/), and [how our flux deployment design works](/kubernetes/deployment/flux/design/), let's finish by exploring how to **use** flux to deploy helm charts into a cluster!
|
||||
|
||||
## Deploy App
|
||||
|
||||
We'll need 5 files per app to deploy and manage our apps using flux. The example below will use the following highlighted files:
|
||||
|
||||
```hl_lines="4 6 8 10 11"
|
||||
├── README.md
|
||||
├── flux-system
|
||||
│ ├── helmrepositories
|
||||
│ │ └── helmrepository-podinfo.yaml
|
||||
│ ├── kustomizations
|
||||
│ │ └── kustomization-podinfo.yaml
|
||||
│ └── namespaces
|
||||
│ └── namespace-podinfo.yaml
|
||||
└── podinfo
|
||||
├── configmap-podinfo-helm-chart-value-overrides.yaml
|
||||
└── helmrelease-podinfo.yaml
|
||||
```
|
||||
|
||||
???+ question "5 files! That seems overly complex!"
|
||||
> "Why not just stick all the YAML into one folder and let flux reconcile it all-at-once?"
|
||||
|
||||
Several reasons:
|
||||
|
||||
* We need to be able to deploy multiple copies of the same helm chart into different namespaces. Imagine if you wanted to deploy a "postgres" helm chart into a namespace for KeyCloak, plus another one for NextCloud. Putting each HelmRelease resource into its own namespace allows us to do this, while sourcing them all from a common HelmRepository
|
||||
* As your cluster grows in complexity, you end up with dependency issues, and sometimes you need one chart deployed first, in order to create CRDs which are depended upon by a second chart (*like Prometheus' ServiceMonitor*). Isolating apps to a kustomization-per-app means you can implement dependencies and health checks to allow a complex cluster design without chicken vs egg problems!
|
||||
* I like to use the one-object-per-yaml-file approach. Kubernetes is complex enough without trying to define multiple objects in one file, or having confusingly-generic filenames such as `app.yaml`! 🤦♂️
|
||||
|
||||
### Identify target helm chart
|
||||
|
||||
Identify your target helm chart. Let's take podinfo as an example. Here's the [official chart](https://github.com/stefanprodan/podinfo/tree/master/charts/podinfo), and here's the [values.yaml](https://github.com/stefanprodan/podinfo/tree/master/charts/podinfo/values.yaml) which describes the default values passed to the chart (*and the options the user has to make changes*).
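If you have the helm v3 CLI installed (*optional - flux doesn't need it*), you can also pull the chart's default values down locally to browse them. A quick sketch:

```bash
# Optional: grab the chart's default values locally for easy reference
helm repo add podinfo https://stefanprodan.github.io/podinfo
helm repo update
helm show values podinfo/podinfo > podinfo-default-values.yaml
```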
|
||||
|
||||
### Create HelmRepository
|
||||
|
||||
The README instructs users to add the repo "podinfo" with the URL `https://stefanprodan.github.io/podinfo`, so
|
||||
create a suitable HelmRepository YAML in `flux-system/helmrepositories/helmrepository-podinfo.yaml`. Here's [my example](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/helmrepositories/helmrepository-podinfo.yaml).
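If you'd rather hand-craft it than copy mine, it would look something like the below (*a minimal sketch, following the same pattern as the other HelmRepository examples in this guide*):

??? example "Here's an example HelmRepository..."

    ```yaml
    apiVersion: source.toolkit.fluxcd.io/v1beta1
    kind: HelmRepository
    metadata:
      name: podinfo
      namespace: flux-system
    spec:
      interval: 15m                               # how often flux re-checks the repo index
      url: https://stefanprodan.github.io/podinfo # the chart repo URL from podinfo's README
    ```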
|
||||
|
||||
!!! question "Why such obtuse file names?"
|
||||
> Why not just call the HelmRepository YAML `podinfo.yaml`? Why prefix the filename with the API object `helmrepository-`?
|
||||
|
||||
We're splitting the various "bits" which define this app into multiple YAMLs, and we'll soon have multiple apps in our repo, each with their own set of "bits". It gets very confusing quickly, when comparing git commit diffs, if you're not explicitly clear on what file you're working on, or which changes you're reviewing. Plus, adding the API object name to the filename provides extra "metadata" to the file structure, and makes "fuzzy searching" for quick-opening of files in tools like VSCode more effective.
|
||||
|
||||
### Create Namespace
|
||||
|
||||
Create a namespace for the chart. Typically you'd name this the same as your chart name. Here's [my namespace-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/namespaces/namespace-podinfo.yaml).
|
||||
|
||||
??? example "Here's an example Namespace..."
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: podinfo
|
||||
```
|
||||
|
||||
### Create Kustomization
|
||||
|
||||
Create a kustomization for the chart, pointing flux to a path in the repo where the chart-specific YAMLs will be found. Here's my [kustomization-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/flux-system/kustomizations/kustomization-podinfo.yaml).
|
||||
|
||||
??? example "Here's an example Kustomization..."
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: podinfo
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
namespace: podinfo
|
||||
```
|
||||
|
||||
### Create HelmRelease
|
||||
|
||||
Now create a HelmRelease for the chart - the HelmRelease defines how the (generic) chart from the HelmRepository will be installed into our cluster. Here's my [podinfo/helmrelease-podinfo.yaml](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/helmrelease-podinfo.yaml).
|
||||
|
||||
??? example "Here's an example HelmRelease..."
|
||||
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: podinfo
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: podinfo # Must be the same as the upstream chart name
|
||||
version: 10.x # Pin to semver major versions to avoid breaking changes but still get bugfixes/updates
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: podinfo # References the HelmRepository you created earlier
|
||||
namespace: flux-system # All HelmRepositories exist in the flux-system namespace
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: podinfo # _may_ be different from the upstream chart name, but could cause confusion
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: podinfo-helm-chart-value-overrides # Align with the name of the ConfigMap containing all values
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
### Create ConfigMap
|
||||
|
||||
Finally, create a ConfigMap to be used to pass helm chart values to the chart. Note that it is **possible** to pass values directly in the HelmRelease, but.. it's messy. I find it easier to let the HelmRelease **describe** the release, and to let the configmap **configure** the release. It also makes tracking changes more straightforward.
|
||||
|
||||
As a second note, it's strictly only necessary to include in the ConfigMap the values you want to **change** from the chart's defaults. I find this approach becomes confusing as charts are continually updated by their developers, and it can obscure valuable options over time. So I place in my ConfigMaps the **entire** contents of the chart's `values.yaml` file, and then I explicitly overwrite the values I want to change.
|
||||
|
||||
!!! tip "Making chart updates simpl(er)"
|
||||
This also makes updating my values for an upstream chart refactor a simple process - I duplicate the ConfigMap, paste-overwrite with the values.yaml for the refactored/updated chart, and compare the old and new versions side-by-side, to ensure I'm still up-to-date.
|
||||
|
||||
It's too large to display nicely below, but here's my [podinfo/configmap-podinfo-helm-chart-value-overrides.yaml](https://github.com/geek-cookbook/template-flux/blob/main/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml)
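The overall structure is simple though - here's a heavily-trimmed sketch (*the real ConfigMap embeds the entire upstream `values.yaml` under the `values.yaml` key*):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: podinfo-helm-chart-value-overrides
  namespace: podinfo
data:
  values.yaml: |-
    # paste the entire upstream values.yaml here (indented), then override as needed, e.g.:
    replicaCount: 1
```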
|
||||
|
||||
!!! tip "Yes, I am sticking to my super-obtuse file naming convention!"
|
||||
Doesn't it make it easier to understand, at a glance, exactly what this YAML file is intended to be?
|
||||
|
||||
### Commit the changes
|
||||
|
||||
Simply commit your changes, sit back, and wait for flux to do its 1-min update. If you like to watch the fun, you could run `watch -n1 flux get kustomizations` so that you'll see the reconciliation take place (*if you're quick*). You can also force flux to check the repo for changes manually, by running `flux reconcile source git flux-system`.
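End-to-end, the workflow looks something like this (*a sketch - adjust paths and the commit message to taste*):

```bash
git add flux-system/ podinfo/
git commit -m "Add podinfo: namespace, helmrepository, kustomization, helmrelease, configmap"
git push

# Impatient? Nudge flux instead of waiting for the next poll:
flux reconcile source git flux-system
watch -n1 flux get kustomizations
```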
|
||||
|
||||
## Making changes
|
||||
|
||||
Let's say you decide that instead of 1 replica of the podinfo pod, you'd like 3 replicas. Edit your configmap, and change `replicaCount: 1` to `replicaCount: 3`.
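Within the ConfigMap, that's a one-line change to the embedded values (*a trimmed sketch of the relevant fragment*):

```yaml
data:
  values.yaml: |-
    # ...
    replicaCount: 3 # was 1
    # ...
```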
|
||||
|
||||
Commit your changes, and once again do the waiting / impatient-reconciling jig. This time you'll have to wait up to 15 minutes though...
|
||||
|
||||
!!! question "Why 15 minutes?"
|
||||
> I thought we check the repo every minute?
|
||||
|
||||
Yes, we check the entire GitHub repository for changes every 1 min, and changes to a kustomization are applied immediately. I.e., your podinfo ConfigMap gets updated within a minute (roughly). But the interval value for the HelmRelease is set to 15 minutes, so you could be waiting for as long as 15 minutes for flux to re-reconcile your HelmRelease with the ConfigMap, and to apply any changes. I've found that setting the HelmRelease interval too low causes (a) lots of unnecessary resource usage on behalf of flux, and (b) less stability when you have a large number of HelmReleases, some of which depend on each other.
|
||||
|
||||
You can force a HelmRelease to reconcile, by running `flux reconcile helmrelease -n <namespace> <name of helmrelease>`
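For our podinfo example, that would be:

```bash
flux reconcile helmrelease -n podinfo podinfo
```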
|
||||
|
||||
## Success!
|
||||
|
||||
We did it. The Holy Grail. We deployed an application into the cluster, without touching the cluster. Pinch yourself, and then prove it worked by running `flux get kustomizations`, or `kubectl get helmreleases -n podinfo`.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: Got suggestions for improvements here? Shout out in the comments below!
|
||||
22
manuscript/kubernetes/deployment/index.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
description: Kubernetes deployment strategies
|
||||
---
|
||||
|
||||
# Deployment
|
||||
|
||||
So far our Kubernetes journey has been fairly linear - your standard "geek follows instructions" sort of deal.
|
||||
|
||||
When it comes to a deployment methodology, there are a few paths you can take, and it's possible to "mix-and-match" if you want to (*and if you enjoy extra pain and frustration!*)
|
||||
|
||||
Being declarative, Kubernetes is "driven" by your definitions of an intended state. I.e., "*I want a minecraft server and a 3-node redis cluster*". The state is defined by resources (Pod, Deployment, PVC, etc.), which you apply to the Kubernetes apiserver, normally as YAML.
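For example, a minimal (*purely illustrative*) Deployment manifest expressing "run 3 replicas of this container" looks like this:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example
spec:
  replicas: 3                # the intended state: keep 3 copies running
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
        - name: example
          image: nginx:1.21  # illustrative image/tag only
```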
|
||||
|
||||
Now you _could_ hand-craft some YAML files, and manually apply these to the apiserver, but there are much smarter and more scalable ways to drive Kubernetes.
|
||||
|
||||
The typical methods of deploying applications into Kubernetes, sorted from least to most desirable and safe are:
|
||||
|
||||
1. A human applies YAML directly to the apiserver.
|
||||
2. A human applies a helm chart directly to the apiserver.
|
||||
3. A human updates a version-controlled set of configs, and a CI process applies YAML/helm chart directly to the apiserver.
|
||||
4. A human updates a version-controlled set of configs, and a trusted process _within_ the cluster "reaches out" to the config, and applies it to itself.
|
||||
|
||||
In our case, #4 is achieved with [Flux](/kubernetes/deployment/flux/).
|
||||
@@ -1,313 +0,0 @@
|
||||
# DIY Kubernetes
|
||||
|
||||
If you are looking for a little more of a challenge, or just don't have the money to fork out to managed Kubernetes, you're in luck.
|
||||
Kubernetes provides many ways to run a cluster; by far the simplest is `minikube`, but there are other methods, like `k3s`, or using `drp` to deploy a cluster.
|
||||
After all, DIY is in our DNA.
|
||||
|
||||
## Ingredients
|
||||
|
||||
1. Basic knowledge of Kubernetes terms (Will come in handy) [Start](/kubernetes/start)
|
||||
2. Some Linux machines (Depends on what recipe you follow)
|
||||
|
||||
## Minikube
|
||||
|
||||
First, what is minikube?
|
||||
Minikube is a method of running Kubernetes on your local machine.
|
||||
It is mainly targeted at developers looking to test whether their application will work with Kubernetes, without deploying it to a production cluster. For this reason,
I do not recommend running your long-term cluster on minikube: it isn't designed for production use, and it's only a single-node cluster.
|
||||
|
||||
If you want to use minikube, there is a guide below but again, I recommend using something more production-ready like `k3s` or `drp`
|
||||
|
||||
### Ingredients
|
||||
|
||||
1. A Fresh Linux Machine
|
||||
2. Some basic Linux knowledge (or can just copy-paste)
|
||||
|
||||
!!! note
|
||||
Make sure you are running a SystemD based distro like Ubuntu.
|
||||
Although minikube will run on macOS and Windows,
they add extra complexity to the installation, since they
require a Linux-based image running in a VM which, although
minikube will manage it for you, is still another moving part. And
even then, who uses Windows or macOS in production anyways? 🙂
|
||||
If you are serious about running on windows/macOS,
|
||||
check the official MiniKube guides
|
||||
[here](https://minikube.sigs.k8s.io/docs/start/)
|
||||
|
||||
### Installation
|
||||
|
||||
After booting yourself up a fresh Linux machine and getting to a console,
|
||||
you can now install minikube.
|
||||
|
||||
Download and install our minikube binary
|
||||
|
||||
```sh
|
||||
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
|
||||
sudo install minikube-linux-amd64 /usr/local/bin/minikube
|
||||
```
|
||||
|
||||
Now we can boot up our cluster
|
||||
|
||||
```sh
|
||||
sudo minikube start --vm-driver=none
|
||||
#Start our minikube instance, and make it use the machine to host the cluster, instead of a VM
|
||||
sudo minikube config set vm-driver none #Set our default vm driver to none
|
||||
```
|
||||
|
||||
You are now set up with minikube!
|
||||
|
||||
!!! warning
|
||||
MiniKube is not a production-grade method of deploying Kubernetes
|
||||
|
||||
## K3S
|
||||
|
||||
What is k3s?
|
||||
K3s is a production-ready method of deploying Kubernetes on many machines,
|
||||
where a full Kubernetes deployment is not required - AKA your cluster (unless you're a big SaaS company, in which case, can I get a job?).
|
||||
|
||||
### Ingredients
|
||||
|
||||
1. A handful of Linux machines (3 or more, virtualized or not)
|
||||
2. Some Linux knowledge.
|
||||
3. Patience.
|
||||
|
||||
### Setting your Linux Machines up
|
||||
|
||||
Firstly, my flavour of choice for deployment is Ubuntu Server,
|
||||
although it is not as enterprise-friendly as RHEL (That's Red Hat Enterprise Linux for my less geeky readers) or CentOS (The free version of RHEL).
|
||||
Ubuntu ticks all the boxes for k3s to run on and allows you to follow lots of other guides on managing and maintaining your Ubuntu server.
|
||||
|
||||
Firstly, download yourself a version of Ubuntu Server from [here](https://ubuntu.com/download/server) (Whatever is latest)
|
||||
Then spin yourself up as many systems as you need with the following guide
|
||||
|
||||
!!! note
|
||||
I am running a 3 node cluster, with nodes running on Ubuntu 19.04, all virtualized with VMWare ESXi
|
||||
Your setup doesn't need to be as complex as mine, you can use 3 old Dell OptiPlex if you really want 🙂
|
||||
|
||||
1. Insert your installation medium into the machine, and boot it.
|
||||
2. Select your language
|
||||
3. Select your keyboard layout
|
||||
4. Select `Install Ubuntu`
|
||||
5. Check and modify your network settings if required, make sure to write down your IPs
|
||||
6. Select Done on Proxy, unless you use a proxy
|
||||
7. Select Done on Mirror, as it has picked the best mirror for you unless you have a local mirror you want to use (in that case you are uber-geek)
|
||||
8. Select `Use An Entire Disk` for Filesystem, and basically hit enter for the rest of the disk setup,
|
||||
just make sure to read the prompts and understand what you are doing
|
||||
9. Now that you are up to setting up the profile, this is where things change.
|
||||
You are going to want to set up the same account on all the machines, but change the server name just a tad every time.
|
||||

|
||||

|
||||
10. Now install OpenSSH on the server, if you wish to import your existing SSH key from GitHub or Launchpad,
|
||||
you can do that now and save yourself a step later.
|
||||
11. Skip over Featured Server snaps by clicking `Done`
|
||||
12. Wait for your server to install everything and drop you to a Linux prompt
|
||||
|
||||
13. Repeat for all your nodes
|
||||
|
||||
### Pre-installation of k3s
|
||||
|
||||
For the rest of this guide, you will need some sort of Linux/macOS based terminal.
|
||||
On Windows you can do this with Windows Subsystem for Linux (WSL) see [here for information on WSL.](https://aka.ms/wslinstall)
|
||||
|
||||
The rest of this guide will all be from your local terminal.
|
||||
|
||||
If you already have an SSH key generated or added an existing one, skip this step.
|
||||
From your PC, run `ssh-keygen` to generate a public and private key pair
|
||||
(You can use this instead of typing your password in every time you want to connect via ssh)
|
||||
|
||||
```sh
|
||||
$ ssh-keygen
|
||||
Generating public/private rsa key pair.
|
||||
Enter file in which to save the key (/home/thomas/.ssh/id_rsa): [enter]
|
||||
Enter passphrase (empty for no passphrase): [password]
|
||||
Enter same passphrase again: [password]
|
||||
Your identification has been saved in /home/thomas/.ssh/id_rsa.
|
||||
Your public key has been saved in /home/thomas/.ssh/id_rsa.pub.
|
||||
The key fingerprint is:
|
||||
...
|
||||
The key's randomart image is:
|
||||
...
|
||||
```
|
||||
|
||||
If you have already imported a key from GitHub or Launchpad, skip this step.
|
||||
|
||||
```sh
|
||||
$ ssh-copy-id [username]@[hostname]
|
||||
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/thomas/.ssh/id_rsa.pub"
|
||||
The authenticity of host 'thomas-k3s-node1 (theipaddress)' can't be established.
|
||||
ECDSA key fingerprint is SHA256:...
|
||||
Are you sure you want to continue connecting (yes/no)? yes
|
||||
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
|
||||
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
|
||||
thomas@thomas-k3s-node1's password: [insert your password now]
|
||||
|
||||
Number of key(s) added: 1
|
||||
```
|
||||
|
||||
You will want to do this once for every machine, replacing the hostname with the next node's hostname each time.
|
||||
|
||||
!!! note
|
||||
If your hostnames aren't resolving correctly, try adding them to your `/etc/hosts` file
|
||||
|
||||
### Installation
|
||||
|
||||
If you have access to the premix repository, you can download the ansible playbook and follow the steps contained there; if not, sit back and prepare to do it manually.
|
||||
|
||||
!!! tip
|
||||
Becoming a patron will allow you to get the ansible playbook to set up k3s on your own hosts. For as little as $5/month you can get access to the ansible playbooks for this recipe, and more!
|
||||
See [funkypenguin's Patreon](https://www.patreon.com/funkypenguin) for more!
|
||||
<!---
|
||||
(Just someone needs to remind me (HexF) to write such playbook)
|
||||
-->
|
||||
|
||||
Select one node to become your master, in my case `thomas-k3s-node1`.
|
||||
Now SSH into this node, and run the following:
|
||||
|
||||
```sh
|
||||
localpc$ ssh thomas@thomas-k3s-node1
|
||||
Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password]
|
||||
|
||||
thomas-k3s-node1$ curl -sfL https://get.k3s.io | sh -
|
||||
[sudo] password for thomas: [password entered in setup]
|
||||
[INFO] Finding latest release
|
||||
[INFO] Using v1.0.0 as release
|
||||
[INFO] Downloading hash https://github.com/rancher/k3s/releases/download/v1.0.0/sha256sum-amd64.txt
|
||||
[INFO] Downloading binary https://github.com/rancher/k3s/releases/download/v1.0.0/k3s
|
||||
[INFO] Verifying binary download
|
||||
[INFO] Installing k3s to /usr/local/bin/k3s
|
||||
[INFO] Creating /usr/local/bin/kubectl symlink to k3s
|
||||
[INFO] Creating /usr/local/bin/crictl symlink to k3s
|
||||
[INFO] Creating /usr/local/bin/ctr symlink to k3s
|
||||
[INFO] Creating killall script /usr/local/bin/k3s-killall.sh
|
||||
[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh
|
||||
[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env
|
||||
[INFO] systemd: Creating service file /etc/systemd/system/k3s.service
|
||||
[INFO] systemd: Enabling k3s unit
|
||||
Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.
|
||||
[INFO] systemd: Starting k3s
|
||||
```
|
||||
|
||||
Before we log out of the master, we need the token from it.
|
||||
Make sure to note this token down
|
||||
(please don't write it on paper, use something like `notepad` or `vim`, it's ~100 characters)
|
||||
|
||||
```sh
|
||||
thomas-k3s-node1$ sudo cat /var/lib/rancher/k3s/server/node-token
|
||||
K1097e226f95f56d90a4bab7151...
|
||||
```
|
||||
|
||||
Make sure all nodes can access each other by hostname, whether you add them to `/etc/hosts` or to your DNS server
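If you're not running DNS for your lab, a few `/etc/hosts` entries on each node will do the trick (*the IPs below are hypothetical - substitute your own*):

```sh
# /etc/hosts - example entries (use your own node IPs and hostnames)
192.168.1.11 thomas-k3s-node1
192.168.1.12 thomas-k3s-node2
192.168.1.13 thomas-k3s-node3
```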
|
||||
|
||||
Now that you have your master node set up, you can add worker nodes.
|
||||
|
||||
SSH into the other nodes, and run the following, making sure to replace the values with ones that suit your installation:
|
||||
|
||||
```sh
|
||||
localpc$ ssh thomas@thomas-k3s-node2
|
||||
Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password]
|
||||
|
||||
thomas-k3s-node2$ curl -sfL https://get.k3s.io | K3S_URL=https://thomas-k3s-node1:6443 K3S_TOKEN=K1097e226f95f56d90a4bab7151... sh -
|
||||
```
|
||||
|
||||
Now test your installation!
|
||||
|
||||
SSH into your master node
|
||||
|
||||
```sh
|
||||
ssh thomas@thomas-k3s-node1
|
||||
Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password]
|
||||
|
||||
thomas-k3s-node1$ sudo kubectl get nodes
|
||||
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
thomas-k3s-node1 Ready master 15m3s v1.16.3-k3s.2
|
||||
thomas-k3s-node2 Ready <none> 6m58s v1.16.3-k3s.2
|
||||
thomas-k3s-node3 Ready <none> 6m12s v1.16.3-k3s.2
|
||||
```
|
||||
|
||||
If you got `Ready` for all your nodes, well done! Your k3s cluster is now running! If not, try getting help in our Discord.
|
||||
|
||||
### Post-Installation
|
||||
|
||||
Now you can get yourself a kubeconfig for your cluster.
|
||||
SSH into your master node, and run the following
|
||||
|
||||
```sh
|
||||
localpc$ ssh thomas@thomas-k3s-node1
|
||||
Enter passphrase for key '/home/thomas/.ssh/id_rsa': [ssh key password]
|
||||
|
||||
thomas-k3s-node1$ sudo kubectl config view --flatten
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBD...
|
||||
server: https://127.0.0.1:6443
|
||||
name: default
|
||||
contexts:
|
||||
- context:
|
||||
cluster: default
|
||||
user: default
|
||||
name: default
|
||||
current-context: default
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: default
|
||||
user:
|
||||
password: thisishowtolosecontrolofyourk3s
|
||||
username: admin
|
||||
```
|
||||
|
||||
Make sure to change `clusters.cluster.server` to have the master node's name instead of `127.0.0.1`, in my case making it `https://thomas-k3s-node1:6443`
|
||||
|
||||
!!! warning
|
||||
This kubeconfig file can grant full access to your Kubernetes installation, I recommend you protect this file just as well as you protect your passwords
|
||||
|
||||
You will probably want to save this kubeconfig file into a file on your local machine, say `my-k3s-cluster.yml` or `where-8-hours-of-my-life-went.yml`.
|
||||
Now test it out!
|
||||
|
||||
```sh
|
||||
localpc$ kubectl --kubeconfig=my-k3s-cluster.yml get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
thomas-k3s-node1 Ready master 495m v1.16.3-k3s.2
|
||||
thomas-k3s-node2 Ready <none> 488m v1.16.3-k3s.2
|
||||
thomas-k3s-node3 Ready <none> 487m v1.16.3-k3s.2
|
||||
```
|
||||
|
||||
<!--
|
||||
To the reader concerned about my health, no I did not actually spend 8 hours writing this guide, Instead I spent most of it helping you guys on the discord (👍) and other stuff
|
||||
-->
|
||||
|
||||
That is all! You have yourself a Kubernetes cluster for you and your dog to enjoy.
|
||||
|
||||
## DRP
|
||||
|
||||
DRP, or the Digital Rebar Provisioning tool, is designed to automatically set up your cluster, installing an operating system for you, and doing all the configuration we did manually in the k3s setup.
|
||||
|
||||
This section is WIP, instead, try using the K3S guide above 🙂
|
||||
|
||||
## Where from now
|
||||
|
||||
Now that you have wasted half a lifetime on installing your very own cluster, you can install more to it. Like a load balancer!
|
||||
|
||||
* [Start](/kubernetes/) - Why Kubernetes?
|
||||
* [Design](/kubernetes/design/) - How does it fit together?
|
||||
* Cluster (this page) - Setup a basic cluster
|
||||
* [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access
|
||||
* [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data
|
||||
* [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks
|
||||
* [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm
|
||||
|
||||
## About your guest chef
|
||||
|
||||
This article, believe it or not, was not diced up by your regular chef (funkypenguin).
|
||||
Instead, today's article was diced up by HexF, a fellow kiwi (hence a lot of kiwi references) who enjoys his sysadmin time.
|
||||
Feel free to talk to today's chef in the discord, or see one of his many other links that you can follow below
|
||||
|
||||
[Twitter](https://twitter.com/hexf_me) • [Website](https://hexf.me/) • [Github](https://github.com/hexf)
|
||||
|
||||
<!--
|
||||
The links above are just redirect links incase anything ever changes, and it has analytics too
|
||||
-->
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
1081
manuscript/kubernetes/external-dns.md
Normal file
@@ -3,68 +3,47 @@
|
||||
My first introduction to Kubernetes was a children's story:
|
||||
|
||||
<!-- markdownlint-disable MD033 -->
|
||||
<iframe width="560" height="315" src="https://www.youtube.com/embed/4ht22ReBjno" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
<iframe width="560" height="315" src="https://www.youtube.com/embed/R9-SOzep73w" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
|
||||
|
||||
## Wait, what?
|
||||
## Why Kubernetes?
|
||||
|
||||
Why would you want to use Kubernetes for your self-hosted recipes over simple Docker Swarm? Here's my personal take..
|
||||
Why would you want to use Kubernetes for your self-hosted recipes, over simple Docker Swarm? Here's my personal take..
|
||||
|
||||
I use Docker swarm both at home (_on a single-node swarm_), and on a trio of Ubuntu 16.04 VPSs in a shared lab OpenStack environment.
|
||||
### Docker Swarm is dead
|
||||
|
||||
In both cases above, I'm responsible for maintaining the infrastructure supporting Docker - either the physical host, or the VPS operating systems.
|
||||
Sorry to say, but from where I sit, there's no innovation or development happening in docker swarm.
|
||||
|
||||
I started experimenting with Kubernetes as a plan to improve the reliability of my cryptocurrency mining pools (_the contended lab VPSs negatively impacted the likelihood of finding a block_), and as a long-term replacement for my aging home server.
|
||||
Yes, I know, after Docker Inc [sold its platform business to Mirantis in Nov 2019](https://www.mirantis.com/blog/mirantis-acquires-docker-enterprise-platform-business/), in Feb 2020 Mirantis [back-tracked](https://www.mirantis.com/blog/mirantis-will-continue-to-support-and-develop-docker-swarm/) on their original plan to sunset swarm after 2 years, and stated that they'd continue to invest in swarm. But seriously, look around. Nobody is interested in swarm right now...
|
||||
|
||||
What I enjoy about building recipes and self-hosting is **not** the operating system maintenance, it's the tools and applications that I can quickly launch in my swarms. If I could **only** play with the applications, and not bother with the maintenance, I totally would.
|
||||
... Not even Mirantis! As of Nov 2021, the Mirantis blog tag "[kubernetes](https://www.mirantis.com/tag/kubernetes/)" had 8 posts within the past month. The tag "[docker](https://www.mirantis.com/tag/docker/)" has 8 posts in the past **2 years**, the 8th being the original announcement of the Docker acquisition. The tag "[docker swarm](https://www.mirantis.com/tag/docker-swarm/)" has only 2 posts, **ever**.
|
||||
|
||||
Kubernetes (_on a cloud provider, mind you!_) does this for me. I feed Kubernetes a series of YAML files, and it takes care of all the rest, including version upgrades, node failures/replacements, disk attach/detachments, etc.
|
||||
Dead. [Extinct. Like the dodo](https://youtu.be/NxnZC9L_YXE?t=47).
|
||||
|
||||
### Once you go Kubernetes, you can't go back
|
||||
|
||||
For years now, [I've provided Kubernetes design consulting](https://www.funkypenguin.co.nz/work-with-me/) to small clients and large enterprises. The implementation details in each case vary widely, but there are some primitives which I've come to take for granted, and I wouldn't easily do without. A few examples:
|
||||
|
||||
* **CLI drives API from anywhere**. From my laptop, I can use my credentials to manage any number of Kubernetes clusters, simply by switching kubectl "context". Each interaction is an API call against an HTTPS endpoint. No SSHing to hosts and manually running docker commands as root!
|
||||
* **GitOps is magic**. There are multiple ways to achieve it, but having changes you commit to a repo automatically applied to a cluster, "Just Works(tm)". The process removes so much friction from making changes that it makes you more productive, and a better "gitizen" ;P
|
||||
* **Controllers are trustworthy**. I've come to trust that when I tell Kubernetes to run 3 replicas on separate hosts, to scale up a set of replicas based on CPU load metrics, or to provision a blob of storage for a given workload, this will be done in a consistent and visible way. I'll be able to see logs / details for each action taken by the controller, and adjust my own instructions/configuration accordingly if necessary.
|
||||
|
||||
## Uggh, it's so complicated!
|
||||
|
||||
Yes, but that's a necessary sacrifice for the maturity, power and flexibility it offers. Like docker-compose syntax, Kubernetes uses YAML to define its various, interworking components.
|
||||
Yes, it's more complex than Docker Swarm. And that complexity can definitely be a barrier, although with improved tooling, it's continually becoming less so. However, you don't need to be a mechanic to drive a car or to use a chainsaw. You just need a basic understanding of some core primitives, and then you get on with using the tool to achieve your goals, without needing to know every detail about how it works!
|
||||
|
||||
Let's talk some definitions. Kubernetes.io provides a [glossary](https://kubernetes.io/docs/reference/glossary/?fundamental=true). My definitions are below:
|
||||
Your end-goal is probably "*I want to reliably self-host services I care about*", and not "*I want to fully understand a complex, scalable, and highly sophisticated container orchestrator*". [^1]
|
||||
|
||||
- **Node** : A compute instance which runs docker containers, managed by a cluster master.
|
||||
|
||||
- **Cluster** : One or more "worker nodes" which run containers. Very similar to a Docker Swarm node. In most cloud provider deployments, the [master node for your cluster is provided free of charge](https://www.sdxcentral.com/articles/news/google-eliminates-gke-management-fees-kubernetes-clusters/2017/11/), but you don't get to access it.
|
||||
|
||||
- **Pod** : A collection of one or more containers. If a pod runs multiple containers, these containers always run on the same node.
|
||||
|
||||
- **Deployment** : A definition of a desired state. I.e., "I want a pod with containers A and B running". The Kubernetes master then ensures that any changes necessary to maintain the state are taken. (_I.e., if a pod crashes, but is supposed to be running, a new pod will be started_)
|
||||
|
||||
- **Service** : Unlike Docker Swarm, service discovery is not _built in_ to Kubernetes. For your pods to discover each other (say, to have "webserver" talk to "database"), you create a service for each pod, and refer to these services when you want your containers (_in pods_) to talk to each other. Complicated, yes, but the abstraction allows you to do powerful things, like auto-scale-up a bunch of database "pods" behind a service called "database", or perform a rolling container image upgrade with zero impact.
|
||||
|
||||
- **External access** : Services not only allow pods to discover each other, but they're also the mechanism through which the outside world can talk to a container. At the simplest level, this is akin to exposing a container port on a docker host.
|
||||
|
||||
- **Ingress** : When mapping ports to applications is inadequate (think virtual web hosts), an ingress is a sort of "inbound router" which can receive requests on one port (i.e., HTTPS), and forward them to a variety of internal pods, based on things like VHOST, etc. For us, this is the functional equivalent of what Traefik does in Docker Swarm. In fact, we use a Traefik Ingress in Kubernetes to accomplish the same.
|
||||
|
||||
- **Persistent Volume** : A virtual disk which is attached to a pod, storing persistent data. Meets the requirement for shared storage from Docker Swarm. I.e., if a persistent volume (PV) is bound to a pod, and the pod dies and is recreated, or gets upgraded to a new image, the PV (and its data) is bound to the new container. PVs can be "claimed" in a YAML definition, so that your Kubernetes provider will auto-create a PV when you launch your pod. PVs can be snapshotted.
|
||||
|
||||
- **Namespace** : An abstraction to separate a collection of pods, services, ingresses, etc. A "virtual cluster within a cluster". Can be used for security, or simplicity. For example, since we don't have individual docker stacks anymore, if you commonly name your database container "db", and you want to deploy two applications which both use a database container, how will you name your services? Use namespaces to keep each application ("nextcloud" vs "kanboard") separate. Namespaces also allow you to allocate resources **limits** to the aggregate of containers in a namespace, so you could, for example, limit the "nextcloud" namespace to 2.3 CPUs and 1200MB RAM.
|
||||
So let's get on with learning how to use the tool...
|
||||
|
||||
## Mm.. maaaaybe, how do I start?
|
||||
|
||||
If you're like me, and you learn by doing, either play with the examples at <https://labs.play-with-k8s.com/>, or jump right in by setting up a Google Cloud trial (_you get \$300 credit for 12 months_), or a small cluster on [Digital Ocean](/kubernetes/cluster/).
|
||||
Primarily you need 2 things:
|
||||
|
||||
If you're the learn-by-watching type, just search for "Kubernetes introduction video". There's a **lot** of great content available.
|
||||
1. A cluster
|
||||
2. A way to deploy workloads into the cluster
|
||||
|
||||
## I'm ready, gimme some recipes!
|
||||
|
||||
As of Jan 2019, our first (_and only!_) Kubernetes recipe is a WIP for the Mosquitto [MQTT](/recipes/mqtt/) broker. It's a good, simple starter if you're into home automation (_shoutout to [Home Assistant](/recipes/homeassistant/)!_), since it only requires a single container, and a simple NodePort service.
|
||||
|
||||
I'd love for your [feedback](/support/) on the Kubernetes recipes, as well as suggestions for what to add next. The current rough plan is to replicate the Chef's Favorites recipes (_see the left-hand panel_) into Kubernetes first.
|
||||
|
||||
## Move on..
|
||||
|
||||
Still with me? Good. Move on to reviewing the design elements
|
||||
|
||||
- Start (this page) - Why Kubernetes?
|
||||
- [Design](/kubernetes/design/) - How does it fit together?
|
||||
- [Cluster](/kubernetes/cluster/) - Setup a basic cluster
|
||||
- [Load Balancer](/kubernetes/loadbalancer/) - Setup inbound access
|
||||
- [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data
|
||||
- [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks
|
||||
- [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm
|
||||
Practically, you need some extras too, but you can mix-and-match these.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: Of course, if you **do** enjoy understanding the intricacies of how your tools work, you're in good company!
|
||||
|
||||
19
manuscript/kubernetes/ingress/index.md
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
description: What is a Kubernetes Ingress?
|
||||
---
|
||||
# Ingresses
|
||||
|
||||
In Kubernetes, an Ingress is a way to describe how to route traffic coming **into** the cluster, so that (*for example*) <https://radarr.example.com> will end up on a [Radarr][radarr] pod, but <https://sonarr.example.com> will end up on a [Sonarr][sonarr] pod.
|
||||
|
||||

|
||||
|
||||
There are many popular Ingress Controllers; we're going to cover two equally useful options:
|
||||
|
||||
1. [Traefik](/kubernetes/ingress/traefik/)
|
||||
2. [Nginx](/kubernetes/ingress/nginx/)
|
||||
|
||||
Choose at least one of the above (*there may be valid reasons to use both!* [^1]), so that you can expose applications via Ingress.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: One cluster I manage uses Traefik for public services, but Nginx for internal management services such as Prometheus, etc. The idea is that you'd need one type of Ingress to help debug problems with the _other_ type!
|
||||
240
manuscript/kubernetes/ingress/nginx.md
Normal file
@@ -0,0 +1,240 @@
|
||||
---
|
||||
description: Nginx Ingress Controller
|
||||
---
|
||||
# Nginx Ingress Controller
|
||||
|
||||
The [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) is the grandpappy of Ingress Controllers, with releases dating back to at least 2016. Of course, Nginx itself is a battle-tested rock, [released in 2004](https://en.wikipedia.org/wiki/Nginx) and constantly updated / improved ever since.
|
||||
|
||||
Having such a pedigree though can make it a little awkward for the unfamiliar to configure Nginx, whereas something like [Traefik](/kubernetes/ingress/traefik/), being newer-on-the-scene, is more user-friendly, and offers (*among other features*) a free **dashboard**. (*Nginx's dashboard is only available in the commercial Nginx+ package, which is a [monumental PITA](https://www.nginx.com/blog/deploying-nginx-nginx-plus-docker/) to run*)
|
||||
|
||||
Nginx Ingress Controller does make for a nice, simple "default" Ingress controller, if you don't want to do anything fancy.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] A [load-balancer](/kubernetes/load-balancer/) solution (*either [k3s](/kubernetes/load-balancer/k3s/) or [MetalLB](/kubernetes/load-balancer/metallb/)*)
|
||||
|
||||
Optional:
|
||||
|
||||
* [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates
|
||||
* [x] [External DNS](/kubernetes/external-dns/) configured to respond to ingresses, or with a wildcard DNS entry
|
||||
|
||||
## Preparation
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-nginx-ingress-controller.yaml`:
|
||||
|
||||
??? example "Example NameSpace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: nginx-ingress-controller
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the (*prolific*) [bitnami chart repository](https://github.com/bitnami/charts/tree/master/bitnami), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-bitnami.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: bitnami
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/nginx-ingress-controller`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-nginx-ingress-controller.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: nginx-ingress-controller
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./nginx-ingress-controller
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: nginx-ingress-controller
|
||||
namespace: nginx-ingress-controller
|
||||
|
||||
```
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the nginx-ingress-controller-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/nginx-ingress-controller/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `nginx-ingress-controller/configmap-nginx-ingress-controller-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: nginx-ingress-controller-helm-chart-value-overrides
|
||||
namespace: nginx-ingress-controller
|
||||
data:
|
||||
values.yaml: |-
|
||||
# paste chart values.yaml (indented) here and alter as required
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration. It may not be necessary to change anything.
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy nginx-ingress-controller into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `nginx-ingress-controller/helmrelease-nginx-ingress-controller.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: nginx-ingress-controller
|
||||
namespace: nginx-ingress-controller
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: nginx-ingress-controller
|
||||
version: 9.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: bitnami
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: nginx-ingress-controller
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: nginx-ingress-controller-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Deploy nginx-ingress-controller
|
||||
|
||||
Having committed the above to your flux repository, you should shortly see an nginx-ingress-controller kustomization, and in the `nginx-ingress-controller` namespace, the controller pods plus a default backend pod:
|
||||
|
||||
```bash
|
||||
demo@shredder:~$ kubectl get pods -n nginx-ingress-controller
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
nginx-ingress-controller-5b849b4fbd-svbxk 1/1 Running 0 24h
|
||||
nginx-ingress-controller-5b849b4fbd-xt7vc 1/1 Running 0 24h
|
||||
nginx-ingress-controller-default-backend-867d86fb8f-t27j9 1/1 Running 0 24h
|
||||
demo@shredder:~$
|
||||
```
|
||||
|
||||
### How do I know it's working?
|
||||
|
||||
#### Test Service
|
||||
|
||||
By default, the chart will deploy nginx ingress controller's service in [LoadBalancer](/kubernetes/loadbalancer/) mode. When you use kubectl to display the service (`kubectl get services -n nginx-ingress-controller`), you'll see the external IP displayed:
|
||||
|
||||
```bash
|
||||
demo@shredder:~$ kubectl get services -n nginx-ingress-controller
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
nginx-ingress-controller LoadBalancer 10.152.183.162 172.168.209.1 80:30756/TCP,443:30462/TCP 24h
|
||||
nginx-ingress-controller-default-backend ClusterIP 10.152.183.200 <none> 80/TCP 24h
|
||||
demo@shredder:~$
|
||||
```
|
||||
|
||||
!!! question "Where does the external IP come from?"
|
||||
If you're using [k3s's load balancer](/kubernetes/loadbalancer/k3s/), the external IP will likely be the IP of the nodes running k3s. If you're using [MetalLB](/kubernetes/loadbalancer/metallb/), the external IP should come from the list of addresses in the pool you allocated.
|
||||
|
||||
Pointing your web browser to the external IP displayed should result in the default backend page (*or an nginx-branded 404*). Congratulations, you have external access to the ingress controller! 🥳
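You can run the same check from the command line, using the example external IP from the output above (*substitute your own*):

```bash
# Expect the default backend page, or an nginx-branded 404
curl -i http://172.168.209.1
```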
|
||||
|
||||
#### Test Ingress
|
||||
|
||||
Still, you didn't deploy an ingress controller to look at 404 pages! If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo configmap doesn't deploy an Ingress, but you can change this using the magic of GitOps... 🪄
|
||||
|
||||
Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change `ingress.enabled` to `true`, and set the host name to match your local domain name (*already configured using [External DNS](/kubernetes/external-dns/)*):
|
||||
|
||||
``` yaml hl_lines="2 8"
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: podinfo.local
|
||||
```
|
||||
|
||||
To:
|
||||
|
||||
``` yaml hl_lines="2 8"
|
||||
ingress:
|
||||
enabled: true
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: podinfo.<your domain name>
|
||||
```
|
||||
|
||||
Commit your changes, wait for a reconciliation, and run `kubectl get ingress -n podinfo`. You should see an ingress created matching the host defined above, and the ADDRESS value should match the service address of the nginx-ingress-controller service.
|
||||
|
||||
```bash
|
||||
root@cn1:~# kubectl get ingress -A
|
||||
NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
|
||||
podinfo podinfo <none> podinfo.example.com 172.168.209.1 80, 443 91d
|
||||
```
|
||||
|
||||
!!! question "Why is there no class value?"
|
||||
You don't **have** to define an ingress class if you only have one **class** of ingress, since typically your ingress controller will assume the default class. When you run multiple ingress controllers (say, nginx **and** [Traefik](/kubernetes/ingress/traefik/), or multiple nginx instances with different access controls), then classes become more important.
|
||||
|
||||
Now assuming your [DNS is correct](/kubernetes/external-dns/), you should be able to point your browser to the hostname you chose, and see the beautiful podinfo page! 🥳🥳
|
||||
|
||||
#### Test SSL
|
||||
|
||||
Ha, but we're not done yet! We have exposed a service via our load balancer, we've exposed a route to a service via an Ingress, but let's get rid of that nasty "insecure" message in the browser when using HTTPS...
|
||||
|
||||
Since you set up [SSL certificates](/kubernetes/ssl-certificates/), including [secret-replicator](/kubernetes/ssl-certificates/secret-replicator/), you should end up with a `letsencrypt-wildcard-cert` secret in every namespace, including `podinfo`.
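You can confirm the secret is present before proceeding:

```bash
kubectl get secret letsencrypt-wildcard-cert -n podinfo
```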
|
||||
|
||||
So once again, alter the podinfo ConfigMap to change this:
|
||||
|
||||
```yaml hl_lines="2 4"
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
```
|
||||
|
||||
To this:
|
||||
|
||||
```yaml hl_lines="2 4"
|
||||
tls:
|
||||
- secretName: letsencrypt-wildcard-cert
|
||||
hosts:
|
||||
- podinfo.<your domain name>
|
||||
```
|
||||
|
||||
Commit your changes, wait for the reconciliation, and the next time you point your browser at your ingress, you should get a beautiful, valid, officially-signed SSL certificate[^1]! 🥳🥳🥳
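Prefer the command line to the browser? Something like this (*a sketch, assuming `podinfo.example.com` is your ingress hostname*) will show the certificate's issuer and validity dates:

```bash
echo | openssl s_client -connect podinfo.example.com:443 -servername podinfo.example.com 2>/dev/null \
  | openssl x509 -noout -issuer -dates
```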
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Are things not working as expected? Watch the nginx-ingress-controller's logs with ```kubectl logs -n nginx-ingress-controller -l app.kubernetes.io/name=nginx-ingress-controller -f```.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup!
|
||||
16
manuscript/kubernetes/ingress/traefik/dashboard.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Traefik Dashboard
|
||||
|
||||
One of the advantages [Traefik](/kubernetes/ingress/traefik/) offers over [Nginx](/kubernetes/ingress/nginx/), is a native dashboard available in the open-source version (*Nginx+, the commercially-supported version, also includes a dashboard*).
|
||||
|
||||

|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] A [load-balancer](/kubernetes/load-balancer/) solution (*either [k3s](/kubernetes/load-balancer/k3s/) or [MetalLB](/kubernetes/load-balancer/metallb/)*)
|
||||
* [x] [Traefik](/kubernetes/ingress/traefik/) deployed per-design
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup!
|
||||
239
manuscript/kubernetes/ingress/traefik/index.md
Normal file
@@ -0,0 +1,239 @@
|
||||
# Traefik Ingress Controller
|
||||
|
||||
Unlike grumpy ol' man [Nginx](/kubernetes/ingress/nginx/) :older_man:, Traefik, a microservice-friendly reverse proxy, is relatively fresh in the "cloud-native" space, having been "born" :baby_bottle: [in the same year that Kubernetes was launched](https://techcrunch.com/2020/09/23/five-years-after-creating-traefik-application-proxy-open-source-project-hits-2b-downloads/).
|
||||
|
||||
Traefik natively includes some features which Nginx lacks:
|
||||
|
||||
* [x] Ability to use cross-namespace TLS certificates (*this may be accidental, but it totally works currently*)
|
||||
* [x] An elegant "middleware" implementation allowing certain requests to pass through additional layers of authentication
|
||||
* [x] A beautiful dashboard
|
||||
|
||||

|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] A [load-balancer](/kubernetes/load-balancer/) solution (*either [k3s](/kubernetes/load-balancer/k3s/) or [MetalLB](/kubernetes/load-balancer/metallb/)*)
|
||||
|
||||
Optional:
|
||||
|
||||
* [x] [Cert-Manager](/kubernetes/cert-manager/) deployed to request/renew certificates
|
||||
* [x] [External DNS](/kubernetes/external-dns/) configured to respond to ingresses, or with a wildcard DNS entry
|
||||
|
||||
## Preparation
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-traefik.yaml`:
|
||||
|
||||
??? example "Example NameSpace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: traefik
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the official [Traefik helm chart](https://github.com/traefik/traefik-helm-chart), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-traefik.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://helm.traefik.io/traefik
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/traefik`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-traefik.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./traefik
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: traefik
|
||||
namespace: traefik
|
||||
|
||||
```
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the traefik-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/traefik/traefik-helm-chart/blob/master/traefik/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `traefik/configmap-traefik-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: traefik-helm-chart-value-overrides
|
||||
namespace: traefik
|
||||
data:
|
||||
values.yaml: |-
|
||||
# paste chart values.yaml (indented) here and alter as required
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration. It may not be necessary to change anything.
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy traefik into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `traefik/helmrelease-traefik.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: traefik
|
||||
namespace: traefik
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: traefik
|
||||
version: 9.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: traefik
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: traefik
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: traefik-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Deploy traefik
|
||||
|
||||
Having committed the above to your flux repository, you should shortly see a traefik kustomization, and the traefik pods running in the `traefik` namespace:
|
||||
|
||||
```bash
|
||||
demo@shredder:~$ kubectl get pods -n traefik
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
traefik-5b849b4fbd-svbxk 1/1 Running 0 24h
|
||||
traefik-5b849b4fbd-xt7vc 1/1 Running 0 24h
|
||||
demo@shredder:~$
|
||||
```
|
||||
|
||||
### How do I know it's working?
|
||||
|
||||
#### Test Service
|
||||
|
||||
By default, the chart will deploy Traefik in [LoadBalancer](/kubernetes/loadbalancer/) mode. When you use kubectl to display the service (`kubectl get services -n traefik`), you'll see the external IP displayed:
|
||||
|
||||
```bash
|
||||
demo@shredder:~$ kubectl get services -n traefik
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
traefik LoadBalancer 10.152.183.162 172.168.209.1 80:30756/TCP,443:30462/TCP 24h
|
||||
demo@shredder:~$
|
||||
```
|
||||
|
||||
!!! question "Where does the external IP come from?"
|
||||
If you're using [k3s's load balancer](/kubernetes/loadbalancer/k3s/), the external IP will likely be the IP of the node(s) running k3s. If you're using [MetalLB](/kubernetes/loadbalancer/metallb/), the external IP should come from the list of addresses in the pool you allocated.
|
||||
|
||||
Pointing your web browser to the external IP displayed should result in a 404 page. Congratulations, you have external access to the Traefik ingress controller! 🥳
|
||||
|
||||
#### Test Ingress
|
||||
|
||||
Still, you didn't deploy an ingress controller to look at 404 pages! If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo configmap doesn't deploy an Ingress, but you can change this using the magic of GitOps... 🪄
|
||||
|
||||
Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change `ingress.enabled` to `true`, and set the host name to match your local domain name (*already configured using [External DNS](/kubernetes/external-dns/)*):
|
||||
|
||||
``` yaml hl_lines="2 8"
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: podinfo.local
|
||||
```
|
||||
|
||||
To:
|
||||
|
||||
``` yaml hl_lines="2 8"
|
||||
ingress:
|
||||
enabled: true
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: podinfo.<your domain name>
|
||||
```
|
||||
|
||||
Commit your changes, wait for a reconciliation, and run `kubectl get ingress -n podinfo`. You should see an ingress created matching the host defined above, and the ADDRESS value should match the service address of the traefik service.
|
||||
|
||||
```bash
|
||||
root@cn1:~# kubectl get ingress -A
|
||||
NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE
|
||||
podinfo podinfo <none> podinfo.example.com 172.168.209.1 80, 443 91d
|
||||
```
|
||||
|
||||
!!! question "Why is there no class value?"
|
||||
You don't **have** to define an ingress class if you only have one **class** of ingress, since typically your ingress controller will assume the default class. When you run multiple ingress controllers (say, nginx **and** [traefik](/kubernetes/ingress/traefik/), or multiple nginx instances with different access controls), classes become more important.
|
||||
|
||||
Now assuming your [DNS is correct](/kubernetes/external-dns/), you should be able to point your browser to the hostname you chose, and see the beautiful podinfo page! 🥳🥳
|
||||
|
||||
#### Test SSL
|
||||
|
||||
Ha, but we're not done yet! We have exposed a service via our load balancer, we've exposed a route to a service via an Ingress, but let's get rid of that nasty "insecure" message in the browser when using HTTPS...
|
||||
|
||||
Since you set up [SSL certificates](/kubernetes/ssl-certificates/), including [secret-replicator](/kubernetes/ssl-certificates/secret-replicator/), you should end up with a `letsencrypt-wildcard-cert` secret in every namespace, including `podinfo`.
|
||||
|
||||
So once again, alter the podinfo ConfigMap to change this:
|
||||
|
||||
```yaml hl_lines="2 4"
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
```
|
||||
|
||||
To this:
|
||||
|
||||
```yaml hl_lines="2 4"
|
||||
tls:
|
||||
- secretName: letsencrypt-wildcard-cert
|
||||
hosts:
|
||||
- podinfo.<your domain name>
|
||||
```
|
||||
|
||||
Commit your changes, wait for the reconciliation, and the next time you point your browser at your ingress, you should get a beautiful, valid, officially-signed SSL certificate[^1]! 🥳🥳🥳
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Are things not working as expected? Watch Traefik's logs with ```kubectl logs -n traefik -l app.kubernetes.io/name=traefik -f```.
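If the pods never appear at all, it's often quicker to interrogate flux itself. Assuming you have the flux CLI installed, something like this will usually point at the problem:

```bash
# What state are the kustomizations in?
flux get kustomizations

# Did the HelmRelease actually install/upgrade successfully?
flux get helmreleases -n traefik

# Dig into the events/conditions if the above shows an error
kubectl describe helmrelease traefik -n traefik
```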
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: The beauty of this design is that the same process will now work for any other application you deploy, without any additional manual effort for DNS or SSL setup!
|
||||
@@ -1,332 +0,0 @@
|
||||
# Load Balancer
|
||||
|
||||
One of the issues I encountered early on in migrating my Docker Swarm workloads to Kubernetes on GKE, was how to reliably permit inbound traffic into the cluster.
|
||||
|
||||
There were several complications with the "traditional" mechanisms of providing a load-balanced ingress, not the least of which was cost. I also found that even if I paid my cloud provider (_Google_) for a load-balancer Kubernetes service, this service required a unique IP per exposed port, which was incompatible with my mining pool empire (_mining pools need to expose multiple ports on the same DNS name_).
|
||||
|
||||
See further examination of the problem and possible solutions in the [Kubernetes design](/kubernetes/design/#the-challenges-of-external-access) page.
|
||||
|
||||
This recipe details a simple design to permit the exposure of as many ports as you like, on a single public IP, to a cluster of Kubernetes nodes running as many pods/containers as you need, with services exposed via NodePort.
|
||||
|
||||

|
||||
|
||||
## Ingredients
|
||||
|
||||
1. [Kubernetes cluster](/kubernetes/cluster/)
|
||||
2. VM _outside_ of Kubernetes cluster, with a fixed IP address. Perhaps, on a [\$5/month Digital Ocean Droplet](https://www.digitalocean.com/?refcode=e33b78ad621b).. (_yes, another referral link. Mooar 🍷 for me!_)
|
||||
3. Geek-Fu required : 🐧🐧🐧 (_complex - inline adjustments required_)
|
||||
|
||||
## Preparation
|
||||
|
||||
### Summary
|
||||
|
||||
### Create LetsEncrypt certificate
|
||||
|
||||
!!! warning
|
||||
Safety first, folks. You wouldn't run a webhook exposed to the big bad ol' internet without first securing it with a valid SSL certificate, would you? Of course not, I didn't think so!
|
||||
|
||||
Use whatever method you prefer to generate (and later, renew) your LetsEncrypt cert. The example below uses the CertBot docker image for CloudFlare DNS validation, since that's what I've used elsewhere.
|
||||
|
||||
We **could** run our webhook as a simple HTTP listener, but really, in a world where LetsEncrypt can assign you a wildcard certificate in under 30 seconds, that's unforgivable. Use the following **general** example to create a LetsEncrypt wildcard cert for your host:
|
||||
|
||||
In my case, since I use CloudFlare, I create /etc/webhook/letsencrypt/cloudflare.ini:
|
||||
|
||||
```ini
|
||||
dns_cloudflare_email=davidy@funkypenguin.co.nz
|
||||
dns_cloudflare_api_key=supersekritnevergonnatellyou
|
||||
```
|
||||
|
||||
I request my cert by running:
|
||||
|
||||
```bash
|
||||
cd /etc/webhook/
|
||||
docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare --preferred-challenges dns certonly --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini -d '*.funkypenguin.co.nz'
|
||||
```
|
||||
|
||||
!!! question
|
||||
Why use a wildcard cert? So my enemies can't examine my certs to enumerate my various services and discover my weaknesses, of course!
|
||||
|
||||
I add the following as a cron command to renew my certs every day:
|
||||
|
||||
```bash
|
||||
cd /etc/webhook && docker run -ti --rm -v "$(pwd)"/letsencrypt:/etc/letsencrypt certbot/dns-cloudflare renew --dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini
|
||||
```
|
||||
|
||||
Once you've confirmed you've got a valid LetsEncrypt certificate stored in `/etc/webhook/letsencrypt/live/<your domain>/fullchain.pem`, proceed to the next step.
|
||||
|
||||
### Install webhook
|
||||
|
||||
We're going to use <https://github.com/adnanh/webhook> to run our webhook. On some distributions (_❤️ ya, Debian!_), webhook and its associated systemd config can be installed by running `apt-get install webhook`.
|
||||
|
||||
### Create webhook config
|
||||
|
||||
We'll create a single webhook, by creating `/etc/webhook/hooks.json` as follows. Choose a nice secure random string for your MY_TOKEN value!
|
||||
|
||||
```bash
|
||||
mkdir /etc/webhook
|
||||
export MY_TOKEN=ilovecheese
|
||||
cat << EOF > /etc/webhook/hooks.json
|
||||
[
|
||||
{
|
||||
"id": "update-haproxy",
|
||||
"execute-command": "/etc/webhook/update-haproxy.sh",
|
||||
"command-working-directory": "/etc/webhook",
|
||||
"pass-arguments-to-command":
|
||||
[
|
||||
{
|
||||
"source": "payload",
|
||||
"name": "name"
|
||||
},
|
||||
{
|
||||
"source": "payload",
|
||||
"name": "frontend-port"
|
||||
},
|
||||
{
|
||||
"source": "payload",
|
||||
"name": "backend-port"
|
||||
},
|
||||
{
|
||||
"source": "payload",
|
||||
"name": "dst-ip"
|
||||
},
|
||||
{
|
||||
"source": "payload",
|
||||
"name": "action"
|
||||
}
|
||||
],
|
||||
"trigger-rule":
|
||||
{
|
||||
"match":
|
||||
{
|
||||
"type": "value",
|
||||
"value": "$MY_TOKEN",
|
||||
"parameter":
|
||||
{
|
||||
"source": "header",
|
||||
"name": "X-Funkypenguin-Token"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
EOF
|
||||
```
|
||||
|
||||
!!! note
|
||||
Note that to prevent any bozo from calling our webhook, we're matching on a token header in the request called `X-Funkypenguin-Token`. Webhook will **ignore** any request which doesn't include a matching token in the request header.
|
||||
|
||||
### Update systemd for webhook
|
||||
|
||||
!!! note
|
||||
This section is particular to Debian Stretch and its webhook package. If you're using another OS for your VM, just ensure that you can start webhook with a config similar to the one illustrated below.
|
||||
|
||||
Since we want to force webhook to run in secure mode (_no point having a token if it can be extracted from a simple packet capture!_) I ran `systemctl edit webhook`, and pasted in the following:
|
||||
|
||||
```bash
|
||||
[Service]
|
||||
# Override the default (non-secure) behaviour of webhook by passing our certificate details and custom hooks.json location
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/webhook -hooks /etc/webhook/hooks.json -verbose -secure -cert /etc/webhook/letsencrypt/live/funkypenguin.co.nz/fullchain.pem -key /etc/webhook/letsencrypt/live/funkypenguin.co.nz/privkey.pem
|
||||
```
|
||||
|
||||
Then I restarted webhook by running `systemctl enable webhook && systemctl restart webhook`. I watched the subsequent logs by running `journalctl -u webhook -f`.
|
||||
|
||||
### Create /etc/webhook/update-haproxy.sh
|
||||
|
||||
When successfully authenticated with our top-secret token, our webhook will execute a local script, defined as follows (_yes, you should create this file_):
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
NAME=$1
|
||||
FRONTEND_PORT=$2
|
||||
BACKEND_PORT=$3
|
||||
DST_IP=$4
|
||||
ACTION=$5
|
||||
|
||||
# Bail if we haven't received our expected parameters
|
||||
if [[ "$#" -ne 5 ]]
|
||||
then
|
||||
echo "illegal number of parameters"
|
||||
exit 2;
|
||||
fi
|
||||
|
||||
# Either add or remove a service based on $ACTION
|
||||
case $ACTION in
|
||||
add)
|
||||
# Create the portion of haproxy config
|
||||
cat << EOF > /etc/webhook/haproxy/$FRONTEND_PORT.inc
|
||||
### >> Used to run $NAME:${FRONTEND_PORT}
|
||||
frontend ${FRONTEND_PORT}_frontend
|
||||
bind *:$FRONTEND_PORT
|
||||
mode tcp
|
||||
default_backend ${FRONTEND_PORT}_backend
|
||||
|
||||
backend ${FRONTEND_PORT}_backend
|
||||
mode tcp
|
||||
balance roundrobin
|
||||
stick-table type ip size 200k expire 30m
|
||||
stick on src
|
||||
server s1 $DST_IP:$BACKEND_PORT
|
||||
### << Used to run $NAME:$FRONTEND_PORT
|
||||
EOF
|
||||
;;
|
||||
delete)
|
||||
rm /etc/webhook/haproxy/$FRONTEND_PORT.inc
|
||||
;;
|
||||
*)
|
||||
echo "Invalid action $ACTION"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
# Concatenate all the haproxy configs into a single file
|
||||
cat /etc/webhook/haproxy/global /etc/webhook/haproxy/*.inc > /etc/webhook/haproxy/pre_validate.cfg
|
||||
|
||||
# Validate the generated config
|
||||
haproxy -f /etc/webhook/haproxy/pre_validate.cfg -c
|
||||
|
||||
# If validation was successful, only _then_ copy it over to /etc/haproxy/haproxy.cfg, and reload
|
||||
if [[ $? -gt 0 ]]
|
||||
then
|
||||
echo "HAProxy validation failed, not continuing"
|
||||
exit 2
|
||||
else
|
||||
# Remember what the original file looked like
|
||||
m1=$(md5sum "/etc/haproxy/haproxy.cfg")
|
||||
|
||||
# Overwrite the original file
|
||||
cp /etc/webhook/haproxy/pre_validate.cfg /etc/haproxy/haproxy.cfg
|
||||
|
||||
# Get MD5 of new file
|
||||
m2=$(md5sum "/etc/haproxy/haproxy.cfg")
|
||||
|
||||
# Only if file has changed, then we need to reload haproxy
|
||||
if [ "$m1" != "$m2" ] ; then
|
||||
echo "HAProxy config has changed, reloading"
|
||||
systemctl reload haproxy
|
||||
fi
|
||||
fi
|
||||
```
|
||||
|
||||
### Create /etc/webhook/haproxy/global
|
||||
|
||||
Create `/etc/webhook/haproxy/global` and populate with something like the following. This will be the non-dynamically generated part of our HAProxy config:
|
||||
|
||||
```ini
|
||||
global
|
||||
log /dev/log local0
|
||||
log /dev/log local1 notice
|
||||
chroot /var/lib/haproxy
|
||||
stats socket /run/haproxy/admin.sock mode 660 level admin
|
||||
stats timeout 30s
|
||||
user haproxy
|
||||
group haproxy
|
||||
daemon
|
||||
|
||||
# Default SSL material locations
|
||||
ca-base /etc/ssl/certs
|
||||
crt-base /etc/ssl/private
|
||||
|
||||
# Default ciphers to use on SSL-enabled listening sockets.
|
||||
# For more information, see ciphers(1SSL). This list is from:
|
||||
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
|
||||
# An alternative list with additional directives can be obtained from
|
||||
# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
|
||||
ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
|
||||
ssl-default-bind-options no-sslv3
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option dontlognull
|
||||
timeout connect 5000
|
||||
timeout client 5000000
|
||||
timeout server 5000000
|
||||
errorfile 400 /etc/haproxy/errors/400.http
|
||||
errorfile 403 /etc/haproxy/errors/403.http
|
||||
errorfile 408 /etc/haproxy/errors/408.http
|
||||
errorfile 500 /etc/haproxy/errors/500.http
|
||||
errorfile 502 /etc/haproxy/errors/502.http
|
||||
errorfile 503 /etc/haproxy/errors/503.http
|
||||
errorfile 504 /etc/haproxy/errors/504.http
|
||||
```
|
||||
|
||||
## Serving
|
||||
|
||||
### Take the bait!
|
||||
|
||||
Whew! We now have all the components of our automated load-balancing solution in place. Browse to your VM's FQDN at <https://whatever.it.is:9000/hooks/update-haproxy>, and you should see the text "_Hook rules were not satisfied_", with a valid SSL certificate (_You didn't send a token_).
|
||||
|
||||
If you don't see the above, then check the following:
|
||||
|
||||
1. Does the webhook verbose log (`journalctl -u webhook -f`) complain about invalid arguments or missing files?
|
||||
2. Is port 9000 open to the internet on your VM?
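Once the endpoint responds, you can exercise the hook manually with curl, sending the token header and a JSON payload matching the parameters defined in `hooks.json`. A hedged example (*substitute your own URL and token - note that a successful call will actually run `update-haproxy.sh`*):

```bash
curl -X POST https://whatever.it.is:9000/hooks/update-haproxy \
  -H "Content-Type: application/json" \
  -H "X-Funkypenguin-Token: ilovecheese" \
  -d '{"name":"test","frontend-port":"8080","backend-port":"30808","dst-ip":"192.168.1.10","action":"add"}'
```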
|
||||
|
||||
### Apply to pods
|
||||
|
||||
You'll see me use this design in any Kubernetes-based recipe which requires container-specific ports, like UniFi. Here's an excerpt of the .yml which defines the UniFi controller:
|
||||
|
||||
```yaml
|
||||
<snip>
|
||||
spec:
|
||||
containers:
|
||||
- image: lscr.io/linuxserver/unifi
|
||||
name: controller
|
||||
volumeMounts:
|
||||
- name: controller-volumeclaim
|
||||
mountPath: /config
|
||||
- image: funkypenguin/poor-mans-k8s-lb
|
||||
imagePullPolicy: Always
|
||||
name: 8080-phone-home
|
||||
env:
|
||||
- name: REPEAT_INTERVAL
|
||||
value: "600"
|
||||
- name: FRONTEND_PORT
|
||||
value: "8080"
|
||||
- name: BACKEND_PORT
|
||||
value: "30808"
|
||||
- name: NAME
|
||||
value: "unifi-adoption"
|
||||
- name: WEBHOOK
|
||||
value: "https://my-secret.url.wouldnt.ya.like.to.know:9000/hooks/update-haproxy"
|
||||
- name: WEBHOOK_TOKEN
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: unifi-credentials
|
||||
key: webhook_token.secret
|
||||
<snip>
|
||||
```
|
||||
|
||||
The takeaways here are:
|
||||
|
||||
1. We add the funkypenguin/poor-mans-k8s-lb container to any pod which has special port requirements, forcing the container to run on the same node as the other containers in the pod (_in this case, the UniFi controller_)
|
||||
2. We use a Kubernetes secret for the webhook token, so that our .yml can be shared without exposing sensitive data (*see the sketch below for one way to create it*)
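A minimal sketch of creating that secret (*names chosen to match the excerpt above; the namespace and token value are placeholders you'd substitute yourself*):

```bash
kubectl create secret generic unifi-credentials \
  --namespace <your-unifi-namespace> \
  --from-literal=webhook_token.secret='ilovecheese'
```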
|
||||
|
||||
Here's what the webhook logs look like when the above is added to the UniFi deployment:
|
||||
|
||||
```bash
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Started POST /hooks/update-haproxy
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy got matched
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 update-haproxy hook triggered successfully
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 Completed 200 OK in 2.123921ms
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 executing /etc/webhook/update-haproxy.sh (/etc/webhook/update-haproxy.sh) with arguments ["/etc/webhook/update-haproxy.sh" "unifi-adoption" "8080" "30808" "35.244.91.178" "add"] and environment [] using /etc/webhook as cwd
|
||||
Feb 06 23:04:28 haproxy2 webhook[1433]: [webhook] 2019/02/06 23:04:28 command output: Configuration file is valid
|
||||
<HAProxy restarts>
|
||||
```
|
||||
|
||||
## Move on..
|
||||
|
||||
Still with me? Good. Move on to setting up an ingress SSL terminating proxy with Traefik..
|
||||
|
||||
- [Start](/kubernetes/) - Why Kubernetes?
|
||||
- [Design](/kubernetes/design/) - How does it fit together?
|
||||
- [Cluster](/kubernetes/cluster/) - Setup a basic cluster
|
||||
- Load Balancer (this page) - Setup inbound access
|
||||
- [Snapshots](/kubernetes/snapshots/) - Automatically backup your persistent data
|
||||
- [Helm](/kubernetes/helm/) - Uber-recipes from fellow geeks
|
||||
- [Traefik](/kubernetes/traefik/) - Traefik Ingress via Helm
|
||||
|
||||
[^1]: This is MVP of the load balancer solution. Any suggestions for improvements are welcome 😉
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
54
manuscript/kubernetes/loadbalancer/index.md
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
description: Kubernetes Loadbalancer options
|
||||
---
|
||||
# Loadbalancing Services
|
||||
|
||||
## TL;DR
|
||||
|
||||
1. I have multiple nodes (*you'd benefit from [MetalLB](/kubernetes/loadbalancer/metallb/)*)
|
||||
2. I only need/want one node (*just go with [k3s svclb](/kubernetes/loadbalancer/k3s/)*)
|
||||
|
||||
## But why?
|
||||
|
||||
In Kubernetes, you don't access your containers / pods "*directly*", other than for debugging purposes. Rather, we have a construct called a "*service*", which is "in front of" one or more pods.
|
||||
|
||||
Consider that this is how containers talk to each other under Docker Swarm:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
wordpress->>+mysql: Are you there?
|
||||
mysql->>+wordpress: Yes, ready to serve!
|
||||
|
||||
```
|
||||
|
||||
But **this** is how containers (pods) talk to each other under Kubernetes:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
wordpress->>+mysql-service: Are you there?
|
||||
mysql-service->>+mysql-pods: Are you there?
|
||||
mysql-pods->>+wordpress: Yes, ready to serve!
|
||||
```
|
||||
|
||||
Why do we do this?
|
||||
|
||||
1. A service isn't pinned to a particular node, it's a virtual IP which lives in the cluster and doesn't change as pods/nodes come and go.
|
||||
2. Using a service "in front of" pods means that rolling updates / scaling of the pods can take place, but communication with the service is uninterrupted (*assuming correct configuration*).
|
||||
|
||||
Here's some [more technical detail](https://kubernetes.io/docs/concepts/services-networking/service/) into how it works, but what you need to know is that when you want to interact with your containers in Kubernetes (*either from other containers or from outside, as a human*), you'll be talking to **services.**
|
||||
|
||||
Also, services are not exposed outside of the cluster by default. There are 3 levels of "exposure" for your Kubernetes services, briefly:
|
||||
|
||||
1. ClusterIP (*A service is only available to other services in the cluster - this is the default*)
|
||||
2. NodePort (*A mostly-random high-port on the node running the pod is forwarded to the pod*)[^1]
|
||||
3. LoadBalancer (*Some external help is required to forward a particular IP into the cluster, terminating on the node running your pod*)
|
||||
|
||||
For anything vaguely useful, only `LoadBalancer` is a viable option. Even though `NodePort` may allow you to access services directly, who wants to remember that they need to access [Radarr][radarr] on `192.168.1.44:34542` and Homer on `192.168.1.44:34532`? Ugh.
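To make that concrete, here's a minimal sketch of a Service of type `LoadBalancer` (*names and ports are purely illustrative*):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: radarr            # illustrative name
  namespace: radarr
spec:
  type: LoadBalancer      # ask the cluster's load balancer implementation for an external IP
  selector:
    app: radarr           # matches the labels on the pods backing this service
  ports:
    - name: http
      port: 7878          # the port exposed on the external IP
      targetPort: 7878    # the port the pod is actually listening on
```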
|
||||
|
||||
Assuming you only had a single Kubernetes node (*say, a small k3s deployment*), you'd want 100% of all incoming traffic to be directed to that node, and so you wouldn't **need** a loadbalancer. You'd just point some DNS entries / firewall NATs at the IP of the cluster, and be done.
|
||||
|
||||
(*This is [the way k3s works](/kubernetes/loadbalancer/k3s/) by default, although it's still called a LoadBalancer*)
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: It is possible to be prescriptive about which port is used for a Nodeport-exposed service, and this is occasionally [a valid deployment strategy](https://github.com/portainer/k8s/#using-nodeport-on-a-localremote-cluster), but you're usually limited to ports between 30000 and 32768.
|
||||
27
manuscript/kubernetes/loadbalancer/k3s.md
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
description: k3s' lightweight loadbalancer
|
||||
---
|
||||
|
||||
# K3s Load Balancing
|
||||
|
||||
If your cluster is using K3s, and you have only one node, then you could be adequately served by the [built-in "klipper" loadbalancer provided with k3s](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer).
|
||||
|
||||
If you want more than one node in your cluster[^1] (*either now or in future*), I'd steer you towards [MetalLB](/kubernetes/loadbalancer/metallb/) instead.
|
||||
|
||||
## How does it work?
|
||||
|
||||
When **not** deployed with `--disable servicelb`, every time you create a service of type `LoadBalancer`, k3s will deploy a daemonset (*a collection of pods which run on every host in the cluster*), listening on that given port on the host. So deploying a LoadBalancer service for nginx on ports 80 and 443, for example, would result in **every** cluster host listening on ports 80 and 443, and sending any incoming traffic to the nginx service.
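You can see this in action: after creating a `LoadBalancer` service on k3s, a `svclb-*` daemonset appears (*typically in the `kube-system` namespace, although exact naming and placement vary between k3s versions*):

```bash
# list daemonsets across all namespaces, and look for the svclb-* entries klipper creates
kubectl get daemonsets -A | grep svclb
```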
|
||||
|
||||
## Well that's great, isn't it?
|
||||
|
||||
Yes, to get you started. But consider the following limitations:
|
||||
|
||||
1. This magic can only happen **once** per port. So you can't, for example, run two mysql instances on port 3306.
|
||||
2. Because **every** host listens on the exposed ports, you can't run anything **else** on the hosts which listens on those ports.
|
||||
3. Having multiple hosts listening on a given port still doesn't solve the problem of how to reliably direct traffic to all hosts, and how to gracefully fail over if one of the hosts fails.
|
||||
|
||||
To tackle these issues, you need some more advanced network configuration, along with [MetalLB](/kubernetes/loadbalancer/metallb/).
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: And seriously, if you're building a Kubernetes cluster, of **course** you'll want more than one host!
|
||||
287
manuscript/kubernetes/loadbalancer/metallb/index.md
Normal file
@@ -0,0 +1,287 @@
|
||||
---
|
||||
description: MetalLB - Load-balancing for bare-metal Kubernetes clusters
|
||||
---
|
||||
# MetalLB
|
||||
|
||||
[MetalLB](https://metallb.universe.tf/) offers a network [load balancer](/kubernetes/loadbalancer/) implementation which works on "bare metal" (*as opposed to a cloud provider*).
|
||||
|
||||
MetalLB does two jobs:
|
||||
|
||||
1. Provides address allocation to services out of a pool of addresses which you define
|
||||
2. Announces these addresses to devices outside the cluster, either using ARP/NDP (L2) or BGP (L3)
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] If k3s is used, then it was deployed with `--disable servicelb`
|
||||
|
||||
Optional:
|
||||
|
||||
* [ ] Network firewall/router supporting BGP (*ideal but not required*)
|
||||
|
||||
## Preparation
|
||||
|
||||
### Allocations
|
||||
|
||||
You'll need to make some decisions re IP allocations.
|
||||
|
||||
* What is the range of addresses you want to use for your LoadBalancer service pool? If you're using BGP, this can be a dedicated subnet (*i.e. a /24*), and if you're not, this should be a range of IPs in your existing network space for your cluster nodes (*i.e., 192.168.1.100-200*)
|
||||
* If you're using BGP, pick two [private AS numbers](https://datatracker.ietf.org/doc/html/rfc6996#section-5) between 64512 and 65534 inclusively.
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-metallb.yaml`:
|
||||
|
||||
??? example "Example NameSpace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: metallb-system
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the (*prolific*) [bitnami chart repository](https://github.com/bitnami/charts/tree/master/bitnami), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-bitnami.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: bitnami
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/metallb-system`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-metallb.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: metallb--metallb-system
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./metallb-system
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: metallb-controller
|
||||
namespace: metallb-system
|
||||
|
||||
```
|
||||
|
||||
!!! question "What's with that screwy name?"
|
||||
> Why'd you call the kustomization `metallb--metallb-system`?
|
||||
|
||||
I keep my file and object names as consistent as possible. In most cases, the helm chart is named the same as the namespace, but in some cases, by upstream chart or historical convention, the namespace is different to the chart name. MetalLB is one of these - the helmrelease/chart name is `metallb`, but the typical namespace it's deployed in is `metallb-system`. (*Appending `-system` seems to be a convention used in some cases for applications which support the entire cluster*). To avoid confusion when I list all kustomizations with `kubectl get kustomization -A`, I give these oddballs a name which identifies both the helmrelease and the namespace.
|
||||
|
||||
### ConfigMap (for HelmRelease)
|
||||
|
||||
Now we're into the metallb-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/metallb/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `metallb/configmap-metallb-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: metallb-helm-chart-value-overrides
|
||||
namespace: metallb-system
|
||||
data:
|
||||
values.yaml: |-
|
||||
## @section Global parameters
|
||||
## Global Docker image parameters
|
||||
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
|
||||
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
|
||||
|
||||
## @param global.imageRegistry Global Docker image registry
|
||||
## @param global.imagePullSecrets Global Docker registry secret names as an array
|
||||
##
|
||||
global:
|
||||
imageRegistry: ""
|
||||
## E.g.
|
||||
## imagePullSecrets:
|
||||
## - myRegistryKeySecretName
|
||||
<snip>
|
||||
prometheus:
|
||||
## Prometheus Operator service monitors
|
||||
##
|
||||
serviceMonitor:
|
||||
## @param speaker.prometheus.serviceMonitor.enabled Enable support for Prometheus Operator
|
||||
##
|
||||
enabled: false
|
||||
## @param speaker.prometheus.serviceMonitor.jobLabel Job label for scrape target
|
||||
##
|
||||
jobLabel: "app.kubernetes.io/name"
|
||||
## @param speaker.prometheus.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used
|
||||
##
|
||||
interval: ""
|
||||
## @param speaker.prometheus.serviceMonitor.metricRelabelings Specify additional relabeling of metrics
|
||||
##
|
||||
metricRelabelings: []
|
||||
## @param speaker.prometheus.serviceMonitor.relabelings Specify general relabeling
|
||||
##
|
||||
relabelings: []
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration. I'd recommend changing the following (*illustrated in the sketch after this list*):
|
||||
|
||||
* `existingConfigMap: metallb-config`: I prefer to set my MetalLB config independently of the chart config, so I set this to `metallb-config`, which I then define below.
|
||||
* `commonAnnotations`: Anticipating the future use of Reloader to bounce applications when their config changes, I add the `configmap.reloader.stakater.com/reload: "metallb-config"` annotation to all deployed objects, which will instruct Reloader to bounce the daemonset if the ConfigMap changes.
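By way of illustration, here's roughly how those two overrides would look within the `values.yaml:` key of the ConfigMap (*a sketch only - keep the indentation consistent with the rest of the pasted values, and confirm the key names against your chart version*):

```yaml
commonAnnotations:
  configmap.reloader.stakater.com/reload: "metallb-config"  # let Reloader bounce MetalLB when its config changes
existingConfigMap: "metallb-config"  # point the chart at the separately-managed MetalLB config, defined below
```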
|
||||
|
||||
### ConfigMap (for MetalLB)
|
||||
|
||||
Finally, it's time to actually configure MetalLB! As discussed above, I prefer to configure the helm chart to apply config from an existing ConfigMap, so that I isolate my application configuration from my chart configuration (*and make tracking changes easier*). In my setup, I'm using BGP against a pair of pfsense[^1] firewalls, so per the [official docs](https://metallb.universe.tf/configuration/), I use the following configuration, saved in my flux repo as `flux-system/configmap-metallb-config.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
namespace: metallb-system
|
||||
name: metallb-config
|
||||
data:
|
||||
config: |
|
||||
peers:
|
||||
- peer-address: 192.168.33.2
|
||||
peer-asn: 64501
|
||||
my-asn: 64500
|
||||
- peer-address: 192.168.33.4
|
||||
peer-asn: 64501
|
||||
my-asn: 64500
|
||||
|
||||
address-pools:
|
||||
- name: default
|
||||
protocol: bgp
|
||||
avoid-buggy-ips: true
|
||||
addresses:
|
||||
- 192.168.32.0/24
|
||||
```
|
||||
|
||||
!!! question "What does that mean?"
|
||||
In the config referenced above, I define one pool of addresses (`192.168.32.0/24`) which MetalLB is responsible for allocating to my services. MetalLB will then "advertise" these addresses to my firewalls (`192.168.33.2` and `192.168.33.4`), in an eBGP relationship where the firewalls' ASN is `64501` and MetalLB's ASN is `64500`. Provided I'm using my firewalls as my default gateway (*a VIP*), when I try to access one of the `192.168.32.x` IPs from any subnet connected to my firewalls, the traffic will be routed from the firewall to one of the cluster nodes running the pods selected by that service.
|
||||
|
||||
!!! note "Dude, that's too complicated!"
|
||||
There's an easier way, with some limitations. If you configure MetalLB in L2 mode, all you need to do is to define a range of IPs within your existing node subnet, like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
namespace: metallb-system
|
||||
name: metallb-config
|
||||
data:
|
||||
config: |
|
||||
address-pools:
|
||||
- name: default
|
||||
protocol: layer2
|
||||
addresses:
|
||||
- 192.168.1.240-192.168.1.250
|
||||
```
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy MetalLB into the cluster, with the config and extra ConfigMap we defined above. I save this in my flux repo as `metallb/helmrelease-metallb.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: metallb
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: metallb
|
||||
version: 2.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: bitnami
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: metallb
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: metallb-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Deploy MetalLB
|
||||
|
||||
Having committed the above to your flux repository, you should shortly see a metallb kustomization, and in the `metallb-system` namespace, a controller and a speaker pod for every node:
|
||||
|
||||
```bash
|
||||
root@cn1:~# kubectl get pods -n metallb-system -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
metallb-controller-779d8686f6-mgb4s 1/1 Running 0 21d 10.0.6.19 wn3 <none> <none>
|
||||
metallb-speaker-2qh2d 1/1 Running 0 21d 192.168.33.24 wn4 <none> <none>
|
||||
metallb-speaker-7rz24 1/1 Running 0 21d 192.168.33.22 wn2 <none> <none>
|
||||
metallb-speaker-gbm5r 1/1 Running 0 21d 192.168.33.23 wn3 <none> <none>
|
||||
metallb-speaker-gzgd2 1/1 Running 0 21d 192.168.33.21 wn1 <none> <none>
|
||||
metallb-speaker-nz6kd 1/1 Running 0 21d 192.168.33.25 wn5 <none> <none>
|
||||
root@cn1:~#
|
||||
```
|
||||
|
||||
!!! question "Why are there no speakers on my masters?"
|
||||
|
||||
In some cluster setups, master nodes are "tainted" to prevent workloads running on them and consuming capacity required for "mastering". If this is the case for you, but you actually **do** want to run some externally-exposed workloads on your masters, you'll need to update the `speaker.tolerations` value for the HelmRelease config to include:
|
||||
|
||||
```yaml
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
effect: "NoSchedule"
|
||||
```
|
||||
|
||||
### How do I know it's working?
|
||||
|
||||
If you used my [template repository](https://github.com/geek-cookbook/template-flux) to start off your [flux deployment strategy](/kubernetes/deployment/flux/), then the podinfo helm chart has already been deployed. By default, the podinfo service is in `ClusterIP` mode, so it's only reachable within the cluster.
|
||||
|
||||
Edit your podinfo helmrelease configmap (`/podinfo/configmap-podinfo-helm-chart-value-overrides.yaml`), and change this:
|
||||
|
||||
``` yaml hl_lines="6"
|
||||
<snip>
|
||||
# Kubernetes Service settings
|
||||
service:
|
||||
enabled: true
|
||||
annotations: {}
|
||||
type: ClusterIP
|
||||
<snip>
|
||||
```
|
||||
|
||||
To:
|
||||
|
||||
``` yaml hl_lines="6"
|
||||
<snip>
|
||||
# Kubernetes Service settings
|
||||
service:
|
||||
enabled: true
|
||||
annotations: {}
|
||||
type: LoadBalancer
|
||||
<snip>
|
||||
```
|
||||
|
||||
Commit your changes, wait for a reconciliation, and run `kubectl get services -n podinfo`. All going well, you should see that the service now has an IP assigned from the pool you chose for MetalLB!
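The output will look something like this (*illustrative only - your EXTERNAL-IP will come from the pool you defined*):

```bash
demo@shredder:~$ kubectl get services -n podinfo
NAME      TYPE           CLUSTER-IP      EXTERNAL-IP    PORT(S)                         AGE
podinfo   LoadBalancer   10.152.183.89   192.168.32.1   9898:31198/TCP,9999:30132/TCP   5m
```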
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: I've documented an example re [how to configure BGP between MetalLB and pfsense](/kubernetes/loadbalancer/metallb/pfsense/).
|
||||
79
manuscript/kubernetes/loadbalancer/metallb/pfsense.md
Normal file
@@ -0,0 +1,79 @@
|
||||
---
|
||||
description: Using MetalLB with pfsense and BGP
|
||||
---
|
||||
# MetalLB with pfSense
|
||||
|
||||
This is an addendum to the MetalLB recipe, explaining how to configure MetalLB to perform BGP peering with a pfSense firewall.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [X] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [X] [MetalLB](/kubernetes/loadbalancer/metallb/) deployed
|
||||
* [X] One or more pfSense firewalls
|
||||
* [X] Basic familiarity with pfSense operation
|
||||
|
||||
## Preparation
|
||||
|
||||
Complete the [MetalLB](/kubernetes/loadbalancer/metallb/) installation, including the process of identifying ASNs for both your pfSense firewall and your MetalLB configuration.
|
||||
|
||||
Install the FRR package in pfsense, under **System -> Package Manager -> Available Packages**
|
||||
|
||||
### Configure FRR Global/Zebra
|
||||
|
||||
Under **Services -> FRR Global/Zebra**, enable FRR, set your router ID (*this will be your router's peer IP in MetalLB config*), and set a master password (*because apparently you have to, even though we don't use it*):
|
||||
|
||||

|
||||
|
||||
### Configure FRR BGP
|
||||
|
||||
Under **Services -> FRR BGP**, globally enable BGP, and set your local AS and router ID:
|
||||
|
||||

|
||||
|
||||
### Configure FRR BGP Advanced
|
||||
|
||||
Use the tabs at the top of the FRR configuration to navigate to "**Advanced**"...
|
||||
|
||||

|
||||
|
||||
... and scroll down to **eBGP**. Check the checkbox titled "**Disable eBGP Require Policy**:
|
||||
|
||||

|
||||
|
||||
!!! question "Isn't disabling a policy check a Bad Idea(tm)?"
|
||||
If you're an ISP, sure. If you're only using eBGP to share routes between MetalLB and pfsense, then applying policy is an unnecessary complication.[^1]
|
||||
|
||||
### Configure BGP neighbors
|
||||
|
||||
#### Peer Group
|
||||
|
||||
It's useful to bundle our configurations within a "peer group" (*a collection of settings which applies to all neighbors who are members of that group*), so start off by creating a neighbor with the name of "**metallb**" (*this will become a peer-group*). Set the remote AS (*because you have to*), and leave the rest of the settings as default.
|
||||
|
||||
!!! question "Why bother with a peer group?"
|
||||
> If we're not changing any settings, why are we bothering with a peer group?
|
||||
|
||||
We may later want to change settings which affect all the peers, such as prefix lists, route-maps, etc. We're doing this now for the benefit of our future selves 💪
|
||||
|
||||
#### Individual Neighbors
|
||||
|
||||
Now add each node running MetalLB, as a BGP neighbor. Pick the peer-group you created above, and configure each neighbor's ASN:
|
||||
|
||||

|
||||
|
||||
## Serving
|
||||
|
||||
Once you've added your neighbors, you should be able to use the FRR tab navigation (*it's weird, I know!*) to get to Status / BGP, and identify your neighbors, and all the routes learned from them. In the screenshot below, you'll note that **most** routes are learned from all the neighbors - those'll be services backed by a daemonset, running on all nodes. The `192.168.32.3/32` route, however, is only received from `192.168.33.22`, meaning only one node is running the pods backing this service, so only those pods are advertising the route to pfSense:
|
||||
|
||||

|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If you're not receiving any routes from MetalLB, or if the neighbors aren't in an established state, here are a few suggestions for troubleshooting:
|
||||
|
||||
1. Confirm on pfSense that the BGP connections (*TCP port 179*) are not being blocked by the firewall
|
||||
2. Examine the metallb speaker logs in the cluster, by running `kubectl logs -n metallb-system -l app.kubernetes.io/name=metallb`
|
||||
3. SSH to the pfsense, start a shell and launch the FFR shell by running `vtysh`. Now you're in a cisco-like console where commands like `show ip bgp sum` and `show ip bgp neighbors <neighbor ip> received-routes` will show you interesting debugging things.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: If you decide to deploy some policy with route-maps, prefix-lists, etc, it's all found under **Services -> FRR Global/Zebra** 🦓
|
||||
314
manuscript/kubernetes/monitoring/index.md
Normal file
@@ -0,0 +1,314 @@
|
||||
# Miniflux
|
||||
|
||||
Miniflux is a lightweight RSS reader, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of the favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_)
|
||||
|
||||

|
||||
|
||||
I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate:
|
||||
|
||||
* Compatible with the Fever API, read your feeds through existing mobile and desktop clients (_This is the killer feature for me. I hardly ever read RSS on my desktop, I typically read on my iPhone or iPad, using [Fiery Feeds](http://cocoacake.net/apps/fiery/) or my new squeeze, [Unread](https://www.goldenhillsoftware.com/unread/)_)
|
||||
* Send your bookmarks to Pinboard, Wallabag, Shaarli or Instapaper (_I use this to automatically pin my bookmarks for collection on my [blog](https://www.funkypenguin.co.nz/)_)
|
||||
* Feeds can be configured to download a "full" version of the content (_rather than an excerpt_)
|
||||
* Use the Bookmarklet to subscribe to a website directly from any browsers
|
||||
|
||||
!!! abstract "2.0+ is a bit different"
|
||||
[Some things changed](https://docs.miniflux.net/en/latest/migration.html) when Miniflux 2.0 was released. For one thing, the only supported database is now postgresql (_no more SQLite_). External themes are gone, as is PHP (_in favor of golang_). It's been a controversial change, but I'm keen on minimal and single-purpose, so I'm still very happy with the direction of development. The developer has laid out his [opinions](https://docs.miniflux.net/en/latest/opinionated.html) re the decisions he's made in the course of development.
|
||||
|
||||
## Ingredients
|
||||
|
||||
1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/)
|
||||
2. A DNS name for your miniflux instance (*miniflux.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress
|
||||
|
||||
## Preparation
|
||||
|
||||
### Prepare traefik for namespace
|
||||
|
||||
When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *miniflux* namespace, as illustrated below:
|
||||
|
||||
```yaml
|
||||
<snip>
|
||||
kubernetes:
|
||||
namespaces:
|
||||
- kube-system
|
||||
- nextcloud
|
||||
- kanboard
|
||||
- miniflux
|
||||
<snip>
|
||||
```
|
||||
|
||||
If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods```
|
||||
|
||||
### Create data locations
|
||||
|
||||
Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment.
|
||||
|
||||
```bash
|
||||
mkdir /var/data/config/miniflux
|
||||
```
|
||||
|
||||
### Create namespace
|
||||
|
||||
We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the miniflux stack with the following .yml:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/miniflux/namespace.yml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: miniflux
|
||||
EOF
|
||||
kubectl create -f /var/data/config/miniflux/namespace.yml
|
||||
```
|
||||
|
||||
### Create persistent volume claim
|
||||
|
||||
Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the miniflux postgres database:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/miniflux/db-persistent-volumeclaim.yml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: miniflux-db
|
||||
namespace: miniflux
|
||||
annotations:
|
||||
backup.kubernetes.io/deltas: P1D P7D
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
EOF
|
||||
kubectl create -f /var/data/config/miniflux/db-persistent-volumeclaim.yml
|
||||
```
|
||||
|
||||
!!! question "What's that annotation about?"
|
||||
The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days.
|
||||
|
||||
### Create secrets
|
||||
|
||||
It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents. Run the following, replacing ```imtoosexyformyadminpassword```, and the ```mydbpass``` value in both postgres-password.secret **and** database-url.secret:
|
||||
|
||||
```bash
|
||||
echo -n "imtoosexyformyadminpassword" > admin-password.secret
|
||||
echo -n "mydbpass" > postgres-password.secret
|
||||
echo -n "postgres://miniflux:mydbpass@db/miniflux?sslmode=disable" > database-url.secret
|
||||
|
||||
kubectl create secret -n miniflux generic miniflux-credentials \
|
||||
--from-file=admin-password.secret \
|
||||
--from-file=postgres-password.secret \
|
||||
--from-file=database-url.secret
|
||||
```
|
||||
|
||||
!!! tip "Why use ```echo -n```?"
|
||||
Because. See [my blog post here](https://www.funkypenguin.co.nz/blog/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why!
|
||||
|
||||
## Serving
|
||||
|
||||
Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), we can create [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [services](https://kubernetes.io/docs/concepts/services-networking/service/), and an [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the miniflux [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
|
||||
|
||||
### Create db deployment
|
||||
|
||||
Deployments tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Create the db deployment by executing the following. Note that the deployment refers to the secrets created above.
|
||||
|
||||
--8<-- "premix-cta.md"
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/db-deployment.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: miniflux
|
||||
name: db
|
||||
labels:
|
||||
app: db
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: db
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: db
|
||||
spec:
|
||||
containers:
|
||||
- image: postgres:11
|
||||
name: db
|
||||
volumeMounts:
|
||||
- name: miniflux-db
|
||||
mountPath: /var/lib/postgresql/data
|
||||
env:
|
||||
- name: POSTGRES_USER
|
||||
value: "miniflux"
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: postgres-password.secret
|
||||
volumes:
|
||||
- name: miniflux-db
|
||||
persistentVolumeClaim:
|
||||
claimName: miniflux-db
|
||||
EOF
kubectl create -f /var/data/miniflux/db-deployment.yml
```
|
||||
|
||||
### Create app deployment
|
||||
|
||||
Create the app deployment by executing the following. Again, note that the deployment refers to the secrets created above.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/app-deployment.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: miniflux
|
||||
name: app
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: app
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
containers:
|
||||
- image: miniflux/miniflux
|
||||
name: app
|
||||
env:
|
||||
# This is necessary for miniflux to update the db schema, even on an empty DB
|
||||
- name: CREATE_ADMIN
|
||||
value: "1"
|
||||
- name: RUN_MIGRATIONS
|
||||
value: "1"
|
||||
- name: ADMIN_USERNAME
|
||||
value: "admin"
|
||||
- name: ADMIN_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: admin-password.secret
|
||||
- name: DATABASE_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: database-url.secret
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/app-deployment.yml
|
||||
```
|
||||
|
||||
### Check pods
|
||||
|
||||
Check that your deployments are running, with ```kubectl get pods -n miniflux```. After a minute or so, you should see 2 "Running" pods, as illustrated below:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get pods -n miniflux
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
app-667c667b75-5jjm9 1/1 Running 0 4d
|
||||
db-fcd47b88f-9vvqt 1/1 Running 0 4d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Create db service
|
||||
|
||||
The db service resource "advertises" the availability of PostgreSQL's port (TCP 5432) in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/db-service.yml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: db
|
||||
namespace: miniflux
|
||||
spec:
|
||||
selector:
|
||||
app: db
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5432
|
||||
clusterIP: None
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/db-service.yml
|
||||
```
|
||||
|
||||
### Create app service
|
||||
|
||||
The app service resource "advertises" the availability of miniflux's HTTP listener port (TCP 8080) in your pod. This is the service which will be referred to by the ingress (below), so that Traefik can route incoming traffic to the miniflux app.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/app-service.yml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: app
|
||||
namespace: miniflux
|
||||
spec:
|
||||
selector:
|
||||
app: app
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
clusterIP: None
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/app-service.yml
|
||||
```
|
||||
|
||||
### Check services
|
||||
|
||||
Check that your services are deployed, with ```kubectl get services -n miniflux```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get services -n miniflux
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
app ClusterIP None <none> 8080/TCP 55d
|
||||
db ClusterIP None <none> 5432/TCP 55d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Create ingress
|
||||
|
||||
The ingress resource tells Traefik to forward inbound requests for *miniflux.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/ingress.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: app
|
||||
namespace: miniflux
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
spec:
|
||||
rules:
|
||||
- host: miniflux.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: app
|
||||
servicePort: 8080
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/ingress.yml
|
||||
```
|
||||
|
||||
Check that your ingress is deployed, with ```kubectl get ingress -n miniflux```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] 130 % kubectl get ingress -n miniflux
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
app miniflux.funkypenguin.co.nz 80 55d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Access Miniflux
|
||||
|
||||
At this point, you should be able to access your instance on your chosen DNS name (*i.e. <https://miniflux.example.com>*)
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
To look at the Miniflux pod's logs, run ```kubectl logs -n miniflux <name of pod per above> -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/).
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
11
manuscript/kubernetes/persistence/index.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Persistence
|
||||
|
||||
So we've gone as far as we can with our cluster, without any form of persistence. As soon as we want to retain data, be it a database, metrics history, or objects, we need one or more ways to persist data within the cluster.
|
||||
|
||||
Here are some popular options, ranked in difficulty/complexity, in vaguely ascending order:
|
||||
|
||||
* [Local Path Provisioner](/kubernetes/persistence/local-path-provisioner/) (on k3s)
|
||||
* [TopoLVM](/kubernetes/persistence/topolvm/)
|
||||
* OpenEBS (coming soon)
|
||||
* Rook Ceph (coming soon)
|
||||
* Longhorn (coming soon)
|
||||
45
manuscript/kubernetes/persistence/local-path-provisioner.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Local Path Provisioner
|
||||
|
||||
[k3s](/kubernetes/cluster/k3s/) installs itself with "Local Path Provisioner", a simple controller whose job it is to create local volumes on each k3s node. If you only have one node, or you just want something simple to start learning with, then `local-path` is ideal, since it requires no further setup.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/) deployed with [k3s](/kubernetes/cluster/k3s/)
|
||||
|
||||
Here's how you know you've got the StorageClass:
|
||||
|
||||
```bash
|
||||
root@shredder:~# kubectl get sc
|
||||
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
|
||||
local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 60m
|
||||
root@shredder:~#
|
||||
```
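To consume it, a workload simply requests a PVC against the `local-path` StorageClass. Here's a minimal, hypothetical example (*the name and size are illustrative only*):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-local-path-pvc
spec:
  accessModes:
    - ReadWriteOnce        # local-path volumes are ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi
```

Note the `WaitForFirstConsumer` binding mode shown in the output above - the PVC will sit in `Pending` until a pod actually consumes it.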
|
||||
|
||||
## Preparation
|
||||
|
||||
### Basics
|
||||
|
||||
A few things you should know:
|
||||
|
||||
1. This is not **network storage**. The volume you create will forever be bound to the k3s node its pod is executed on. If you later take that node down for maintenance, the pods will not be able to start on other nodes, because they won't find their volumes.
|
||||
2. The default path for the volumes is `/opt/local-path-provisioner`, although this can be changed by [editing a ConfigMap](https://github.com/rancher/local-path-provisioner/blob/master/README.md#customize-the-configmap). Make sure you have enough disk space! [^1]
|
||||
3. There's no support for resizing a volume. If you create a volume and later work out that it's too small, you'll have to destroy it and recreate it. (*More sophisticated provisioners like [rook-ceph](/kubernetes/persistence/rook-ceph/) and [topolvm](/kubernetes/persistence/topolvm/) allow for dynamic resizing of volumes*)
|
||||
|
||||
### When to use it
|
||||
|
||||
* When you don't care much about your storage. This seems backwards, but sometimes you need large amounts of storage for relatively ephemeral reasons, like batch processing, or log aggregation. You may decide the convenience of using Local Path Provisioner for quick, hard-drive-speed storage outweighs the minor hassle of losing your metrics data if you were to have a node outage.
|
||||
* When [TopoLVM](/kubernetes/persistence/topolvm/) is not a viable option, and you'd rather use available disk space on your existing, formatted filesystems
|
||||
|
||||
### When not to use it
|
||||
|
||||
* When you have any form of redundancy requirement on your persisted data.
|
||||
* When you're not using k3s.
|
||||
* When you may one day want to resize your volumes.
|
||||
|
||||
### Summary
|
||||
|
||||
In summary, Local Path Provisioner is fine if your workloads' storage needs are well-understood up-front (*remember, no resizing*), and you don't care about node redundancy.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: [TopoLVM](/kubernetes/persistence/topolvm/) also creates per-node volumes which aren't "portable" between nodes, but because it relies on LVM, it is "capacity-aware", and is able to distribute storage among multiple nodes based on available capacity.
|
||||
3
manuscript/kubernetes/persistence/longhorn.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Longhorn
|
||||
|
||||
Coming soon!
|
||||
3
manuscript/kubernetes/persistence/openebs.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Open EBS
|
||||
|
||||
Coming soon!
|
||||
3
manuscript/kubernetes/persistence/rook-ceph.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Rook Ceph
|
||||
|
||||
Coming soon!
|
||||
279
manuscript/kubernetes/persistence/topolvm.md
Normal file
@@ -0,0 +1,279 @@
|
||||
# TopoLVM
|
||||
|
||||
TopoLVM is **like** [Local Path Provisioner](/kubernetes/persistence/local-path-provisioner/), in that it deals with local volumes specific to each Kubernetes node, but it offers more flexibility, and is more suited for a production deployment.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] A dedicated disk, or free LVM volume space, for provisioning volumes
|
||||
|
||||
Additional benefits offered by TopoLVM are:
|
||||
|
||||
* Volumes can be dynamically expanded (*see the sketch after this list*)
|
||||
* The scheduler is capacity-aware, and can schedule pods to nodes with enough capacity for the pods' storage requirements
|
||||
* Multiple storageclasses are supported, so you could, for example, create a storageclass for HDD-backed volumes, and another for SSD-backed volumes
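As a rough sketch of that first point, expanding a TopoLVM-backed volume is just a matter of patching the PVC's requested size (*assuming a PVC named `topolvm-pvc`, as created later in this recipe*):

```bash
# hypothetical: grow an existing TopoLVM-backed PVC from 128Mi to 256Mi
kubectl patch pvc topolvm-pvc --type merge \
  -p '{"spec":{"resources":{"requests":{"storage":"256Mi"}}}}'
```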
|
||||
|
||||
## Preparation
|
||||
|
||||
### Volume Group
|
||||
|
||||
Finally you get to do something on your nodes without YAML or git, like a pre-GitOps, bare-metal-cavemonkey! :monkey_face:
|
||||
|
||||
On each node, you'll need an LVM Volume Group (VG) for TopoLVM to consume. The most straightforward way to arrange this is to dedicate a disk to TopoLVM, and create a dedicated PV and VG for it.
|
||||
|
||||
In brief, assuming `/dev/sdb` is the disk (*and it's unused*), you'd do the following to create a VG called `VG-topolvm`:
|
||||
|
||||
```bash
|
||||
pvcreate /dev/sdb
|
||||
vgcreate VG-topolvm /dev/sdb
|
||||
```
|
||||
|
||||
!!! tip
|
||||
If you don't have a dedicated disk, you could try installing your OS using LVM partitioning, and leave some space unused, for TopoLVM to consume. Run `vgs` from an installed node to work out what the VG name is that the OS installer chose.
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-topolvm.yaml`:
|
||||
|
||||
??? example "Example NameSpace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: topolvm-system
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. In this case, we're using the official [TopoLVM helm chart](https://github.com/topolvm/topolvm/tree/main/charts/topolvm), so per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-topolvm.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: topolvm
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://topolvm.github.io/topolvm
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*Namespace and HelmRepository*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/topolvm`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-topolvm.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: topolvm--topolvm-system
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./topolvm
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: topolvm-controller
|
||||
namespace: topolvm-system
|
||||
- apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
name: topolvm-lvmd-0
|
||||
namespace: topolvm-system
|
||||
- apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
name: topolvm-node
|
||||
namespace: topolvm-system
|
||||
- apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
name: topolvm-scheduler
|
||||
namespace: topolvm-system
|
||||
```
|
||||
|
||||
!!! question "What's with that screwy name?"
|
||||
> Why'd you call the kustomization `topolvm--topolvm-system`?
|
||||
|
||||
I keep my file and object names as consistent as possible. In most cases, the helm chart is named the same as the namespace, but in some cases, by upstream chart or historical convention, the namespace is different to the chart name. TopoLVM is one of these - the helmrelease/chart name is `topolvm`, but the typical namespace it's deployed in is `topolvm-system`. (*Appending `-system` seems to be a convention used in some cases for applications which support the entire cluster*). To avoid confusion when I list all kustomizations with `kubectl get kustomization -A`, I give these oddballs a name which identifies both the helmrelease and the namespace.
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the topolvm-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/topolvm/topolvm/blob/main/charts/topolvm/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `topolvm/configmap-topolvm-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: topolvm-helm-chart-value-overrides
|
||||
namespace: topolvm-system
|
||||
data:
|
||||
values.yaml: |-
|
||||
# paste chart values.yaml (indented) here and alter as required
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration. You might want to start off by changing the following to match the name of the [volume group you created above](#volume-group).[^1]
|
||||
|
||||
```yaml hl_lines="10-13"
|
||||
lvmd:
|
||||
# lvmd.managed -- If true, set up lvmd service with DaemonSet.
|
||||
managed: true
|
||||
|
||||
# lvmd.socketName -- Specify socketName.
|
||||
socketName: /run/topolvm/lvmd.sock
|
||||
|
||||
# lvmd.deviceClasses -- Specify the device-class settings.
|
||||
deviceClasses:
|
||||
- name: ssd
|
||||
volume-group: myvg1
|
||||
default: true
|
||||
spare-gb: 10
|
||||
```
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy TopoLVM into the cluster, with the config we defined above. I save this in my flux repo as `topolvm/helmrelease-topolvm.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: topolvm
|
||||
namespace: topolvm-system
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: topolvm
|
||||
version: 3.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: topolvm
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: topolvm
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: topolvm-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Serving
|
||||
|
||||
### Deploy TopoLVM
|
||||
|
||||
Having committed the above to your flux repository, you should shortly see a topolvm kustomization, and in the `topolvm-system` namespace, a bunch of pods:
|
||||
|
||||
```bash
|
||||
demo@shredder:~$ kubectl get pods -n topolvm-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
topolvm-controller-85698b44dd-65fd9 4/4 Running 0 133m
|
||||
topolvm-controller-85698b44dd-dmncr 4/4 Running 0 133m
|
||||
topolvm-lvmd-0-98h4q 1/1 Running 0 133m
|
||||
topolvm-lvmd-0-b29t8 1/1 Running 0 133m
|
||||
topolvm-lvmd-0-c5vnf 1/1 Running 0 133m
|
||||
topolvm-lvmd-0-hmmq5 1/1 Running 0 133m
|
||||
topolvm-lvmd-0-zfldv 1/1 Running 0 133m
|
||||
topolvm-node-6p4qz 3/3 Running 0 133m
|
||||
topolvm-node-7vdgt 3/3 Running 0 133m
|
||||
topolvm-node-mlp4x 3/3 Running 0 133m
|
||||
topolvm-node-sxtn5 3/3 Running 0 133m
|
||||
topolvm-node-xf265 3/3 Running 0 133m
|
||||
topolvm-scheduler-jlwsh 1/1 Running 0 133m
|
||||
topolvm-scheduler-nj8nz 1/1 Running 0 133m
|
||||
topolvm-scheduler-tg72z 1/1 Running 0 133m
|
||||
demo@shredder:~$
|
||||
```
|
||||
|
||||
### How do I know it's working?
|
||||
|
||||
So the controllers etc are running, but how do we know we can actually provision volumes?
|
||||
|
||||
#### Create PVC
|
||||
|
||||
Create a PVC, by running:
|
||||
|
||||
```bash
|
||||
cat <<EOF | kubectl create -f -
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: topolvm-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: topolvm-provisioner
|
||||
resources:
|
||||
requests:
|
||||
storage: 128Mi
|
||||
EOF
|
||||
```
|
||||
|
||||
Examine the PVC by running `kubectl describe pvc topolvm-pvc`
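Don't panic if the PVC shows as `Pending` at first; TopoLVM's StorageClass (like most topology-aware provisioners) typically uses `WaitForFirstConsumer` volume binding, so nothing is provisioned until a pod actually claims the volume:

```bash
# the PVC will typically sit in Pending until the pod below is scheduled
kubectl get pvc topolvm-pvc
```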
|
||||
|
||||
#### Create Pod
|
||||
|
||||
Now create a pod to consume the PVC, by running:
|
||||
|
||||
```bash
|
||||
cat <<EOF | kubectl create -f -
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: topolvm-test
|
||||
spec:
|
||||
containers:
|
||||
- name: volume-test
|
||||
image: nginx:stable-alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: topolvm-rocks
|
||||
mountPath: /data
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumes:
|
||||
- name: topolvm-rocks
|
||||
persistentVolumeClaim:
|
||||
claimName: topolvm-pvc
|
||||
EOF
|
||||
```
|
||||
|
||||
Examine the pod by running `kubectl describe pod topolvm-test`.
|
||||
|
||||
#### Clean up
|
||||
|
||||
Assuming that the pod is in a `Running` state, then TopoLVM is working!
|
||||
|
||||
Clean up your mess, little bare-metal-cave-monkey :monkey_face:, by running:
|
||||
|
||||
```bash
|
||||
kubectl delete pod topolvm-test
|
||||
kubectl delete pvc topolvm-pvc
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Are things not working as expected? Try one of the following to look for issues:
|
||||
|
||||
1. Watch the lvmd logs, by running `kubectl logs -f -n topolvm-system -l app.kubernetes.io/name=topolvm-lvmd`
|
||||
2. Watch the node logs, by running `kubectl logs -f -n topolvm-system -l app.kubernetes.io/name=topolvm-node`
|
||||
3. Watch the scheduler logs, by running `kubectl logs -f -n topolvm-system -l app.kubernetes.io/name=scheduler`
|
||||
4. Watch the controller node logs, by running `kubectl logs -f -n topolvm-system -l app.kubernetes.io/name=controller`
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: This is where you'd add multiple Volume Groups if you wanted a storageclass per Volume Group
|
||||
425
manuscript/kubernetes/sealed-secrets.md
Normal file
@@ -0,0 +1,425 @@
|
||||
---
|
||||
description: Securely store your secrets in plain sight
|
||||
---
|
||||
|
||||
# Sealed Secrets
|
||||
|
||||
So you're sold on GitOps, you're using the [flux deployment strategy](/kubernetes/deployment/flux/) to deploy all your applications into your cluster, and you sleep like a baby 🍼 at night, knowing that you could rebuild your cluster with a few commands, given every change is stored in git's history.
|
||||
|
||||
But what about your secrets?
|
||||
|
||||
In Kubernetes, a "Secret" is a "teeny-weeny" bit more secure ConfigMap, in that it's base-64 encoded to prevent shoulder-surfing, and access to secrets can be restricted (*separately to ConfigMaps*) using Kubernetes RBAC. In some cases, applications deployed via helm expect to find existing secrets within the cluster, containing things like AWS credentials (*External DNS, Cert Manager*), admin passwords (*Grafana*), etc.
|
||||
|
||||
They're still not very secret though, and you certainly wouldn't want to be storing base64-encoded secrets in a git repository, public or otherwise!
|
||||
|
||||
An elegant solution to this problem is Bitnami Labs' Sealed Secrets.
|
||||
|
||||

|
||||
|
||||
A "[SealedSecret](https://github.com/bitnami-labs/sealed-secrets)" can only be decrypted (*and turned back into a regular Secret*) by the controller in the target cluster. (*or by a controller in another cluster which has been primed with your own private/public pair)* This means the SealedSecret is safe to store and expose anywhere.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
|
||||
Optional:
|
||||
|
||||
* [ ] Your own private/public PEM certificate pair for secret encryption/decryption (*ideal but not required*)
|
||||
|
||||
## Preparation
|
||||
|
||||
### Install kubeseal CLI
|
||||
|
||||
=== "HomeBrew (MacOS/Linux)"
|
||||
|
||||
With [Homebrew](https://brew.sh/) for macOS and Linux:
|
||||
|
||||
```bash
|
||||
brew install kubeseal
|
||||
```
|
||||
|
||||
=== "Bash (Linux)"
|
||||
|
||||
With Bash for macOS and Linux:
|
||||
|
||||
(Update for whatever the [latest release](https://github.com/bitnami-labs/sealed-secrets/releases) is)
|
||||
|
||||
```bash
|
||||
wget https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.17.0/kubeseal-linux-amd64 -O kubeseal
|
||||
sudo install -m 755 kubeseal /usr/local/bin/kubeseal
|
||||
```
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-sealed-secrets.yaml`:
|
||||
|
||||
??? example "Example Namespace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: sealed-secrets
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-sealedsecrets.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: sealed-secrets
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://bitnami-labs.github.io/sealed-secrets
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*just the HelmRepository in this case*z*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/sealed-secrets`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-sealed-secrets.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: sealed-secrets
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./sealed-secrets
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: sealed-secrets
|
||||
namespace: sealed-secrets
|
||||
```
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the sealed-secrets-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/bitnami-labs/sealed-secrets/blob/main/helm/sealed-secrets/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `sealed-secrets/configmap-sealed-secrets-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: sealed-secrets-helm-chart-value-overrides
|
||||
namespace: sealed-secrets
|
||||
data:
  values.yaml: |-
|
||||
image:
|
||||
repository: quay.io/bitnami/sealed-secrets-controller
|
||||
tag: v0.17.0
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecret: ""
|
||||
|
||||
resources: {}
|
||||
nodeSelector: {}
|
||||
tolerations: []
|
||||
affinity: {}
|
||||
|
||||
controller:
|
||||
# controller.create: `true` if Sealed Secrets controller should be created
|
||||
create: true
|
||||
# controller.labels: Extra labels to be added to controller deployment
|
||||
labels: {}
|
||||
# controller.service: Configuration options for controller service
|
||||
service:
|
||||
# controller.service.labels: Extra labels to be added to controller service
|
||||
labels: {}
|
||||
|
||||
# namespace: Namespace to deploy the controller.
|
||||
namespace: ""
|
||||
|
||||
serviceAccount:
|
||||
# serviceAccount.create: Whether to create a service account or not
|
||||
create: true
|
||||
# serviceAccount.labels: Extra labels to be added to service account
|
||||
labels: {}
|
||||
# serviceAccount.name: The name of the service account to create or use
|
||||
name: ""
|
||||
|
||||
rbac:
|
||||
# rbac.create: `true` if rbac resources should be created
|
||||
create: true
|
||||
# rbac.labels: Extra labels to be added to rbac resources
|
||||
labels: {}
|
||||
pspEnabled: false
|
||||
|
||||
# secretName: The name of the TLS secret containing the key used to encrypt secrets
|
||||
secretName: "sealed-secrets-key"
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /v1/cert.pem
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
crd:
|
||||
# crd.create: `true` if the crd resources should be created
|
||||
create: true
|
||||
# crd.keep: `true` if the sealed secret CRD should be kept when the chart is deleted
|
||||
keep: true
|
||||
|
||||
networkPolicy: false
|
||||
|
||||
securityContext:
|
||||
# securityContext.runAsUser defines under which user the operator Pod and its containers/processes run.
|
||||
runAsUser: 1001
|
||||
# securityContext.fsGroup defines the filesystem group
|
||||
fsGroup: 65534
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podLabels: {}
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
serviceMonitor:
|
||||
# Enables ServiceMonitor creation for the Prometheus Operator
|
||||
create: false
|
||||
# How frequently Prometheus should scrape the ServiceMonitor
|
||||
interval:
|
||||
# Extra labels to apply to the sealed-secrets ServiceMonitor
|
||||
labels:
|
||||
# The namespace where the ServiceMonitor is deployed, defaults to the installation namespace
|
||||
namespace:
|
||||
# The timeout after which the scrape is ended
|
||||
scrapeTimeout:
|
||||
|
||||
dashboards:
|
||||
# If enabled, sealed-secrets will create a configmap with a dashboard in json that's going to be picked up by grafana
|
||||
# See https://github.com/helm/charts/tree/master/stable/grafana#configuration - `sidecar.dashboards.enabled`
|
||||
create: false
|
||||
# Extra labels to apply to the dashboard configmaps
|
||||
labels:
|
||||
# The namespace where the dashboards are deployed, defaults to the installation namespace
|
||||
namespace:
|
||||
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration (*I stick with the defaults*).
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy the sealed-secrets controller into the cluster, with the config we defined above. I save this in my flux repo as `sealed-secrets/helmrelease-sealed-secrets.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: sealed-secrets
|
||||
namespace: sealed-secrets
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: sealed-secrets
|
||||
version: 1.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: sealed-secrets
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: sealed-secrets
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: sealed-secrets-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Serving
|
||||
|
||||
Commit your files to your flux repo, and wait until you see pods show up in the `sealed-secrets` namespace.
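A quick way to confirm (names, counts and ages will differ in your cluster):

```bash
# confirm the kustomization reconciled, and the controller pod is running
flux get kustomizations sealed-secrets   # if you have the flux CLI installed
kubectl get pods -n sealed-secrets
```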
|
||||
|
||||
Now you're ready to seal some secrets!
|
||||
|
||||
### Sealing a secret
|
||||
|
||||
To generate sealed secrets, we need the public key that the controller has generated. On a host with a valid `KUBECONFIG` env var, pointing to a kubeconfig file with cluster-admin privileges, run the following to retrieve the public key for the sealed secrets (*this is the public key, it doesn't need to be specifically protected*)
|
||||
|
||||
```bash
|
||||
kubeseal --fetch-cert \
|
||||
--controller-name=sealed-secrets \
|
||||
--controller-namespace=sealed-secrets \
|
||||
> pub-cert.pem
|
||||
```
|
||||
|
||||
Now generate a kubernetes secret locally, using `kubectl --dry-run=client`, as illustrated below:
|
||||
|
||||
```bash
|
||||
echo -n batman | kubectl create secret \
|
||||
generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json
|
||||
```
|
||||
|
||||
The result should look like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": "Secret",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "mysecret",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"data": {
|
||||
"foo": "YmF0bWFu"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Note that "*YmF0bWFu*", [base64 decoded](https://www.base64decode.org/), will reveal the top-secret secret. Not so secret, Batman!
|
||||
|
||||
Next, pipe the secret (*in json format*) to kubeseal, referencing the public key, and you'll get a totally un-decryptable "sealed" secret in return:
|
||||
|
||||
```bash
|
||||
echo -n batman | kubectl create secret \
|
||||
generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json \
|
||||
| kubeseal --cert pub-cert.pem
|
||||
```
|
||||
|
||||
Resulting in something like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": "SealedSecret",
|
||||
"apiVersion": "bitnami.com/v1alpha1",
|
||||
"metadata": {
|
||||
"name": "mysecret",
|
||||
"namespace": "default",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"spec": {
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "mysecret",
|
||||
"namespace": "default",
|
||||
"creationTimestamp": null
|
||||
},
|
||||
"data": null
|
||||
},
|
||||
"encryptedData": {
|
||||
"foo": "AgAywfMzHx/4QFa3sa68zUbpmejT/MjuHUnfI/p2eo5xFKf2SsdGiRK4q2gl2yaSeEcAlA/P1vKZpsM+Jlh5WqrFxTtJjTYgXilzTSSTkK8hilZMflCnL1xs7ywH/lk+4gHdI7z0QS7FQztc649Z+SP2gjunOmTnRTczyCbzYlYSdHS9bB7xqLvGIofvn4dtQvapiTIlaFKhr+sDNtd8WVVzJ1eLuGgc9g6u1UjhuGa8NhgQnzXBd4zQ7678pKEpkXpUmINEKMzPchp9+ME5tIDASfV/R8rxkKvwN3RO3vbCNyLXw7KXRdyhd276kfHP4p4s9nUWDHthefsh19C6lT0ixup3PiG6gT8eFPa0v4jenxqtKNczmTwN9+dF4ZqHh93cIRvffZ7RS9IUOc9kUObQgvp3fZlo2B4m36G7or30ZfuontBh4h5INQCH8j/U3tXegGwaShGmKWg+kRFYQYC4ZqHCbNQJtvTHWKELQTStoAiyHyM+T36K6nCoJTixGZ/Nq4NzIvVfcp7I8LGzEbRSTdaO+MlTT3d32HjsJplXZwSzygSNrRRGwHKr5wfo5rTTdBVuZ0A1u1a6aQPQiJYSluKZwAIJKGQyfZC5Fbo+NxSxKS8MoaZjQh5VUPB+Q92WoPJoWbqZqlU2JZOuoyDWz5x7ZS812x1etQCy6QmuLYe+3nXOuQx85drJFdNw4KXzoQs2uSA="
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
!!! question "Who set the namespace to default?"
|
||||
By default, sealed secrets can only be "unsealed" in the same namespace for which the original secret was created. In the example above, we didn't explicitly specify a namespace when creating our secret, so the default namespace was used.
|
||||
|
||||
Apply the sealed secret to the cluster...
|
||||
|
||||
```bash
|
||||
echo -n batman | kubectl create secret \
|
||||
generic mysecret --dry-run=client --from-file=foo=/dev/stdin -o json \
|
||||
| kubeseal --cert pub-cert.pem \
|
||||
| kubectl create -f -
|
||||
```
|
||||
|
||||
And watch the sealed-secrets controller decrypt it, and turn it into a regular secret, using `kubectl logs -n sealed-secrets -l app.kubernetes.io/name=sealed-secrets`:
|
||||
|
||||
```bash
|
||||
2021/11/16 10:37:16 Event(v1.ObjectReference{Kind:"SealedSecret", Namespace:"default", Name:"mysecret",
|
||||
UID:"82ac8c4b-c167-400e-8768-51957364f6b9", APIVersion:"bitnami.com/v1alpha1", ResourceVersion:"147314",
|
||||
FieldPath:""}): type: 'Normal' reason: 'Unsealed' SealedSecret unsealed successfully
|
||||
```
|
||||
|
||||
Finally, confirm that the secret now exists in the `default` namespace:
|
||||
|
||||
```yaml
|
||||
root@shredder:/tmp# kubectl get secret mysecret -o yaml
|
||||
apiVersion: v1
|
||||
data:
|
||||
foo: YmF0bWFu
|
||||
kind: Secret
|
||||
metadata:
|
||||
creationTimestamp: "2021-11-16T10:37:16Z"
|
||||
name: mysecret
|
||||
namespace: default
|
||||
ownerReferences:
|
||||
- apiVersion: bitnami.com/v1alpha1
|
||||
controller: true
|
||||
kind: SealedSecret
|
||||
name: mysecret
|
||||
uid: 82ac8c4b-c167-400e-8768-51957364f6b9
|
||||
resourceVersion: "147315"
|
||||
uid: 6f6ba81c-c9a2-45bc-877c-7a8b50afde83
|
||||
type: Opaque
|
||||
root@shredder:/tmp#
|
||||
```
|
||||
|
||||
So we now have a means to store an un-decryptable secret in our flux repo, and have only our cluster be able to convert that sealedsecret into a regular secret!
|
||||
|
||||
Based on our [flux deployment strategy](/kubernetes/deployment/flux/), we simply seal up any necessary secrets into the appropriate folder in the flux repository, and have them decrypted and unsealed into the running cluster. For example, if we needed a secret for metallb called "magic-password", containing a key "location-of-rabbit", we'd do this:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic magic-password \
|
||||
--namespace metallb-system \
|
||||
--dry-run=client \
|
||||
--from-literal=location-of-rabbit=top-hat -o json \
|
||||
| kubeseal --cert pub-cert.pem \
|
||||
> <path to repo>/metallb/sealedsecret-magic-password.yaml
|
||||
```
|
||||
|
||||
Once flux reconciles the above sealedsecret, the sealed-secrets controller in the cluster confirms that it's able to decrypt the secret, and creates the corresponding regular secret.
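You could then confirm the unsealed secret exists (*this example matches the hypothetical names used above*):

```bash
kubectl get secret magic-password -n metallb-system
```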
|
||||
|
||||
### Using our own keypair
|
||||
|
||||
One flaw in the process above is that we rely on the sealedsecrets controller to generate its own public/private keypair. This means that the pair (*and therefore all the encrypted secrets*) are specific to this cluster (*and this instance of the sealedsecrets controller*) only.
|
||||
|
||||
To go "fully GitOps", we'd want to be able to rebuild our entire cluster "from scratch" using our flux repository. If the keypair is recreated when a new cluster is built, then the existing sealedsecrets would remain forever "sealed"..
|
||||
|
||||
The solution here is to [generate our own public/private keypair](https://github.com/bitnami-labs/sealed-secrets/blob/main/docs/bring-your-own-certificates.md), and to store the private key safely and securely outside of the flux repo[^1]. We'll only need the key once, when deploying a fresh instance of the sealedsecrets controller.
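As a minimal sketch (*the linked upstream "bring your own certificates" doc is the authoritative reference; the key size, validity and filenames below are arbitrary*), you might generate a suitable keypair with openssl:

```bash
# generate a self-signed cert/key pair for the sealed-secrets controller to use
openssl req -x509 -nodes -newkey rsa:4096 -days 3650 \
  -keyout sealed-secrets.key -out sealed-secrets.crt \
  -subj "/CN=sealed-secret/O=sealed-secret"
```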
|
||||
|
||||
Once you've got the public/private key pair, create it as a kubernetes TLS secret directly in the cluster, like this:
|
||||
|
||||
```bash
|
||||
kubectl -n sealed-secrets create secret tls my-own-certs \
|
||||
--cert="<path to public key>" --key="<path to private key>"
|
||||
```
|
||||
|
||||
And then "label" the secret you just created, so that the sealedsecrets controller knows that it's special:
|
||||
|
||||
```bash
|
||||
kubectl -n sealed-secrets label secret my-own-certs \
|
||||
sealedsecrets.bitnami.com/sealed-secrets-key=active
|
||||
```
|
||||
|
||||
Restart the sealedsecret controller deployment, to force it to detect the new secret:
|
||||
|
||||
```bash
|
||||
root@shredder:~# kubectl rollout restart -n sealed-secrets deployment sealed-secrets
|
||||
deployment.apps/sealed-secrets restarted
|
||||
root@shredder:~#
|
||||
```
|
||||
|
||||
And now when you create your sealedsecrets, refer to the public key you just created using `--cert <path to cert>`. These secrets will be decryptable by **any** sealedsecrets controller bootstrapped with the same keypair (*above*).
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: There's no harm in storing the **public** key in the repo though, which means it's easy to refer to when sealing secrets.
|
||||
140
manuscript/kubernetes/ssl-certificates/cert-manager.md
Normal file
@@ -0,0 +1,140 @@
|
||||
---
|
||||
description: Cert Manager generates and renews LetsEncrypt certificates
|
||||
---
|
||||
# Cert Manager
|
||||
|
||||
To interact with your cluster externally, you'll almost certainly be using a web browser, and you'll almost certainly be wanting your browsing session to be SSL-secured. Some Ingress Controllers (e.g. Traefik) will include a default, self-signed, nasty old cert which will permit you to use SSL, but it's faaaar better to use valid certs.
|
||||
|
||||
Cert Manager adds certificates and certificate issuers as resource types in Kubernetes clusters, and simplifies the process of obtaining, renewing and using those certificates.
|
||||
|
||||

|
||||
|
||||
It can issue certificates from a variety of supported sources, including Let’s Encrypt, HashiCorp Vault, and Venafi as well as private PKI.
|
||||
|
||||
It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
|
||||
## Preparation
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-cert-manager.yaml`:
|
||||
|
||||
??? example "Example Namespace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: cert-manager
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-jetstack.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: jetstack
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://charts.jetstack.io
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment (*just the HelmRepository in this case*z*) have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/cert-manager`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-cert-manager.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: cert-manager
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./cert-manager
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: cert-manager
|
||||
namespace: cert-manager
|
||||
```
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the cert-manager-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/jetstack/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `cert-manager/configmap-cert-manager-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cert-manager-helm-chart-value-overrides
|
||||
namespace: cert-manager
|
||||
data:
|
||||
values.yaml: |-
|
||||
# paste chart values.yaml (indented) here and alter as required
|
||||
```
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Then work your way through the values you pasted, and change any which are specific to your configuration.
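One value worth checking is `installCRDs` (*present in the 1.x charts*); the Issuer and Certificate resources created in the following recipes rely on cert-manager's CRDs, so unless you're installing the CRDs by some other means, you'd likely set:

```yaml
installCRDs: true
```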
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy the cert-manager controller into the cluster, with the config we defined above. I save this in my flux repo as `cert-manager/helmrelease-cert-manager.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: cert-manager
|
||||
namespace: cert-manager
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: cert-manager
|
||||
version: 1.6.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: jetstack
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: cert-manager
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: cert-manager-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Serving
|
||||
|
||||
Once you've committed your YAML files into your repo, you should soon see some pods appear in the `cert-manager` namespace!
|
||||
|
||||
What do we have now? Well, we've got the cert-manager controller **running**, but it won't **do** anything until we define some certificate issuers, credentials, and certificates..
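A quick sanity-check that the controller and its CRDs made it into the cluster (*your output will vary*):

```bash
kubectl get pods -n cert-manager
kubectl get crds | grep cert-manager.io
```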
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If your certificates **aren't** created as you expect, then the best approach is to check the cert-manager logs, by running `kubectl logs -n cert-manager -l app.kubernetes.io/name=cert-manager`.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: Why yes, I **have** accidentally rate-limited myself by deleting/recreating my prod certificates a few times!
|
||||
22
manuscript/kubernetes/ssl-certificates/index.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# SSL Certificates
|
||||
|
||||
When you expose applications running within your cluster to the outside world, you're going to want to protect these with SSL certificates. Typically, this'll be SSL certificates used by browsers to access your Ingress resources over HTTPS, but SSL certificates can also be used for other externally-facing services, for example OpenLDAP, docker-mailserver, etc.
|
||||
|
||||
!!! question "Why do I need SSL if it's just internal?"
|
||||
It's true that you could expose applications via HTTP only, and **not** bother with SSL. By doing so, however, you "train yourself"[^1] to ignore SSL certificates / browser security warnings.
|
||||
|
||||
One day, this behaviour will bite you in the ass.
|
||||
|
||||
If you want to be a person who relies on privacy and security, then insist on privacy and security **everywhere**.
|
||||
|
||||
Plus, once you put in the effort to set up automated SSL certificates _once_, it's literally **no** extra effort to use them everywhere!
|
||||
|
||||
I've split this section, conceptually, into 3 separate tasks:
|
||||
|
||||
1. Setup [Cert Manager](/kubernetes/ssl-certificates/cert-manager/), a controller whose job it is to request / renew certificates
|
||||
2. Setup "[Issuers](/kubernetes/ssl-certificates/letsencrypt-issuers/)" for LetsEncrypt, which Cert Manager will use to request certificates
|
||||
3. Setup a [wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/) in such a way that it can be used by Ingress controllers like Traefik or Nginx
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: I had a really annoying but smart boss once who taught me this. Hi Mark! :wave:
|
||||
109
manuscript/kubernetes/ssl-certificates/letsencrypt-issuers.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# LetsEncrypt Issuers
|
||||
|
||||
Certificates are issued by certificate authorities. By far the most common issuer will be LetsEncrypt.
|
||||
|
||||
In order for Cert Manager to request/renew certificates, we have to tell it about our **Issuers**.
|
||||
|
||||
!!! note
|
||||
There's a minor distinction between an **Issuer** (*only issues certificates within one namespace*) and a **ClusterIssuer** (*issues certificates throughout the cluster*). Typically a **ClusterIssuer** will be suitable.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] [Cert-Manager](/kubernetes/ssl-certificates/cert-manager/) deployed to request/renew certificates
|
||||
* [x] API credentials for a [supported DNS01 provider](https://cert-manager.io/docs/configuration/acme/dns01/) for LetsEncrypt wildcard certs
|
||||
|
||||
## Preparation
|
||||
|
||||
### LetsEncrypt Staging
|
||||
|
||||
The ClusterIssuer resource below represents a certificate authority which is able to request certificates for any namespace within the cluster.
|
||||
I save this in my flux repo as `cert-manager/cluster-issuer-letsencrypt-staging.yaml`. I've highlighted the areas you'll need to pay attention to:
|
||||
|
||||
???+ example "ClusterIssuer for LetsEncrypt Staging"
|
||||
```yaml hl_lines="8 15 17-21"
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
email: batman@example.com
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-staging
|
||||
solvers:
|
||||
- selector:
|
||||
dnsZones:
|
||||
- "example.com"
|
||||
dns01:
|
||||
cloudflare:
|
||||
email: batman@example.com
|
||||
apiTokenSecretRef:
|
||||
name: cloudflare-api-token-secret
|
||||
key: api-token
|
||||
```
|
||||
|
||||
Deploying this issuer YAML into the cluster would provide Cert Manager with the details necessary to start issuing certificates from the LetsEncrypt staging server (*always good to test in staging first!*)
|
||||
|
||||
!!! note
|
||||
The example above is specific to [Cloudflare](https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/), but the syntax for [other providers](https://cert-manager.io/docs/configuration/acme/dns01/) is similar.
|
||||
|
||||
### LetsEncrypt Prod
|
||||
|
||||
As you'd imagine, the "prod" version of the LetsEncrypt issuer is very similar, and I save this in my flux repo as `cert-manager/cluster-issuer-letsencrypt-prod.yaml`:
|
||||
|
||||
???+ example "ClusterIssuer for LetsEncrypt Prod"
|
||||
```yaml hl_lines="8 15 17-21"
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
email: batman@example.com
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod
|
||||
solvers:
|
||||
- selector:
|
||||
dnsZones:
|
||||
- "example.com"
|
||||
dns01:
|
||||
cloudflare:
|
||||
email: batman@example.com
|
||||
apiTokenSecretRef:
|
||||
name: cloudflare-api-token-secret
|
||||
key: api-token
|
||||
```
|
||||
|
||||
!!! note
|
||||
You'll note that there are two secrets referred to above - `privateKeySecretRef`, referencing `letsencrypt-prod`, is for cert-manager to populate as a result of its ACME shenanigans - you don't have to do anything about this particular secret! The cloudflare-specific secret (*and this will change based on your provider*) is expected to be found in the same namespace as the certificate we'll be issuing, and will be discussed when we create our [wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/).
|
||||
|
||||
## Serving
|
||||
|
||||
### How do we know it works?
|
||||
|
||||
We're not quite ready to issue certificates yet, but we can now test whether the Issuers are configured correctly for LetsEncrypt. To check their status, **describe** the ClusterIssuers (e.g. `kubectl describe clusterissuer letsencrypt-prod` - ClusterIssuers aren't namespaced), which (*truncated*) shows something like this:
|
||||
|
||||
```yaml
|
||||
Status:
|
||||
Acme:
|
||||
Last Registered Email: admin@example.com
|
||||
Uri: https://acme-v02.api.letsencrypt.org/acme/acct/34523
|
||||
Conditions:
|
||||
Last Transition Time: 2021-11-18T22:54:20Z
|
||||
Message: The ACME account was registered with the ACME server
|
||||
Observed Generation: 1
|
||||
Reason: ACMEAccountRegistered
|
||||
Status: True
|
||||
Type: Ready
|
||||
Events: <none>
|
||||
```
|
||||
|
||||
Provided your account is registered, you're ready to proceed with [creating a wildcard certificate](/kubernetes/ssl-certificates/letsencrypt-wildcard/)!
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: Since a ClusterIssuer is not a namespaced resource, it doesn't exist in any specific namespace. Therefore, my assumption is that the `apiTokenSecretRef` secret is only "looked for" when a certificate (*which __is__ namespaced*) requires validation.
|
||||
175
manuscript/kubernetes/ssl-certificates/secret-replicator.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# Secret Replicator
|
||||
|
||||
As explained when creating our [LetsEncrypt Wildcard certificates](/kubernetes/ssl-certificates/letsencrypt-wildcard/), it can be problematic that Certificates can't be **shared** between namespaces. One simple solution to this problem is to "replicate" secrets from one "source" namespace into all other namespaces.
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] [Cert-Manager](/kubernetes/ssl-certificates/cert-manager/) deployed to request/renew certificates
|
||||
* [x] [LetsEncrypt Wildcard Certificates](/kubernetes/ssl-certificates/letsencrypt-wildcard/) created in the `letsencrypt-wildcard-cert` namespace
|
||||
|
||||
Kiwigrid's "[Secret Replicator](https://github.com/kiwigrid/secret-replicator)" is a simple controller which replicates secrets from one namespace to another.[^1]
|
||||
|
||||
## Preparation
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our HelmRelease and associated ConfigMaps into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-secret-replicator.yaml`:
|
||||
|
||||
??? example "Example Namespace (click to expand)"
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: secret-replicator
|
||||
```
|
||||
|
||||
### HelmRepository
|
||||
|
||||
Next, we need to define a HelmRepository (*a repository of helm charts*), to which we'll refer when we create the HelmRelease. We only need to do this once per-repository. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/helmrepositories/helmrepository-kiwigrid.yaml`:
|
||||
|
||||
??? example "Example HelmRepository (click to expand)"
|
||||
```yaml
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta1
|
||||
kind: HelmRepository
|
||||
metadata:
|
||||
name: kiwigrid
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
url: https://kiwigrid.github.io
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now that the "global" elements of this deployment have been defined, we do some "flux-ception", and go one layer deeper, adding another Kustomization, telling flux to deploy any YAMLs found in the repo at `/secret-replicator`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-secret-replicator.yaml`:
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: secret-replicator
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./secret-replicator
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
healthChecks:
|
||||
- apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: secret-replicator
|
||||
namespace: secret-replicator
|
||||
```
|
||||
|
||||
### ConfigMap
|
||||
|
||||
Now we're into the secret-replicator-specific YAMLs. First, we create a ConfigMap, containing the entire contents of the helm chart's [values.yaml](https://github.com/kiwigrid/helm-charts/blob/master/charts/secret-replicator/values.yaml). Paste the values into a `values.yaml` key as illustrated below, indented 4 spaces (*since they're "encapsulated" within the ConfigMap YAML*). I create this in my flux repo at `secret-replicator/configmap-secret-replicator-helm-chart-value-overrides.yaml`:
|
||||
|
||||
??? example "Example ConfigMap (click to expand)"
|
||||
```yaml hl_lines="21 27"
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: secret-replicator-helm-chart-value-overrides
|
||||
namespace: secret-replicator
|
||||
data:
|
||||
values.yaml: |-
|
||||
# Default values for secret-replicator.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
image:
|
||||
repository: kiwigrid/secret-replicator
|
||||
tag: 0.2.0
|
||||
pullPolicy: IfNotPresent
|
||||
## Specify ImagePullSecrets for Pods
|
||||
## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
|
||||
# pullSecrets: myregistrykey
|
||||
|
||||
# csv list of secrets
|
||||
secretList: "letsencrypt-wildcard-cert"
|
||||
# secretList: "secret1,secret2
|
||||
|
||||
ignoreNamespaces: "kube-system,kube-public"
|
||||
|
||||
# If defined, allow secret-replicator to watch for secrets in _another_ namespace
|
||||
secretNamespace: letsencrypt-wildcard-cert"
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 50m
|
||||
# memory: 20Mi
|
||||
# requests:
|
||||
# cpu: 20m
|
||||
# memory: 20Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
```
|
||||
--8<-- "kubernetes-why-full-values-in-configmap.md"
|
||||
|
||||
Note that the following values changed from default, above:
|
||||
|
||||
* `secretList`: `letsencrypt-wildcard-cert`
|
||||
* `secretNamespace`: `letsencrypt-wildcard-cert`
|
||||
|
||||
### HelmRelease
|
||||
|
||||
Lastly, having set the scene above, we define the HelmRelease which will actually deploy the secret-replicator controller into the cluster, with the config we defined above. I save this in my flux repo as `secret-replicator/helmrelease-secret-replicator.yaml`:
|
||||
|
||||
??? example "Example HelmRelease (click to expand)"
|
||||
```yaml
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: secret-replicator
|
||||
namespace: secret-replicator
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: secret-replicator
|
||||
version: 0.6.x
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: kiwigrid
|
||||
namespace: flux-system
|
||||
interval: 15m
|
||||
timeout: 5m
|
||||
releaseName: secret-replicator
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: secret-replicator-helm-chart-value-overrides
|
||||
valuesKey: values.yaml # This is the default, but best to be explicit for clarity
|
||||
```
|
||||
|
||||
--8<-- "kubernetes-why-not-config-in-helmrelease.md"
|
||||
|
||||
## Serving
|
||||
|
||||
Once you've committed your YAML files into your repo, you should soon see some pods appear in the `secret-replicator` namespace!
|
||||
|
||||
### How do we know it worked?
|
||||
|
||||
Look for secrets across the whole cluster, by running `kubectl get secrets -A | grep letsencrypt-wildcard-cert`. What you should see is an identical secret in every namespace. Note that the **Certificate** only exists in the `letsencrypt-wildcard-cert` namespace, but the secret it **generates** is what gets replicated to every other namespace.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If the secret **isn't** replicated as you expect, then the best approach is to check the secret-replicator logs, by running `kubectl logs -n secret-replicator -l app.kubernetes.io/name=secret-replicator`.
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: To my great New Zealandy confusion, "Kiwigrid GmbH" is a German company :shrug:
|
||||
156
manuscript/kubernetes/ssl-certificates/wildcard-certificate.md
Normal file
@@ -0,0 +1,156 @@
|
||||
# Wildcard Certificate
|
||||
|
||||
Now that we have an [Issuer](/kubernetes/ssl-certificates/letsencrypt-issuers/) and the necessary credentials, we can create a wildcard certificate, which we can then feed to our [Ingresses](/kubernetes/ingress/).
|
||||
|
||||
!!! summary "Ingredients"
|
||||
|
||||
* [x] A [Kubernetes cluster](/kubernetes/cluster/)
|
||||
* [x] [Flux deployment process](/kubernetes/deployment/flux/) bootstrapped
|
||||
* [x] [Cert-Manager](/kubernetes/ssl-certificates/cert-manager/) deployed to request/renew certificates
|
||||
* [x] [LetsEncrypt ClusterIssuers](/kubernetes/ssl-certificates/letsencrypt-issuers/) created using DNS01 validation solvers
|
||||
|
||||
Certificates are Kubernetes secrets, and so are subject to the same limitations / RBAC controls as other secrets. Importantly, they are **namespaced**, so it's not possible to refer to a secret in one namespace, from a pod in **another** namespace. This restriction also applies to Ingress resources (*although there are workarounds*) - An Ingress can only refer to TLS secrets in its own namespace.
|
||||
|
||||
This behaviour can be prohibitive, because (a) we don't want to have to request/renew certificates for every single FQDN served by our cluster, and (b) we don't want more than one wildcard certificate if possible, to avoid being rate-limited at request/renewal time.
|
||||
|
||||
To take advantage of the various workarounds available, I find it best to put the certificates into a dedicated namespace, which I name.. `letsencrypt-wildcard-cert`.
|
||||
|
||||
!!! question "Why not the cert-manager namespace?"
|
||||
Because cert-manager is a _controller_, whose job it is to act on resources. I should be able to remove cert-manager entirely (even its namespace) from my cluster, and re-add it, without impacting the resources it acts upon. If the certificates lived in the `cert-manager` namespace, then I wouldn't be able to remove the namespace without also destroying the certificates.
|
||||
|
||||
## Preparation
|
||||
|
||||
### Namespace
|
||||
|
||||
We need a namespace to deploy our certificates and associated secrets into. Per the [flux design](/kubernetes/deployment/flux/), I create this in my flux repo at `flux-system/namespaces/namespace-letsencrypt-wildcard-cert.yaml`:
|
||||
|
||||
??? example "Example Namespace (click to expand)"
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: letsencrypt-wildcard-cert
|
||||
```
|
||||
|
||||
### Kustomization
|
||||
|
||||
Now we need a kustomization to tell Flux to install any YAMLs it finds in `/letsencrypt-wildcard-cert`. I create this Kustomization in my flux repo at `flux-system/kustomizations/kustomization-letsencrypt-wildcard-cert.yaml`.
|
||||
|
||||
!!! tip
|
||||
Importantly, note that we define a **dependsOn**, to tell Flux not to try to reconcile this kustomization before the cert-manager and sealed-secrets kustomizations are reconciled. Cert-manager creates the CRDs used to define certificates, so prior to cert-manager being installed, the cluster won't know what to do with the ClusterIssuer/Certificate resources.
|
||||
|
||||
??? example "Example Kustomization (click to expand)"
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: letsencrypt-wildcard-cert
|
||||
namespace: flux-system
|
||||
spec:
|
||||
interval: 15m
|
||||
path: ./letsencrypt-wildcard-cert
|
||||
dependsOn:
|
||||
- name: "cert-manager"
|
||||
- name: "sealed-secrets"
|
||||
prune: true # remove any elements later removed from the above path
|
||||
timeout: 2m # if not set, this defaults to interval duration, which is 1h
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
validation: server
|
||||
```
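Because of the **dependsOn** above, this kustomization shouldn't become `Ready` until cert-manager and sealed-secrets have reconciled. Here's a rough way to confirm the ordering (and to hurry things along), assuming you're using the Flux CLI and the kustomization names above:

```bash
# The letsencrypt-wildcard-cert kustomization should only turn Ready
# once the cert-manager and sealed-secrets kustomizations are Ready
flux get kustomizations

# Optionally, trigger an immediate reconciliation rather than waiting for the interval
flux reconcile kustomization letsencrypt-wildcard-cert --with-source
```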
|
||||
|
||||
### DNS01 Validation Secret
|
||||
|
||||
The simplest way to validate ownership of a domain to LetsEncrypt is to use DNS-01 validation. In this mode, we "prove" our ownership of a domain name by creating a special TXT record, which LetsEncrypt will check and confirm for validity, before issuing us any certificates for that domain name.
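If you're curious, you can catch this in the act during issuance. A little sketch, assuming `example.com` is your domain and you have `dig` available (the record only exists while a challenge is in flight):

```bash
# While a DNS-01 challenge is active, cert-manager publishes a TXT record here,
# which LetsEncrypt verifies before issuing the certificate
dig +short TXT _acme-challenge.example.com
```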
|
||||
|
||||
The [ClusterIssuers we created earlier](/kubernetes/ssl-certificates/letsencrypt-issuers/) included a field `solvers.dns01.cloudflare.apiTokenSecretRef.name`. This value points to a secret (*in the same namespace as the certificate[^1]*) containing credentials necessary to create DNS records automatically. (*again, my examples are for cloudflare, but the [other supported providers](https://cert-manager.io/docs/configuration/acme/dns01/) will have similar secret requirements*)
|
||||
|
||||
Thanks to [Sealed Secrets](/kubernetes/sealed-secrets/), we have a safe way of committing secrets into our repository, so to create the necessary secret, you'd run something like this:
|
||||
|
||||
```bash
kubectl create secret generic cloudflare-api-token-secret \
  --namespace letsencrypt-wildcard-cert \
  --dry-run=client \
  --from-literal=api-token=gobbledegook -o json \
  | kubeseal --cert <path to public cert> --format yaml \
  > <path to repo>/letsencrypt-wildcard-cert/sealedsecret-cloudflare-api-token-secret.yaml
```
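Once Flux applies the SealedSecret, the sealed-secrets controller should unseal it into a regular Secret in the same namespace. A quick check, assuming the names used above:

```bash
# Both the SealedSecret and the unsealed Secret should show up here
kubectl get sealedsecrets,secrets -n letsencrypt-wildcard-cert
```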
|
||||
|
||||
### Staging Certificate
|
||||
|
||||
Finally, we create our certificates! Here's an example certificate resource which uses the letsencrypt-staging issuer (*to avoid being rate-limited while learning!*). I save this in my flux repo as `/letsencrypt-wildcard-cert/certificate-wildcard-cert-letsencrypt-staging.yaml`
|
||||
|
||||
???+ example "Example certificate requested from LetsEncrypt staging"
|
||||
|
||||
```yaml
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: letsencrypt-wildcard-cert-example.com-staging
|
||||
namespace: letsencrypt-wildcard-cert
|
||||
spec:
|
||||
# secretName doesn't have to match the certificate name, but it may as well, for simplicity!
|
||||
secretName: letsencrypt-wildcard-cert-example.com-staging
|
||||
issuerRef:
|
||||
name: letsencrypt-staging
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- "example.com"
|
||||
- "*.example.com"
|
||||
```
|
||||
|
||||
## Serving
|
||||
|
||||
### Did it work?
|
||||
|
||||
After committing the above to the repo, provided the YAML syntax is correct, you should end up with a "Certificate" resource in the `letsencrypt-wildcard-cert` namespace. This doesn't mean that the certificate has been issued by LetsEncrypt yet though - describe the certificate for more details, using `kubectl describe certificate -n letsencrypt-wildcard-cert letsencrypt-wildcard-cert-example.com-staging`. The `status` field will show you whether the certificate is issued or not:

```yaml
Status:
  Conditions:
    Last Transition Time:  2021-11-19T01:09:32Z
    Message:               Certificate is up to date and has not expired
    Observed Generation:   1
    Reason:                Ready
    Status:                True
    Type:                  Ready
  Not After:               2022-02-17T00:09:26Z
  Not Before:              2021-11-19T00:09:27Z
  Renewal Time:            2022-01-18T00:09:26Z
  Revision:                1
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If your certificate does not become `Ready` within a few minutes [^1], try watching the logs of cert-manager to identify the issue, using `kubectl logs -f -n cert-manager -l app.kubernetes.io/name=cert-manager`.
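You can also follow the issuance "paper trail" through cert-manager's intermediate resources. A rough sketch, assuming the namespace used above:

```bash
# cert-manager works through these resources on its way to issuing a certificate;
# a stuck Challenge usually explains a stuck Certificate
kubectl get certificaterequests,orders,challenges -n letsencrypt-wildcard-cert

# Describe any pending challenge for the gory details (DNS propagation, API errors, etc.)
kubectl describe challenges -n letsencrypt-wildcard-cert
```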
|
||||
|
||||
### Production Certificate
|
||||
|
||||
Once you know you can happily deploy a staging certificate, it's safe enough to attempt your "prod" certificate. I save this in my flux repo as `/letsencrypt-wildcard-cert/certificate-wildcard-cert-letsencrypt-prod.yaml`
|
||||
|
||||
???+ example "Example certificate requested from LetsEncrypt prod"
|
||||
|
||||
```yaml
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: letsencrypt-wildcard-cert-example.com
|
||||
namespace: letsencrypt-wildcard-cert
|
||||
spec:
|
||||
# secretName doesn't have to match the certificate name, but it may as well, for simplicity!
|
||||
secretName: letsencrypt-wildcard-cert-example.com
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
dnsNames:
|
||||
- "example.com"
|
||||
- "*.example.com"
|
||||
```
|
||||
|
||||
Commit the certificate and follow the steps above to confirm that your prod certificate has been issued.
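To be extra-sure you're holding a prod certificate (and not the staging one), you can inspect the certificate stored in the secret itself. A minimal sketch, assuming `openssl` is available locally and the secret name from the example above:

```bash
# Dump the certificate from the secret, and check who issued it and when it expires
kubectl get secret letsencrypt-wildcard-cert-example.com \
  -n letsencrypt-wildcard-cert \
  -o jsonpath='{.data.tls\.crt}' | base64 -d \
  | openssl x509 -noout -issuer -subject -dates
```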
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
|
||||
[^1]: This process can take a frustratingly long time, and watching the cert-manager logs at least gives some assurance that it's progressing!
|
||||
@@ -1,261 +0,0 @@
|
||||
# Kanboard
|
||||
|
||||
Kanboard is a Kanban tool, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of my favorite RSS reader, [Miniflux](/recipes/miniflux/)_)
|
||||
|
||||

|
||||
|
||||
Features include:
|
||||
|
||||
* Visualize your work
|
||||
* Limit your work in progress to be more efficient
|
||||
* Customize your boards according to your business activities
|
||||
* Multiple projects with the ability to drag and drop tasks
|
||||
* Reports and analytics
|
||||
* Fast and simple to use
|
||||
* Access from anywhere with a modern browser
|
||||
* Plugins and integrations with external services
|
||||
* Free, open source and self-hosted
|
||||
* Super simple installation
|
||||
|
||||
## Ingredients
|
||||
|
||||
1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/)
|
||||
2. A DNS name for your kanboard instance (*kanboard.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress
|
||||
|
||||
## Preparation
|
||||
|
||||
### Prepare traefik for namespace
|
||||
|
||||
When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *kanboard* namespace, as illustrated below:
|
||||
|
||||
```yaml
|
||||
<snip>
|
||||
kubernetes:
|
||||
namespaces:
|
||||
- kube-system
|
||||
- nextcloud
|
||||
- kanboard
|
||||
- miniflux
|
||||
<snip>
|
||||
```
|
||||
|
||||
If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods```
|
||||
|
||||
### Create data locations
|
||||
|
||||
Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment.
|
||||
|
||||
```bash
|
||||
mkdir /var/data/config/kanboard
|
||||
```
|
||||
|
||||
### Create namespace
|
||||
|
||||
We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the kanboard stack with the following .yml:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/kanboard/namespace.yml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kanboard
|
||||
EOF
|
||||
kubectl create -f /var/data/config/kanboard/namespace.yml
|
||||
```
|
||||
|
||||
### Create persistent volume claim
|
||||
|
||||
Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the kanboard app and plugin data:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/kanboard/persistent-volumeclaim.yml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kanboard-volumeclaim
|
||||
namespace: kanboard
|
||||
annotations:
|
||||
backup.kubernetes.io/deltas: P1D P7D
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
EOF
|
||||
kubectl create -f /var/data/config/kanboard/persistent-volumeclaim.yml
|
||||
```
|
||||
|
||||
!!! question "What's that annotation about?"
|
||||
The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days.
|
||||
|
||||
### Create ConfigMap
|
||||
|
||||
Kanboard's configuration is all contained within ```config.php```, which needs to be presented to the container. We _could_ maintain ```config.php``` in the persistent volume we created above, but this would require manually accessing the pod every time we wanted to make a change.
|
||||
|
||||
Instead, we'll create ```config.php``` as a [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), meaning it "lives" within the Kuberetes cluster and can be **presented** to our pod. When we want to make changes, we simply update the ConfigMap (*delete and recreate, to be accurate*), and relaunch the pod.
|
||||
|
||||
Grab a copy of [config.default.php](https://github.com/kanboard/kanboard/blob/master/config.default.php), save it to ```/var/data/config/kanboard/config.php```, and customize it per [the guide](https://docs.kanboard.org/en/latest/admin_guide/config_file.html).
|
||||
|
||||
At the very least, I'd suggest making the following changes:
|
||||
|
||||
```php
|
||||
define('PLUGIN_INSTALLER', true); // Yes, I want to install plugins using the UI
|
||||
define('ENABLE_URL_REWRITE', true); // Yes, I want pretty URLs
|
||||
```
|
||||
|
||||
Now create the configmap from config.php, by running ```kubectl create configmap -n kanboard kanboard-config --from-file=config.php```
|
||||
|
||||
## Serving
|
||||
|
||||
Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and a [configmap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/), we can create a [deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [service](https://kubernetes.io/docs/concepts/services-networking/service/), and [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the kanboard [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
|
||||
|
||||
### Create deployment
|
||||
|
||||
Create a deployment to tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Note below that we mount the persistent volume **twice**, to both ```/var/www/app/data``` and ```/var/www/app/plugins```, using the subPath value to differentiate them. This trick avoids us having to provision **two** persistent volumes just for data mounted in 2 separate locations.
|
||||
|
||||
--8<-- "premix-cta.md"
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/kanboard/deployment.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: kanboard
|
||||
name: app
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: app
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
containers:
|
||||
- image: kanboard/kanboard
|
||||
name: app
|
||||
volumeMounts:
|
||||
- name: kanboard-config
|
||||
mountPath: /var/www/app/config.php
|
||||
subPath: config.php
|
||||
- name: kanboard-app
|
||||
mountPath: /var/www/app/data
|
||||
subPath: data
|
||||
- name: kanboard-app
|
||||
mountPath: /var/www/app/plugins
|
||||
subPath: plugins
|
||||
volumes:
|
||||
- name: kanboard-app
|
||||
persistentVolumeClaim:
|
||||
claimName: kanboard-volumeclaim
|
||||
- name: kanboard-config
|
||||
configMap:
|
||||
name: kanboard-config
|
||||
EOF
|
||||
kubectl create -f /var/data/kanboard/deployment.yml
|
||||
```
|
||||
|
||||
Check that your deployment is running, with ```kubectl get pods -n kanboard```. After a minute or so, you should see a "Running" pod, as illustrated below:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get pods -n kanboard
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
app-79f97f7db6-hsmfg 1/1 Running 0 11d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Create service
|
||||
|
||||
The service resource "advertises" the availability of TCP port 80 in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/kanboard/service.yml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: app
|
||||
namespace: kanboard
|
||||
spec:
|
||||
selector:
|
||||
app: app
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
clusterIP: None
|
||||
EOF
|
||||
kubectl create -f /var/data/kanboard/service.yml
|
||||
```
|
||||
|
||||
Check that your service is deployed, with ```kubectl get services -n kanboard```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get service -n kanboard
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
app ClusterIP None <none> 80/TCP 38d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Create ingress
|
||||
|
||||
The ingress resource tells Traefik what to forward inbound requests for *kanboard.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/kanboard/ingress.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: app
|
||||
namespace: kanboard
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
spec:
|
||||
rules:
|
||||
- host: kanboard.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: app
|
||||
servicePort: 80
|
||||
EOF
|
||||
kubectl create -f /var/data/kanboard/ingress.yml
|
||||
```
|
||||
|
||||
Check that your service is deployed, with ```kubectl get ingress -n kanboard```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get ingress -n kanboard
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
app kanboard.funkypenguin.co.nz 80 38d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Access Kanboard
|
||||
|
||||
At this point, you should be able to access your instance on your chosen DNS name (*i.e. <https://kanboard.example.com>*)
|
||||
|
||||
### Updating config.php
|
||||
|
||||
Since ```config.php``` is a ConfigMap now, to update it, make your local changes, and then delete and recreate the ConfigMap, by running:
|
||||
|
||||
```bash
|
||||
kubectl delete configmap -n kanboard kanboard-config
|
||||
kubectl create configmap -n kanboard kanboard-config --from-file=config.php
|
||||
```
|
||||
|
||||
Then, in the absence of any other changes to the deployment definition, force the pod to restart by issuing a "null patch", as follows:
|
||||
|
||||
```bash
|
||||
kubectl patch -n kanboard deployment app -p "{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"date\":\"`date +'%s'`\"}}}}}"
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
To look at the Kanboard pod's logs, run ```kubectl logs -n kanboard <name of pod per above> -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/).
|
||||
|
||||
[^1]: The simplest deployment of Kanboard uses the default SQLite database backend, stored on the persistent volume. You can convert this to a "real" database running MySQL or PostgreSQL, by running an additional database pod and service. Contact me if you'd like further details ;)
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
314
manuscript/recipes/kubernetes/wip.md
Normal file
@@ -0,0 +1,314 @@
|
||||
# Miniflux
|
||||
|
||||
Miniflux is a lightweight RSS reader, developed by [Frédéric Guillot](https://github.com/fguillot). (_Who also happens to be the developer of my favorite Open Source Kanban app, [Kanboard](/recipes/kanboard/)_)
|
||||
|
||||

|
||||
|
||||
I've [reviewed Miniflux in detail on my blog](https://www.funkypenguin.co.nz/review/miniflux-lightweight-self-hosted-rss-reader/), but features (among many) that I appreciate:
|
||||
|
||||
* Compatible with the Fever API, read your feeds through existing mobile and desktop clients (_This is the killer feature for me. I hardly ever read RSS on my desktop, I typically read on my iPhone or iPad, using [Fiery Feeds](http://cocoacake.net/apps/fiery/) or my new squeeze, [Unread](https://www.goldenhillsoftware.com/unread/)_)
|
||||
* Send your bookmarks to Pinboard, Wallabag, Shaarli or Instapaper (_I use this to automatically pin my bookmarks for collection on my [blog](https://www.funkypenguin.co.nz/)_)
|
||||
* Feeds can be configured to download a "full" version of the content (_rather than an excerpt_)
|
||||
* Use the Bookmarklet to subscribe to a website directly from any browsers
|
||||
|
||||
!!! abstract "2.0+ is a bit different"
|
||||
[Some things changed](https://docs.miniflux.net/en/latest/migration.html) when Miniflux 2.0 was released. For one thing, the only supported database is now postgresql (_no more SQLite_). External themes are gone, as is PHP (_in favor of golang_). It's been a controversial change, but I'm keen on minimal and single-purpose, so I'm still very happy with the direction of development. The developer has laid out his [opinions](https://docs.miniflux.net/en/latest/opinionated.html) re the decisions he's made in the course of development.
|
||||
|
||||
## Ingredients
|
||||
|
||||
1. A [Kubernetes Cluster](/kubernetes/design/) including [Traefik Ingress](/kubernetes/traefik/)
|
||||
2. A DNS name for your miniflux instance (*miniflux.example.com*, below) pointing to your [load balancer](/kubernetes/loadbalancer/), fronting your Traefik ingress
|
||||
|
||||
## Preparation
|
||||
|
||||
### Prepare traefik for namespace
|
||||
|
||||
When you deployed [Traefik via the helm chart](/kubernetes/traefik/), you would have customized ```values.yml``` for your deployment. In ```values.yml``` is a list of namespaces which Traefik is permitted to access. Update ```values.yml``` to include the *miniflux* namespace, as illustrated below:
|
||||
|
||||
```yaml
|
||||
<snip>
|
||||
kubernetes:
|
||||
namespaces:
|
||||
- kube-system
|
||||
- nextcloud
|
||||
- kanboard
|
||||
- miniflux
|
||||
<snip>
|
||||
```
|
||||
|
||||
If you've updated ```values.yml```, upgrade your traefik deployment via helm, by running ```helm upgrade --values values.yml traefik stable/traefik --recreate-pods```
|
||||
|
||||
### Create data locations
|
||||
|
||||
Although we could simply bind-mount local volumes to a local Kubernetes cluster, since we're targeting a cloud-based Kubernetes deployment, we only need a local path to store the YAML files which define the various aspects of our Kubernetes deployment.
|
||||
|
||||
```bash
|
||||
mkdir /var/data/config/miniflux
|
||||
```
|
||||
|
||||
### Create namespace
|
||||
|
||||
We use Kubernetes namespaces for service discovery and isolation between our stacks, so create a namespace for the miniflux stack with the following .yml:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/miniflux/namespace.yml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: miniflux
|
||||
EOF
|
||||
kubectl create -f /var/data/config/miniflux/namespace.yml
|
||||
```
|
||||
|
||||
### Create persistent volume claim
|
||||
|
||||
Persistent volume claims are a streamlined way to create a persistent volume and assign it to a container in a pod. Create a claim for the miniflux postgres database:
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/config/miniflux/db-persistent-volumeclaim.yml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: miniflux-db
|
||||
namespace: miniflux
|
||||
annotations:
|
||||
backup.kubernetes.io/deltas: P1D P7D
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
EOF
|
||||
kubectl create -f /var/data/config/miniflux/db-persistent-volumeclaim.yml
|
||||
```
|
||||
|
||||
!!! question "What's that annotation about?"
|
||||
The annotation is used by [k8s-snapshots](/kubernetes/snapshots/) to create daily incremental snapshots of your persistent volumes. In this case, our volume is snapshotted daily, and copies kept for 7 days.
|
||||
|
||||
### Create secrets
|
||||
|
||||
It's not always desirable to have sensitive data stored in your .yml files. Maybe you want to check your config into a git repository, or share it. Using Kubernetes Secrets means that you can create "secrets", and use these in your deployments by name, without exposing their contents. Run the following, replacing ```imtoosexyformyadminpassword``` with your own admin password, and the ```mydbpass``` value in both postgres-password.secret **and** database-url.secret:
|
||||
|
||||
```bash
echo -n "imtoosexyformyadminpassword" > admin-password.secret
echo -n "mydbpass" > postgres-password.secret
echo -n "postgres://miniflux:mydbpass@db/miniflux?sslmode=disable" > database-url.secret

kubectl create secret -n miniflux generic miniflux-credentials \
  --from-file=admin-password.secret \
  --from-file=postgres-password.secret \
  --from-file=database-url.secret
```
|
||||
|
||||
!!! tip "Why use ```echo -n```?"
|
||||
Because. See [my blog post here](https://www.funkypenguin.co.nz/blog/beware-the-hidden-newlines-in-kubernetes-secrets/) for the pain of hunting invisible newlines, that's why!
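Once the secret exists, it's worth confirming that all three keys made it in (and that no sneaky newlines came along for the ride). A quick check, assuming the names above:

```bash
# Should list admin-password.secret, database-url.secret and postgres-password.secret,
# with byte counts matching what you echoed in
kubectl describe secret -n miniflux miniflux-credentials
```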
|
||||
|
||||
## Serving
|
||||
|
||||
Now that we have a [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/), and some [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), we can create [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [services](https://kubernetes.io/docs/concepts/services-networking/service/), and an [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) for the miniflux [pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/).
|
||||
|
||||
### Create db deployment
|
||||
|
||||
Deployments tell Kubernetes about the desired state of the pod (*which it will then attempt to maintain*). Create the db deployment by executing the following. Note that the deployment refers to the secrets created above.
|
||||
|
||||
--8<-- "premix-cta.md"
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/db-deployment.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: miniflux
|
||||
name: db
|
||||
labels:
|
||||
app: db
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: db
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: db
|
||||
spec:
|
||||
containers:
|
||||
- image: postgres:11
|
||||
name: db
|
||||
volumeMounts:
|
||||
- name: miniflux-db
|
||||
mountPath: /var/lib/postgresql/data
|
||||
env:
|
||||
- name: POSTGRES_USER
|
||||
value: "miniflux"
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: postgres-password.secret
|
||||
volumes:
|
||||
- name: miniflux-db
|
||||
persistentVolumeClaim:
|
||||
claimName: miniflux-db
EOF
kubectl create -f /var/data/miniflux/db-deployment.yml
```
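PostgreSQL can take a little while to initialise its data directory on first start. If you'd rather wait for it before deploying the app, something like this works (assuming the deployment name above):

```bash
# Block until the db deployment reports itself Available (or give up after 2 minutes)
kubectl wait --for=condition=available -n miniflux deployment/db --timeout=120s
```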
|
||||
|
||||
### Create app deployment
|
||||
|
||||
Create the app deployment by executing the following. Again, note that the deployment refers to the secrets created above.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/app-deployment.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: miniflux
|
||||
name: app
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: app
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: app
|
||||
spec:
|
||||
containers:
|
||||
- image: miniflux/miniflux
|
||||
name: app
|
||||
env:
|
||||
# This is necessary for the miniflux to update the db schema, even on an empty DB
|
||||
- name: CREATE_ADMIN
|
||||
value: "1"
|
||||
- name: RUN_MIGRATIONS
|
||||
value: "1"
|
||||
- name: ADMIN_USERNAME
|
||||
value: "admin"
|
||||
- name: ADMIN_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: admin-password.secret
|
||||
- name: DATABASE_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: miniflux-credentials
|
||||
key: database-url.secret
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/app-deployment.yml
|
||||
```
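Because ```CREATE_ADMIN``` and ```RUN_MIGRATIONS``` are set, the app pod should create the schema and admin user on first boot. If you want to confirm, the app logs are the place to look (a sketch, assuming the deployment name above):

```bash
# Watch miniflux run its migrations and create the admin user on first start
kubectl logs -n miniflux deploy/app -f
```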
|
||||
|
||||
### Check pods
|
||||
|
||||
Check that your deployment is running, with ```kubectl get pods -n miniflux```. After a minute or so, you should see 2 "Running" pods, as illustrated below:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get pods -n miniflux
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
app-667c667b75-5jjm9 1/1 Running 0 4d
|
||||
db-fcd47b88f-9vvqt 1/1 Running 0 4d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Create db service
|
||||
|
||||
The db service resource "advertises" the availability of PostgreSQL's port (TCP 5432) in your pod, to the rest of the cluster (*constrained within your namespace*). It seems a little like overkill coming from the Docker Swarm's automated "service discovery" model, but the Kubernetes design allows for load balancing, rolling upgrades, and health checks of individual pods, without impacting the rest of the cluster elements.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/db-service.yml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: db
|
||||
namespace: miniflux
|
||||
spec:
|
||||
selector:
|
||||
app: db
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5432
|
||||
clusterIP: None
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/db-service.yml
|
||||
```
|
||||
|
||||
### Create app service
|
||||
|
||||
The app service resource "advertises" the availability of miniflux's HTTP listener port (TCP 8080) in your pod. This is the service which will be referred to by the ingress (below), so that Traefik can route incoming traffic to the miniflux app.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/app-service.yml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: app
|
||||
namespace: miniflux
|
||||
spec:
|
||||
selector:
|
||||
app: app
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
clusterIP: None
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/app-service.yml
|
||||
```
|
||||
|
||||
### Check services
|
||||
|
||||
Check that your services are deployed, with ```kubectl get services -n miniflux```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] % kubectl get services -n miniflux
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
app ClusterIP None <none> 8080/TCP 55d
|
||||
db ClusterIP None <none> 5432/TCP 55d
|
||||
[funkypenguin:~] %
|
||||
```
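Before wiring up the ingress, you can smoke-test the app directly with a port-forward. A rough sketch, assuming the deployment name above:

```bash
# Forward local port 8080 to the miniflux pod, then browse to http://localhost:8080
kubectl port-forward -n miniflux deploy/app 8080:8080
```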
|
||||
|
||||
### Create ingress
|
||||
|
||||
The ingress resource tells Traefik to forward inbound requests for *miniflux.example.com* to your service (defined above), which in turn passes the request to the "app" pod. Adjust the config below for your domain.
|
||||
|
||||
```bash
|
||||
cat <<EOF > /var/data/miniflux/ingress.yml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: app
|
||||
namespace: miniflux
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
spec:
|
||||
rules:
|
||||
- host: miniflux.example.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: app
|
||||
servicePort: 8080
|
||||
EOF
|
||||
kubectl create -f /var/data/miniflux/ingress.yml
|
||||
```
|
||||
|
||||
Check that your ingress is deployed, with ```kubectl get ingress -n miniflux```. You should see something like this:
|
||||
|
||||
```bash
|
||||
[funkypenguin:~] 130 % kubectl get ingress -n miniflux
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
app miniflux.funkypenguin.co.nz 80 55d
|
||||
[funkypenguin:~] %
|
||||
```
|
||||
|
||||
### Access Miniflux
|
||||
|
||||
At this point, you should be able to access your instance on your chosen DNS name (*i.e. <https://miniflux.example.com>*)
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
To look at the Miniflux pod's logs, run ```kubectl logs -n miniflux <name of pod per above> -f```. For further troubleshooting hints, see [Troubleshooting](/reference/kubernetes/troubleshooting/).
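If the app pod is crash-looping, the problem is usually the database connection. A couple of extra checks which may help, assuming the deployment names and credentials used above (and noting that the postgres image ships with ```psql```):

```bash
# Is postgres happy?
kubectl logs -n miniflux deploy/db

# Can we reach the miniflux database directly?
kubectl exec -n miniflux deploy/db -- psql -U miniflux -d miniflux -c '\dt'
```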
|
||||
|
||||
--8<-- "recipe-footer.md"
|
||||
@@ -2,5 +2,5 @@
|
||||
# that don't degrade for the open-source version
|
||||
INHERIT: mkdocs.yml
|
||||
# disabled for now, since I'm not convinced social cards are better than default thumbnails
|
||||
plugins:
|
||||
social: {}
|
||||
# plugins:
|
||||
# social: {}
|
||||
|
||||
97
mkdocs.yml
@@ -142,19 +142,88 @@ nav:
|
||||
- Networks: reference/networks.md
|
||||
- OpenVPN : reference/openvpn.md
|
||||
- Troubleshooting: reference/troubleshooting.md
|
||||
- Kubernetes:
|
||||
- ⛴ Kubernetes:
|
||||
- Preparation:
|
||||
- kubernetes/index.md
|
||||
- Design: kubernetes/design.md
|
||||
- Cluster: kubernetes/cluster.md
|
||||
- DIY Cluster: kubernetes/diycluster.md
|
||||
- Load Balancer: kubernetes/loadbalancer.md
|
||||
- Snapshots: kubernetes/snapshots.md
|
||||
- Helm: kubernetes/helm.md
|
||||
- Traefik: kubernetes/traefik.md
|
||||
# - Chef's Favorites:
|
||||
# - Istio: recipes/kubernetes/istio.md
|
||||
- Introduction: kubernetes/index.md
|
||||
- Cluster:
|
||||
- kubernetes/cluster/index.md
|
||||
- Digital Ocean: kubernetes/cluster/digitalocean.md
|
||||
# - Bare Metal: kubernetes/cluster/baremetal.md
|
||||
# - Home Lab: kubernetes/cluster/baremetal.md
|
||||
- k3s: kubernetes/cluster/k3s.md
|
||||
# - The Hard Way: kubernetes/cluster/the-hard-way.md
|
||||
- Deployment:
|
||||
- kubernetes/deployment/index.md
|
||||
# - YAML: kubernetes/wip.md
|
||||
# - Helm: kubernetes/wip.md
|
||||
# - GitHub Actions: kubernetes/wip.md
|
||||
- Flux:
|
||||
- Install: kubernetes/deployment/flux/install.md
|
||||
- Design: kubernetes/deployment/flux/design.md
|
||||
- Operate: kubernetes/deployment/flux/operate.md
|
||||
- Essentials:
|
||||
- Load Balancer:
|
||||
- kubernetes/loadbalancer/index.md
|
||||
- k3s: kubernetes/loadbalancer/k3s.md
|
||||
- MetalLB:
|
||||
- kubernetes/loadbalancer/metallb/index.md
|
||||
- pfSense: kubernetes/loadbalancer/metallb/pfsense.md
|
||||
- Sealed Secrets: kubernetes/sealed-secrets.md
|
||||
- External DNS: kubernetes/external-dns.md
|
||||
- SSL Certificates:
|
||||
- kubernetes/ssl-certificates/index.md
|
||||
- Cert-Manager: kubernetes/ssl-certificates/cert-manager.md
|
||||
- LetsEncrypt Issuers: kubernetes/ssl-certificates/letsencrypt-issuers.md
|
||||
- Wildcard Certificate: kubernetes/ssl-certificates/wildcard-certificate.md
|
||||
- Secret Replicator: kubernetes/ssl-certificates/secret-replicator.md
|
||||
- Ingress:
|
||||
- kubernetes/ingress/index.md
|
||||
- Traefik:
|
||||
- kubernetes/ingress/traefik/index.md
|
||||
# - Dashboard: kubernetes/ingress/traefik/dashboard.md
|
||||
- Nginx: kubernetes/ingress/nginx.md
|
||||
- Persistence:
|
||||
- kubernetes/persistence/index.md
|
||||
- Local Path Provisioner: kubernetes/persistence/local-path-provisioner.md
|
||||
- TopoLVM: kubernetes/persistence/topolvm.md
|
||||
# - Rook Ceph: kubernetes/persistence/rook-ceph.md
|
||||
# - OpenEBS: kubernetes/persistence/openebs.md
|
||||
# - LongHorn: kubernetes/persistence/longhorn.md
|
||||
# - Backup:
|
||||
# - kubernetes/backup/index.md
|
||||
# - kubernetes/wip.md
|
||||
|
||||
|
||||
# - Monitoring:
|
||||
# - kubernetes/monitoring/index.md
|
||||
# - Prometheus: kubernetes/wip.md
|
||||
# - Grafana: kubernetes/wip.md
|
||||
# - AlertManager: kubernetes/wip.md
|
||||
# - Goldilocks: kubernetes/wip.md
|
||||
# - Reloader: kubernetes/wip.md
|
||||
# - Dashboard: kubernetes/wip.md
|
||||
# - Kured: kubernetes/wip.md
|
||||
# - KeyCloak: kubernetes/wip.md
|
||||
# - Recipes:
|
||||
# - GitHub Actions Runners: kubernetes/wip.md
|
||||
# - Cilium: kubernetes/wip.md
|
||||
# - Concourse: kubernetes/wip.md
|
||||
# - Flagger: kubernetes/wip.md
|
||||
# - Flagger: kubernetes/wip.md
|
||||
# - Flux: recipes/kubernetes/wip.md
|
||||
# - FoundationDB: kubernetes/wip.md
|
||||
# - Istio: recipes/kubernetes/wip.md
|
||||
# - Jaeger: kubernetes/wip.md
|
||||
# - Kiali: kubernetes/wip.md
|
||||
# - Minio: kubernetes/wip.md
|
||||
# - NGINX Ingress: kubernetes/wip.md
|
||||
# - Polaris: kubernetes/wip.md
|
||||
# - Portainer: kubernetes/wip.md
|
||||
# - Prometheus: kubernetes/wip.md
|
||||
# - Traefik: kubernetes/wip.md
|
||||
# - Vault: kubernetes/wip.md
|
||||
# - Webook Receiver: kubernetes/wip.md
|
||||
- 🚀 Get Premix!:
|
||||
- premix/index.md
|
||||
- Ansible:
|
||||
@@ -185,11 +254,11 @@ theme:
|
||||
- navigation.tabs.sticky
|
||||
- navigation.instant
|
||||
- navigation.sections
|
||||
- navigation.tracking
|
||||
- navigation.indexes
|
||||
- navigation.top
|
||||
- search.suggest
|
||||
- search.share
|
||||
- content.code.annotate
|
||||
icon:
|
||||
repo: 'fontawesome/brands/github'
|
||||
palette:
|
||||
@@ -297,7 +366,11 @@ markdown_extensions:
|
||||
repo: geek-cookbook
|
||||
- pymdownx.mark
|
||||
- pymdownx.smartsymbols
|
||||
- pymdownx.superfences
|
||||
- pymdownx.superfences:
|
||||
custom_fences:
|
||||
- name: mermaid
|
||||
class: mermaid
|
||||
format: !!python/name:pymdownx.superfences.fence_code_format
|
||||
- pymdownx.tasklist:
|
||||
custom_checkbox: true
|
||||
- pymdownx.tilde
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
{% endblock %}
|
||||
|
||||
{% block analytics %}
|
||||
<script src="https://cdn.jsdelivr.net/npm/@widgetbot/crate@3" async defer></script>
|
||||
<!-- <script src="https://cdn.jsdelivr.net/npm/@widgetbot/crate@3" async defer></script> -->
|
||||
<script src="/js/i-am-groot.js"></script>
|
||||
<script>window.plausible = window.plausible || function() { (window.plausible.q = window.plausible.q || []).push(arguments) }</script>
|
||||
{% endblock %}
|
||||
|
||||
@@ -3,7 +3,7 @@ pymdown-extensions>=6.0
|
||||
Markdown>=3.0.1
|
||||
mkdocs-minify-plugin>=0.2
|
||||
mkdocs-autolinks-plugin>=0.2.0
|
||||
mkdocs-htmlproofer-plugin>=0.0.3
|
||||
# mkdocs-htmlproofer-plugin>=0.0.3
|
||||
mkdocs-git-revision-date-localized-plugin>=0.4.8
|
||||
mkdocs-macros-plugin
|
||||
mkdocs-material
|
||||
mkdocs-material
|
||||
@@ -1,3 +1,4 @@
|
||||
#!/bin/bash
|
||||
docker build --build-arg FROM_SOURCE=ghcr.io/geek-cookbook/mkdocs-material-insiders . -t funkypenguin/mkdocs-material
|
||||
# docker pull ghcr.io/geek-cookbook/mkdocs-material-insiders
|
||||
docker build --build-arg FROM_SOURCE=funkypenguin/mkdocs-material-insiders . -t funkypenguin/mkdocs-material
|
||||
docker run --rm --name mkdocs-material -it -p 8123:8000 -v ${PWD}:/docs funkypenguin/mkdocs-material serve -f mkdocs-insiders.yml --dev-addr 0.0.0.0:8000 --dirtyreload
|
||||
|
||||