<h1 id="shared-storage-ceph">Shared Storage (Ceph)<a class="headerlink" href="#shared-storage-ceph" title="Permanent link">&para;</a></h1>
<p>While Docker Swarm is great for keeping containers running (<em>and restarting those that fail</em>), it does nothing for persistent storage. This means if you actually want your containers to keep any data persistent across restarts (<em>hint: you do!</em>), you need to provide shared storage to every docker node.</p>
<h2 id="design">Design<a class="headerlink" href="#design" title="Permanent link">&para;</a></h2>
<h3 id="why-not-glusterfs">Why not GlusterFS?<a class="headerlink" href="#why-not-glusterfs" title="Permanent link">&para;</a></h3>
<p>I originally provided shared storage to my nodes using GlusterFS (see the next recipe for details), but found it difficult to deal with because:</p>
<ol>
<li>GlusterFS requires (n) "bricks", where (n) <strong>has</strong> to be a multiple of your replica count. I.e., if you want 2 copies of everything on shared storage (the minimum to provide redundancy), you <strong>must</strong> have either 2, 4, 6 (etc.) bricks. The HA swarm design calls for a minimum of 3 nodes, so under GlusterFS, my third node can't participate in shared storage at all, unless I start doubling up on bricks-per-node (which then impacts redundancy).</li>
<li>GlusterFS turns out to be a giant PITA when you want to restore a failed node. There are at <a href="https://access.redhat.com/documentation/en-US/Red_Hat_Storage/3/html/Administration_Guide/sect-Replacing_Hosts.html">least 14 steps to follow</a> to replace a brick.</li>
<li>I'm pretty sure I messed up the 14-step process above anyway. My replaced brick synced with my "original" brick, but produced errors when querying status via the CLI, and hogged 100% of 1 CPU on the replaced node. Inexperienced with GlusterFS, and unable to diagnose the fault, I switched to a Ceph cluster instead.</li>
</ol>
<h3 id="why-ceph">Why Ceph?<a class="headerlink" href="#why-ceph" title="Permanent link">&para;</a></h3>
<ol>
<li>I'm more familiar with Ceph - I use it in the OpenStack designs I manage.</li>
<li>Replacing a failed node is <strong>easy</strong>, provided you can put up with the I/O load of rebalancing OSDs after the replacement.</li>
<li>CentOS Atomic includes the Ceph client in the OS, so while the Ceph OSD/Mon/MDS daemons run in containers, I can keep an eye on (and later, automatically monitor) the status of Ceph from the base OS.</li>
</ol>
<h2 id="ingredients">Ingredients<a class="headerlink" href="#ingredients" title="Permanent link">&para;</a></h2>
<div class="admonition summary">
<p class="admonition-title">Ingredients</p>
<p>3 x Virtual Machines (configured earlier), each with:</p>
<ul class="task-list">
<li class="task-list-item"><label class="task-list-control"><input type="checkbox" disabled checked/><span class="task-list-indicator"></span></label> CentOS/Fedora Atomic</li>
<li class="task-list-item"><label class="task-list-control"><input type="checkbox" disabled checked/><span class="task-list-indicator"></span></label> At least 1GB RAM</li>
<li class="task-list-item"><label class="task-list-control"><input type="checkbox" disabled checked/><span class="task-list-indicator"></span></label> At least 20GB disk space (<em>but it'll be tight</em>)</li>
<li class="task-list-item"><label class="task-list-control"><input type="checkbox" disabled checked/><span class="task-list-indicator"></span></label> Connectivity to each other within the same subnet, and on a low-latency link (<em>i.e., no WAN links</em>)</li>
<li class="task-list-item"><label class="task-list-control"><input type="checkbox" disabled/><span class="task-list-indicator"></span></label> A second disk dedicated to the Ceph OSD</li>
</ul>
</div>
<h2 id="preparation">Preparation<a class="headerlink" href="#preparation" title="Permanent link">&para;</a></h2>
<h3 id="selinux">SELinux<a class="headerlink" href="#selinux" title="Permanent link">&para;</a></h3>
<p>Since our Ceph components will be containerized, we need to ensure the SELinux context on the base OS's ceph files is set correctly:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>chcon -Rt svirt_sandbox_file_t /etc/ceph
chcon -Rt svirt_sandbox_file_t /var/lib/ceph
</pre></div>
</td></tr></table>
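<p>To confirm the new context has been applied, check the labels on both directories (the exact output varies by OS version, but you should see <em>svirt_sandbox_file_t</em> in each):</p>
<div class="codehilite"><pre>ls -Zd /etc/ceph /var/lib/ceph
</pre></div>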
<h3 id="setup-monitors">Setup Monitors<a class="headerlink" href="#setup-monitors" title="Permanent link">&para;</a></h3>
<p>Pick a node, and run the following to stand up the first Ceph mon. Be sure to replace the values of <strong>MON_IP</strong> and <strong>CEPH_PUBLIC_NETWORK</strong> with those specific to your deployment:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2
3
4
5
6
7
8</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>docker run -d --net=host \
--restart always \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-e MON_IP=192.168.31.11 \
-e CEPH_PUBLIC_NETWORK=192.168.31.0/24 \
--name=&quot;ceph-mon&quot; \
ceph/daemon mon
</pre></div>
</td></tr></table>
<p>Now <strong>copy</strong> the contents of /etc/ceph on this first node to the remaining nodes, and <strong>then</strong> run the docker command above (<em>customizing MON_IP as you go</em>) on each remaining node. You'll end up with a cluster of 3 monitors (an odd number is required for quorum, just as with Docker Swarm), and no OSDs (yet).</p>
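<p>Once all three mons are up, you should be able to confirm quorum from the base OS of any node (this assumes the admin keyring was included when you copied /etc/ceph across):</p>
<div class="codehilite"><pre>ceph -s
ceph mon stat
</pre></div>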
<h3 id="setup-osds">Setup OSDs<a class="headerlink" href="#setup-osds" title="Permanent link">&para;</a></h3>
<p>Since we currently have an OSD-less, mon-only cluster, prepare for OSD creation by dumping the auth credentials for the OSDs into the appropriate location on the base OS:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>ceph auth get client.bootstrap-osd -o \
/var/lib/ceph/bootstrap-osd/ceph.keyring
</pre></div>
</td></tr></table>
<p>On each node, you need a dedicated disk for the OSD. In the example below, I used <em>/dev/vdd</em> (the entire disk, no partitions) for the OSD.</p>
<p>Run the following command on every node:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre> 1
2
3
4
5
6
7
8
9
10
11</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>docker run -d --net=host \
--privileged=true \
--pid=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/vdd \
-e OSD_TYPE=disk \
--name=&quot;ceph-osd&quot; \
--restart=always \
ceph/daemon osd
</pre></div>
</td></tr></table>
<p>Watch the output by running <code class="codehilite">docker logs ceph-osd -f</code>, and confirm success.</p>
<div class="admonition note">
<p class="admonition-title">Zapping the device</p>
<p>The Ceph OSD container will refuse to destroy a partition containing existing data, so it may be necessary to "zap" the target disk first, using:</p>
<div class="codehilite"><pre>docker run -d --privileged=true \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/vdd \
ceph/daemon zap_device
</pre></div>
</div>
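<p>Once an OSD is running on each node, it's worth confirming (from the base OS of any node) that all three OSDs have registered and are up before moving on:</p>
<div class="codehilite"><pre>ceph osd tree
</pre></div>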
<h3 id="setup-mdss">Setup MDSs<a class="headerlink" href="#setup-mdss" title="Permanent link">&para;</a></h3>
<p>In order to mount our Ceph pools as filesystems, we'll need at least one Ceph MDS. Run the following on each node:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2
3
4
5
6
7
8
9</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>docker run -d --net=host \
--name ceph-mds \
--restart always \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /etc/ceph:/etc/ceph \
-e CEPHFS_CREATE=1 \
-e CEPHFS_DATA_POOL_PG=256 \
-e CEPHFS_METADATA_POOL_PG=256 \
ceph/daemon mds
</pre></div>
</td></tr></table>
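<p>The CEPHFS_CREATE=1 flag should cause the first MDS container to create the cephfs data/metadata pools and filesystem if they don't already exist. Confirm that the filesystem is present and an MDS has gone active:</p>
<div class="codehilite"><pre>ceph fs ls
ceph mds stat
</pre></div>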
<h3 id="apply-tweaks">Apply tweaks<a class="headerlink" href="#apply-tweaks" title="Permanent link">&para;</a></h3>
<p>The ceph container seems to configure a pool default of 3 replicas (3 copies of each block are retained), which is one too many for our cluster (we are only protecting against the failure of a single node).</p>
<p>Run the following on any node to reduce the size of the pool to 2 replicas:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>ceph osd pool set cephfs_data size 2
ceph osd pool set cephfs_metadata size 2
</pre></div>
</td></tr></table>
<p>Disabled "scrubbing" (which can be IO-intensive, and is unnecessary on a VM) with:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>ceph osd set noscrub
ceph osd set nodeep-scrub
</pre></div>
</td></tr></table>
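<p>To verify both tweaks took effect, check the size setting on each pool, and look for the noscrub/nodeep-scrub flags in the OSD map:</p>
<div class="codehilite"><pre>ceph osd pool get cephfs_data size
ceph osd pool get cephfs_metadata size
ceph osd dump | grep flags
</pre></div>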
<h3 id="create-credentials-for-swarm">Create credentials for swarm<a class="headerlink" href="#create-credentials-for-swarm" title="Permanent link">&para;</a></h3>
<p>In order to mount the ceph volume onto our base host, we need to provide cephx authentication credentials.</p>
<p>On <strong>one</strong> node, create a client for the docker swarm:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>ceph auth get-or-create client.dockerswarm osd \
&#39;allow rw&#39; mon &#39;allow r&#39; mds &#39;allow&#39; &gt; /etc/ceph/keyring.dockerswarm
</pre></div>
</td></tr></table>
<p>Grab the secret associated with the new user (you'll need this for the /etc/fstab entry below) by running:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>ceph-authtool /etc/ceph/keyring.dockerswarm -p -n client.dockerswarm
</pre></div>
</td></tr></table>
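<p>Alternatively, the same secret can be printed directly from the cluster's auth database:</p>
<div class="codehilite"><pre>ceph auth get-key client.dockerswarm
</pre></div>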
<h3 id="mount-mds-volume">Mount MDS volume<a class="headerlink" href="#mount-mds-volume" title="Permanent link">&para;</a></h3>
<p>On each node, create a mountpoint for the data by running <code class="codehilite">mkdir /var/data</code>, add an entry to fstab to ensure the volume is auto-mounted on boot, and ensure the volume actually gets <em>mounted</em> even if there's a network / boot delay in reaching the Ceph cluster:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre> 1
2
3
4
5
6
7
8
9
10
11</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>mkdir /var/data
MYHOST=`hostname -s`
echo -e &quot;
# Mount cephfs volume \n
$MYHOST:6789:/ /var/data/ ceph \
name=dockerswarm\
,secret=&lt;YOUR SECRET HERE&gt;\
,noatime,_netdev,context=system_u:object_r:svirt_sandbox_file_t:s0\
0 2&quot; &gt;&gt; /etc/fstab
mount -a
</pre></div>
</td></tr></table>
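<p>After running <code class="codehilite">mount -a</code>, confirm on each node that /var/data is really backed by cephfs (and not just an empty local directory):</p>
<div class="codehilite"><pre>df -h /var/data
mount | grep /var/data
</pre></div>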
<h3 id="install-docker-volume-plugin">Install docker-volume plugin<a class="headerlink" href="#install-docker-volume-plugin" title="Permanent link">&para;</a></h3>
<p>An upstream bug affecting docker-latest is reported at <a href="https://bugs.centos.org/view.php?id=13609">https://bugs.centos.org/view.php?id=13609</a>, and the underlying Alpine fault is tracked at <a href="https://github.com/gliderlabs/docker-alpine/issues/317">https://github.com/gliderlabs/docker-alpine/issues/317</a>.</p>
<h2 id="serving">Serving<a class="headerlink" href="#serving" title="Permanent link">&para;</a></h2>
<p>After completing the above, you should have:</p>
<table class="codehilitetable"><tr><td class="linenos"><div class="linenodiv"><pre>1
2</pre></div></td><td class="code"><div class="codehilite"><pre><span></span>[X] Persistent storage available to every node
[X] Resiliency in the event of the failure of a single node
</pre></div>
</td></tr></table>
<h2 id="chefs-notes">Chef's Notes<a class="headerlink" href="#chefs-notes" title="Permanent link">&para;</a></h2>
<p>Future enhancements to this recipe include:</p>
<ol>
<li>Rather than pasting a secret key into /etc/fstab (which feels wrong), I'd prefer to be able to set "secretfile" in /etc/fstab (which just points ceph.mount to a file containing the secret), but under the current CentOS Atomic, we're stuck with "secret", per <a href="https://bugzilla.redhat.com/show_bug.cgi?id=1030402">https://bugzilla.redhat.com/show_bug.cgi?id=1030402</a></li>
</ol>
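<p>For reference, once secretfile support works, the resulting fstab entry would look something like the sketch below, with the key stored in a (hypothetical) /etc/ceph/dockerswarm.secret file rather than inline. This is untested on the current CentOS Atomic:</p>
<div class="codehilite"><pre># hypothetical fstab entry - replace &quot;node1&quot; with the node&#39;s short hostname
node1:6789:/ /var/data/ ceph name=dockerswarm,secretfile=/etc/ceph/dockerswarm.secret,noatime,_netdev,context=system_u:object_r:svirt_sandbox_file_t:s0 0 2
</pre></div>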
<h2 id="__comments">Comments</h2>
<div id="disqus_thread"></div>
<script>
var disqus_config = function () {
this.page.url = "https://geeks-cookbook.funkypenguin.co.nz/ha-docker-swarm/shared-storage-ceph/";
this.page.identifier =
"/ha-docker-swarm/shared-storage-ceph/";
};
(function() {
var d = document, s = d.createElement("script");
s.src = "//geeks-cookbook.disqus.com/embed.js";
s.setAttribute("data-timestamp", +new Date());
(d.head || d.body).appendChild(s);
})();
</script>
</article>
</div>
</div>
</main>
<footer class="md-footer">
<div class="md-footer-nav">
<nav class="md-footer-nav__inner md-grid">
<a href="../vms/" title="VMs" class="md-flex md-footer-nav__link md-footer-nav__link--prev" rel="prev">
<div class="md-flex__cell md-flex__cell--shrink">
<i class="md-icon md-icon--arrow-back md-footer-nav__button"></i>
</div>
<div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
<span class="md-flex__ellipsis">
<span class="md-footer-nav__direction">
Previous
</span>
VMs
</span>
</div>
</a>
<a href="../shared-storage-gluster/" title="Shared Storage (GlusterFS)" class="md-flex md-footer-nav__link md-footer-nav__link--next" rel="next">
<div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
<span class="md-flex__ellipsis">
<span class="md-footer-nav__direction">
Next
</span>
Shared Storage (GlusterFS)
</span>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<i class="md-icon md-icon--arrow-forward md-footer-nav__button"></i>
</div>
</a>
</nav>
</div>
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
<div class="md-footer-copyright__highlight">
Copyright &copy; 2016 - 2017 David Young
</div>
powered by
<a href="http://www.mkdocs.org" title="MkDocs">MkDocs</a>
and
<a href="http://squidfunk.github.io/mkdocs-material/" title="Material for MkDocs">
Material for MkDocs</a>
</div>
<div class="md-footer-social">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<a href="https://github.com/funkypenguin" class="md-footer-social__link fa fa-github"></a>
<a href="https://twitter.com/funkypenguin" class="md-footer-social__link fa fa-twitter"></a>
</div>
</div>
</div>
</footer>
</div>
<script src="../../assets/javascripts/application-c35428f87f.js"></script>
<script>app.initialize({url:{base:"../.."}})</script>
<script src="../../extras/javascript/piwik.js"></script>
<script>!function(e,t,a,n,o,c,i){e.GoogleAnalyticsObject=o,e[o]=e[o]||function(){(e[o].q=e[o].q||[]).push(arguments)},e[o].l=1*new Date,c=t.createElement(a),i=t.getElementsByTagName(a)[0],c.async=1,c.src=n,i.parentNode.insertBefore(c,i)}(window,document,"script","https://www.google-analytics.com/analytics.js","ga"),ga("create","UA-139253-18","auto"),ga("set","anonymizeIp",!0),ga("send","pageview");var links=document.getElementsByTagName("a");Array.prototype.map.call(links,function(e){e.host!=document.location.host&&e.addEventListener("click",function(){var t=e.getAttribute("data-md-action")||"follow";ga("send","event","outbound",t,e.href)})});var query=document.forms.search.query;query.addEventListener("blur",function(){if(this.value){var e=document.location.pathname;ga("send","pageview",e+"?q="+this.value)}})</script>
</body>
</html>