<h1>23 Disaster Recovery for Storage Objects in a K8s Cluster in Practice</h1>
<p>When we talk about disaster recovery for storage objects, picture the moment you have just started a Pod with a volume mounted and a cluster machine suddenly goes down: how do we make our storage objects fault tolerant? High availability at the application layer is certainly ideal, but a disaster recovery plan is always the last line of defense, and in many extreme situations a fault-tolerant backup is what lets you keep serving users with peace of mind.</p>
<p>In the virtual machine era, we steadily improved business reliability by spreading applications evenly across VMs and running scheduled data backups. Now that we have moved up to the Kubernetes era, every workload is managed by Kubernetes: the cluster can schedule quickly, maintain the state of application containers on its own, and scale resources at any moment to absorb unexpected load.</p>
<p>Put that way, it may sound as if storage hardly needs any worry: as long as you mount network storage, the data is still there even if the application cluster breaks, so nothing terrible happens. What, then, is the point of learning disaster recovery for storage objects?</p>
<p>Things are far less simple than they appear. We need to place ourselves in scenarios close to real business workloads and then deliberately break the cluster state to see whether the storage objects actually get damaged.</p>
<p>The reason we moved from virtual machines to Kubernetes is to use dynamically scalable resources to shorten business downtime, letting applications scale and heal on demand. So in the Kubernetes era the real question is not merely whether data gets lost, but whether we can guarantee ever shorter recovery times, ideally so short that users never notice. Is that achievable?</p>
<p>I believe Kubernetes, with its ever richer set of resource objects, is already close to that goal. So here I will walk through practical experience with disaster recovery for the various storage objects in Kubernetes, so that you are prepared when the need arises.</p>
<h3>Field Experience with Disaster Recovery for NFS Storage Objects</h3>
<p>First we should understand how to configure PV/PVC objects for an NFS network volume, paying particular attention to how the mountOptions parameter is used. Take the following example as a reference:</p>
<pre><code class="language-yaml">### nfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  mountOptions:
    - hard
    - nfsvers=4.1
  nfs:
    path: /opt/k8s-pods/data   # the NFS export path
    server: 192.168.1.40       # the NFS server address
---
### nfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
</code></pre>
<p>In this example the PersistentVolume is of type NFS, so the helper program /sbin/mount.nfs has to be available on the node to mount the NFS file system.</p>
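<p>That helper ships with the operating system's NFS client packages, which must be installed on every node that may run the Pod. A minimal sketch, assuming CentOS/RHEL or Debian/Ubuntu nodes (package names differ per distribution):</p>
<pre><code class="language-bash"># CentOS / RHEL worker nodes
sudo yum install -y nfs-utils

# Debian / Ubuntu worker nodes
sudo apt-get install -y nfs-common

# verify the mount helper is present
ls -l /sbin/mount.nfs
</code></pre>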
<pre><code class="language-bash">$ kubectl get pvc nfs-pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-pvc   Bound    nfs-pv   10Gi       RWX            nfs            3m54s

$ kubectl get pv nfs-pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             STORAGECLASS   REASON   AGE
nfs-pv   10Gi       RWX            Recycle          Bound    default/nfs-pvc   nfs            18m
</code></pre>
<p>Now run a Pod that mounts the NFS volume:</p>
<pre><code class="language-yaml">### nfs-pv-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pv-pod
spec:
  volumes:
    - name: nginx-pv-storage
      persistentVolumeClaim:
        claimName: nfs-pvc
  containers:
    - name: nginx
      image: nginx
      ports:
        - containerPort: 80
          name: "nginx-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: nginx-pv-storage
</code></pre>
<p>Create the Pod and check that it serves content from the NFS volume:</p>
<pre><code class="language-bash">$ kubectl create -f nfs-pv-pod.yaml
pod/nginx-pv-pod created

$ kubectl get pod nginx-pv-pod -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE   READINESS GATES
nginx-pv-pod   1/1     Running   0          66s   172.16.140.28   k8s-worker-2   <none>           <none>

$ curl http://172.16.140.28
Hello, NFS Storage NGINX
</code></pre>
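<p>The curl response above presupposes that an index.html already sits on the NFS export; if you are reproducing the example, you could seed one on the NFS server first (server and path taken from the PV definition above, the content is illustrative):</p>
<pre><code class="language-bash"># run on the NFS server (192.168.1.40)
echo 'Hello, NFS Storage NGINX' | sudo tee /opt/k8s-pods/data/index.html
</code></pre>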
<p>Once a Pod has an NFS volume mounted, you need to think about how to back that data up. <a href="https://github.com/vmware-tanzu/velero">velero</a> emerged as a cloud-native backup and restore tool, and it can help us back up persistent data objects. A velero example looks like this:</p>
<pre><code class="language-bash">velero backup create backupName --include-cluster-resources=true --ordered-resources 'pods=ns1/pod1,ns1/pod2;persistentvolumes=pv4,pv8' --include-namespaces=ns1
</code></pre>
<p>Note that velero cannot back up volume data on its own; it integrates the open source component <a href="https://github.com/restic/restic">restic</a> to add support for storage volumes. Because that integration is still experimental, do not use it in production.</p>
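<p>With the restic integration enabled, volumes are opted in per Pod through an annotation before the backup runs; a minimal sketch (the namespace, Pod, and volume names here are illustrative):</p>
<pre><code class="language-bash"># tell velero/restic which of the Pod's volumes to include
kubectl -n ns1 annotate pod pod1 backup.velero.io/backup-volumes=nginx-pv-storage

# then create the backup as usual
velero backup create nfs-backup --include-namespaces=ns1
</code></pre>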
<h3>Ceph Data Backup and Recovery</h3>
<p>Rook is a cloud-native system for managing Ceph clusters, and in an earlier lesson we already practiced creating a Ceph cluster with Rook. Now suppose the Ceph cluster is down: how do we repair it? Yes, we have to repair it by hand, following the steps below.</p>
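<p>Before changing anything, it helps to confirm which monitor Pods are actually unhealthy (the label below is the one Rook normally applies to its mon Pods; adjust it if your deployment differs):</p>
<pre><code class="language-bash"># list the Ceph monitor pods and their current state
kubectl -n rook-ceph get pods -l app=rook-ceph-mon -o wide
</code></pre>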
<p>Step 1: stop the Ceph operator, i.e. switch off the Ceph cluster's controller so that it cannot automatically reconcile its own workloads while we work.</p>
<pre><code class="language-bash">kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0
</code></pre>
<p>Step 2: Ceph's monmap keeps track of the monitor quorum. We first make sure the healthy monitor instance keeps running; here that is rook-ceph-mon-b, while rook-ceph-mon-a and rook-ceph-mon-c are unhealthy. Back up the Deployment object of rook-ceph-mon-b:</p>
<pre><code class="language-bash">kubectl -n rook-ceph get deployment rook-ceph-mon-b -o yaml > rook-ceph-mon-b-deployment.yaml
</code></pre>
<p>Modify the command of that monitor instance so it just sleeps instead of running ceph-mon:</p>
<pre><code class="language-bash">kubectl -n rook-ceph patch deployment rook-ceph-mon-b -p '{"spec": {"template": {"spec": {"containers": [{"name": "mon", "command": ["sleep", "infinity"], "args": []}]}}}}'
</code></pre>
<p>Exec into the healthy monitor instance:</p>
<pre><code class="language-bash">kubectl -n rook-ceph exec -it <mon-pod> bash

# set a few simple variables
cluster_namespace=rook-ceph
good_mon_id=b
monmap_path=/tmp/monmap

# extract the monmap to a file, by pasting the ceph mon command
# from the good mon deployment and adding the
# `--extract-monmap=${monmap_path}` flag
ceph-mon \
    --fsid=41a537f2-f282-428e-989f-a9e07be32e47 \
    --keyring=/etc/ceph/keyring-store/keyring \
    --log-to-stderr=true \
    --err-to-stderr=true \
    --mon-cluster-log-to-stderr=true \
    --log-stderr-prefix=debug \
    --default-log-to-file=false \
    --default-mon-cluster-log-to-file=false \
    --mon-host=$ROOK_CEPH_MON_HOST \
    --mon-initial-members=$ROOK_CEPH_MON_INITIAL_MEMBERS \
    --id=b \
    --setuser=ceph \
    --setgroup=ceph \
    --foreground \
    --public-addr=10.100.13.242 \
    --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db \
    --public-bind-addr=$ROOK_POD_IP \
    --extract-monmap=${monmap_path}

# review the contents of the monmap
monmaptool --print /tmp/monmap

# remove the bad mon(s) from the monmap
monmaptool ${monmap_path} --rm <bad_mon>

# in this example we remove mon a and mon c:
monmaptool ${monmap_path} --rm a
monmaptool ${monmap_path} --rm c

# inject the modified monmap into the good mon, by pasting
# the ceph mon command and adding the
# `--inject-monmap=${monmap_path}` flag, like this
ceph-mon \
    --fsid=41a537f2-f282-428e-989f-a9e07be32e47 \
    --keyring=/etc/ceph/keyring-store/keyring \
    --log-to-stderr=true \
    --err-to-stderr=true \
    --mon-cluster-log-to-stderr=true \
    --log-stderr-prefix=debug \
    --default-log-to-file=false \
    --default-mon-cluster-log-to-file=false \
    --mon-host=$ROOK_CEPH_MON_HOST \
    --mon-initial-members=$ROOK_CEPH_MON_INITIAL_MEMBERS \
    --id=b \
    --setuser=ceph \
    --setgroup=ceph \
    --foreground \
    --public-addr=10.100.13.242 \
    --setuser-match-path=/var/lib/ceph/mon/ceph-b/store.db \
    --public-bind-addr=$ROOK_POD_IP \
    --inject-monmap=${monmap_path}
</code></pre>
<p>Edit the rook ConfigMap that records the mon endpoints:</p>
<pre><code class="language-bash">kubectl -n rook-ceph edit configmap rook-ceph-mon-endpoints
</code></pre>
<p>In the data field, remove the stale entries a and c:</p>
<pre><code class="language-bash">data: a=10.100.35.200:6789;b=10.100.13.242:6789;c=10.100.35.12:6789
</code></pre>
<p>so that it becomes:</p>
<pre><code class="language-bash">data: b=10.100.13.242:6789
</code></pre>
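<p>Equivalently, if you prefer a non-interactive change, the same edit can be applied as a patch (assuming the endpoints live under the <code>data</code> key exactly as shown above):</p>
<pre><code class="language-bash">kubectl -n rook-ceph patch configmap rook-ceph-mon-endpoints \
  --type merge -p '{"data":{"data":"b=10.100.13.242:6789"}}'
</code></pre>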
<p>Update the secret configuration:</p>
<pre><code class="language-bash">mon_host=$(kubectl -n rook-ceph get svc rook-ceph-mon-b -o jsonpath='{.spec.clusterIP}')
kubectl -n rook-ceph patch secret rook-ceph-config -p '{"stringData": {"mon_host": "[v2:'"${mon_host}"':3300,v1:'"${mon_host}"':6789]", "mon_initial_members": "'"${good_mon_id}"'"}}'
</code></pre>
<p>Restart the monitor instance:</p>
<pre><code class="language-bash">kubectl replace --force -f rook-ceph-mon-b-deployment.yaml
</code></pre>
<p>Restart the operator:</p>
<pre><code class="language-bash"># scale the operator back up. it is safe to ignore errors that a number of resources already exist.
kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1
</code></pre>
<h3>Recovering Data for a Jenkins Application That Mounts a PVC</h3>
<p>Suppose the Jenkins data is corrupted and you want to repair the Jenkins data directory. This can be done by mounting the PVC into a temporary rescue Pod and using <code>kubectl cp</code>, with the following steps.</p>
<p>1. Get the security context the current Jenkins container runs with:</p>
<pre><code class="language-bash">$ kubectl --namespace=cje-cluster-example get pods cjoc-0 -o jsonpath='{.spec.securityContext}'
map[fsGroup:1000]
</code></pre>
<p>2. Shut the container down:</p>
<pre><code class="language-bash">$ kubectl --namespace=cje-cluster-example scale statefulset/cjoc --replicas=0
statefulset.apps "cjoc" scaled
</code></pre>
<p>3. Check the PVCs:</p>
<pre><code class="language-bash">$ kubectl --namespace=cje-cluster-example get pvc
NAME                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
jenkins-home-cjoc-0   Bound    pvc-6b27e963-b770-11e8-bcbf-42010a8400c1   20Gi       RWO            standard       46d
jenkins-home-mm1-0    Bound    pvc-b2b7e305-ba66-11e8-bcbf-42010a8400c1   50Gi       RWO            standard       42d
jenkins-home-mm2-0    Bound    pvc-6561b8da-c0c8-11e8-bcbf-42010a8400c1   50Gi       RWO            standard       34d
</code></pre>
<p>4. Mount the PVC into a temporary rescue Pod (reusing the runAsUser/fsGroup found in step 1) so the data can be restored conveniently:</p>
<pre><code class="language-bash">$ cat <<EOF | kubectl --namespace=cje-cluster-example create -f -
kind: Pod
apiVersion: v1
metadata:
  name: rescue-pod
spec:
  securityContext:
    runAsUser: 1000
    fsGroup: 1000
  volumes:
    - name: rescue-storage
      persistentVolumeClaim:
        claimName: jenkins-home-cjoc-0
  containers:
    - name: rescue-container
      image: nginx
      command: ["/bin/sh"]
      args: ["-c", "while true; do echo hello; sleep 10;done"]
      volumeMounts:
        - mountPath: "/tmp/jenkins-home"
          name: rescue-storage
EOF
pod "rescue-pod" created
</code></pre>
<p>5. Copy the backup data into the rescue Pod:</p>
<pre><code class="language-bash"># note the namespace prefix so the archive lands in the right Pod
kubectl cp oc-jenkins-home.backup.tar.gz cje-cluster-example/rescue-pod:/tmp/
</code></pre>
<p>6. Extract the data onto the PVC-backed volume:</p>
<pre><code class="language-bash">kubectl exec --namespace=cje-cluster-example rescue-pod -it -- tar -xzf /tmp/oc-jenkins-home.backup.tar.gz -C /tmp/jenkins-home
</code></pre>
<p>7. Delete the rescue Pod:</p>
<pre><code class="language-bash">kubectl --namespace=cje-cluster-example delete pod rescue-pod
</code></pre>
<p>8. Bring the Jenkins container back up:</p>
<pre><code class="language-bash">kubectl --namespace=cje-cluster-example scale statefulset/cjoc --replicas=1
</code></pre>
<h3>Backing Up the Kubernetes Cluster</h3>
<p>A Kubernetes cluster is a distributed cluster, and backing up its metadata generally serves two main purposes:</p>
<ul>
<li>being able to quickly restore control-plane nodes rather than worker nodes</li>
<li>being able to restore application containers</li>
</ul>
<p>As for how difficult cluster backup really is, we need a clear picture of which critical data on the control-plane nodes must be backed up: the self-signed certificates, the etcd data, and the kubeconfig.</p>
<p>Taking the backup steps on a single control-plane node as an example:</p>
<pre><code class="language-bash"># Backup certificates
sudo cp -r /etc/kubernetes/pki backup/

# Make etcd snapshot
sudo docker run --rm -v $(pwd)/backup:/backup \
    --network host \
    -v /etc/kubernetes/pki/etcd:/etc/kubernetes/pki/etcd \
    --env ETCDCTL_API=3 \
    k8s.gcr.io/etcd:3.4.3-0 \
    etcdctl --endpoints=https://127.0.0.1:2379 \
    --cacert=/etc/kubernetes/pki/etcd/ca.crt \
    --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
    --key=/etc/kubernetes/pki/etcd/healthcheck-client.key \
    snapshot save /backup/etcd-snapshot-latest.db

# Backup kubeadm-config
sudo cp /etc/kubeadm/kubeadm-config.yaml backup/
</code></pre>
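<p>The script above covers the certificates, the etcd snapshot, and the kubeadm config, but not the kubeconfig mentioned earlier. If you want that too, one extra copy is enough (this assumes the standard kubeadm layout; adjust the path to your setup):</p>
<pre><code class="language-bash"># Backup the admin kubeconfig
sudo cp /etc/kubernetes/admin.conf backup/
</code></pre>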
<p>Restoring a control-plane node from that data looks like this:</p>
<pre><code class="language-bash"># Restore certificates
sudo cp -r backup/pki /etc/kubernetes/

# Restore etcd backup
sudo mkdir -p /var/lib/etcd
sudo docker run --rm \
    -v $(pwd)/backup:/backup \
    -v /var/lib/etcd:/var/lib/etcd \
    --env ETCDCTL_API=3 \
    k8s.gcr.io/etcd:3.4.3-0 \
    /bin/sh -c "etcdctl snapshot restore '/backup/etcd-snapshot-latest.db' ; \
    mv /default.etcd/member/ /var/lib/etcd/"

# Restore kubeadm-config
sudo mkdir /etc/kubeadm
sudo cp backup/kubeadm-config.yaml /etc/kubeadm/

# Initialize the master with backup
sudo kubeadm init --ignore-preflight-errors=DirAvailable--var-lib-etcd \
    --config /etc/kubeadm/kubeadm-config.yaml
</code></pre>
<p>The examples above show how etcd data in a Kubernetes cluster is backed up and restored; learn to combine this well with <code>kubectl cp</code>.</p>
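<p>To turn the etcd snapshot into a regular habit rather than a manual chore, the same command can be driven by a CronJob. Below is a minimal sketch, assuming a kubeadm cluster on Kubernetes 1.21+ (for the batch/v1 CronJob API), with the Job pinned to a control-plane node and snapshots written to a hostPath directory; the schedule, image tag, node label, and paths are illustrative and may need adjusting (older clusters, for example, label control-plane nodes with node-role.kubernetes.io/master):</p>
<pre><code class="language-bash">cat <<EOF | kubectl apply -f -
apiVersion: batch/v1
kind: CronJob
metadata:
  name: etcd-snapshot
  namespace: kube-system
spec:
  schedule: "0 2 * * *"                  # every day at 02:00
  jobTemplate:
    spec:
      template:
        spec:
          hostNetwork: true              # reach etcd on 127.0.0.1:2379
          nodeSelector:
            node-role.kubernetes.io/control-plane: ""
          tolerations:
            - key: node-role.kubernetes.io/control-plane
              operator: Exists
              effect: NoSchedule
          containers:
            - name: snapshot
              image: k8s.gcr.io/etcd:3.4.3-0
              env:
                - name: ETCDCTL_API
                  value: "3"
              command: ["/bin/sh", "-c"]
              args:
                - etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-\$(date +%Y%m%d).db
              volumeMounts:
                - name: etcd-certs
                  mountPath: /etc/kubernetes/pki/etcd
                  readOnly: true
                - name: backup
                  mountPath: /backup
          volumes:
            - name: etcd-certs
              hostPath:
                path: /etc/kubernetes/pki/etcd
            - name: backup
              hostPath:
                path: /var/backups/etcd
                type: DirectoryOrCreate
          restartPolicy: OnFailure
EOF
</code></pre>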
<h3>Summary</h3>
<p>Relying on Kubernetes' native data-copying ability, <code>kubectl cp</code>, together with CronJobs, we can handle most data backup and recovery work. When it comes to backup and recovery for distributed systems, most of the time the job is not to restore data from a backup but to remove the failed nodes from the healthy ones so the cluster can heal itself. Self-healing is the defining trait of distributed systems, but it is also their weak point, because self-healing is conditional: once the failed nodes exceed what the quorum of available nodes can tolerate, no amount of cleverness helps. So backups remain the last line of defense. Always make <strong>data backups</strong> that are both regular and redundant.</p>
<h3>References</h3>
<ul>
<li><a href="https://github.com/rook/rook/blob/master/Documentation/ceph-disaster-recovery.md">https://github.com/rook/rook/blob/master/Documentation/ceph-disaster-recovery.md</a></li>
<li><a href="https://zh.wikipedia.org/wiki/Quorum_(分布式系统)">https://zh.wikipedia.org/wiki/Quorum_(分布式系统)</a></li>
</ul>