Create a Ceph storage pool

For Ceph cluster deployment, see: https://imszz.com/p/877f6188/

ceph osd pool create rbd 128
ceph osd pool set-quota rbd max_bytes $((50 * 1024 * 1024 * 1024)) # 50 GiB quota on the pool
rbd pool init rbd
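To confirm the pool and quota took effect, they can be read back (a minimal check; the pool name rbd matches the commands above):

# Show the quota configured on the pool
ceph osd pool get-quota rbd
# Overall capacity and per-pool usage
ceph df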

Check the cluster status

[root@node3 ~]# ceph -s
  cluster:
    id:     3a2a06c7-124f-4703-b798-88eb2950361e
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node5,node4,node3
    mgr: node3(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   1 pools, 128 pgs
    objects: 23 objects, 22 MiB
    usage:   7.4 GiB used, 593 GiB / 600 GiB avail
    pgs:     128 active+clean

Check the user key

[root@node3 ~]# ceph auth get client.admin
exported keyring for client.admin
[client.admin]
	key = AQCJMslhQW0JEhAAXEgcsW3IZozDi7FF51+sbw==
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

Alternatively, create a dedicated pool, user, and user key:

[root@node3 ~]# ceph osd pool create kubernetes
[root@node3 ~]# rbd pool init kubernetes
[root@node3 ~]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
[client.kubernetes]
	key = AQD9o0Fd6hQRChAAt7fMaSZXduT3NWEqylNpmg==

Note: the key shown above is only an example; in a real setup, use the actual output of the command.
This user key will be needed in the configuration steps later on.
If the cluster runs the Ceph Luminous release, the command should instead be: ceph auth get-or-create client.kubernetes mon 'allow r' osd 'allow rwx pool=kubernetes' -o ceph.client.kubernetes.keyring
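When the key is needed again later (for example when filling in the Secret below), it can be re-read at any time; these are standard commands, shown here for the client.kubernetes user created above:

# Print only the key, convenient for scripting
ceph auth get-key client.kubernetes
# Print the full entity, including caps
ceph auth get client.kubernetes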

Install the Ceph client on all Kubernetes nodes

cat > /etc/yum.repos.d/ceph.repo<<'EOF'
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1
EOF
yum -y install ceph
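A quick sanity check that the client installed and came from the Mimic repo configured above:

# Should report a 13.x (Mimic) version string
ceph --version
rbd --version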

Generate the Kubernetes ConfigMap for ceph-csi

[root@node3 ~]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 3a2a06c7-124f-4703-b798-88eb2950361e
last_changed 2021-12-27 11:27:02.815248
created 2021-12-27 11:27:02.815248
0: 172.18.112.18:6789/0 mon.node5
1: 172.18.112.19:6789/0 mon.node4
2: 172.18.112.20:6789/0 mon.node3

Generate the ConfigMap from the information above:

cat csi-config-map.yaml
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "3a2a06c7-124f-4703-b798-88eb2950361e",
        "monitors": [
          "172.18.112.20:6789",
          "172.18.112.19:6789",
          "172.18.112.18:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config

Store this ConfigMap in the Kubernetes cluster:

kubectl apply -f csi-config-map.yaml

Generate the cephx Secret for ceph-csi

cat <<EOF > csi-rbd-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: admin
  userKey: AQAs89depA23NRAA8yEg0GfHNC/uhKU9jsgp6Q==
EOF
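Rather than pasting the key by hand, it can be injected from the cluster directly. A sketch, assuming the admin user as in the file above and that the command runs on a node holding Ceph admin credentials:

# Overwrite the example userKey with the live key for client.admin
sed -i "s|userKey:.*|userKey: $(ceph auth get-key client.admin)|" csi-rbd-secret.yaml

This avoids the most common failure at this step, a stale or mistyped key in the Secret.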

Store this configuration in Kubernetes:

kubectl apply -f csi-rbd-secret.yaml

Configure the ceph-csi plugins (RBAC on Kubernetes, plus the containers that provide the storage functionality)

The RBAC part

If GitHub is reachable, you can deploy directly:

kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
For offline use, apply the following manifest:
[root@master-1 ~]# cat csi-provisioner-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-provisioner
  # replace with non-default namespace name
  namespace: default

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "update", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f csi-provisioner-rbac.yaml

If GitHub is reachable, you can deploy directly:

kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml
For offline use, apply the following manifest:
[root@master-1 ~]# cat csi-nodeplugin-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-nodeplugin
  # replace with non-default namespace name
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  # allow to read Vault Token and connection options from the Tenants namespace
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: rbd-csi-nodeplugin
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: rbd-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io

Deploy

kubectl apply -f csi-nodeplugin-rbac.yaml

The provisioner part

The image versions used are listed below; to use other versions, modify the YAML files accordingly:

k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
quay.io/cephcsi/cephcsi:canary

Official manifests

wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin.yaml

The images referenced in the YAML below have already been pushed to a local registry; adjust according to your own network environment.
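A sketch of how the images can be mirrored into a private registry; the registry name dockerhub.kubekey.local follows the YAML below, substitute your own:

for img in \
  k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 \
  k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 \
  k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 \
  k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 \
  k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 \
  quay.io/cephcsi/cephcsi:canary; do
  docker pull "$img"
  docker tag "$img" "dockerhub.kubekey.local/$img"
  docker push "dockerhub.kubekey.local/$img"
done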

[root@master-1 ~]# cat csi-rbdplugin-provisioner.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: csi-rbdplugin-provisioner
  # replace with non-default namespace name
  namespace: default
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-rbdplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin-provisioner
  # replace with non-default namespace name
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: csi-rbdplugin-provisioner
  template:
    metadata:
      labels:
        app: csi-rbdplugin-provisioner
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-rbdplugin-provisioner
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: rbd-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--retry-interval-start=500ms"
            - "--leader-election=true"
            # set it to true to use topology based provisioning
            - "--feature-gates=Topology=false"
            # if fstype is not specified in storageclass, ext4 is default
            - "--default-fstype=ext4"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-rbdplugin
          # for stable functionality replace canary with latest release version
          image: dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--pidlimit=-1"
            - "--rbdhardmaxclonedepth=8"
            - "--rbdsoftmaxclonedepth=4"
            - "--enableprofiling=false"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: CSI_ADDONS_ENDPOINT
              value: unix:///csi/csi-addons.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            # - name: ceph-csi-encryption-kms-config
            #   mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            # - name: ceph-config
            #   mountPath: /etc/ceph/
        - name: csi-rbdplugin-controller
          # for stable functionality replace canary with latest release version
          image: dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary
          args:
            - "--type=controller"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--drivernamespace=$(DRIVER_NAMESPACE)"
          env:
            - name: DRIVER_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            # - name: ceph-config
            #   mountPath: /etc/ceph/
        - name: liveness-prometheus
          image: dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        #- name: ceph-config
        #  configMap:
        #    name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        #- name: ceph-csi-encryption-kms-config
        #  configMap:
        #    name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }

[root@master-1 ~]# cat csi-rbdplugin.yaml
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin
  # replace with non-default namespace name
  namespace: default
spec:
  selector:
    matchLabels:
      app: csi-rbdplugin
  template:
    metadata:
      labels:
        app: csi-rbdplugin
    spec:
      serviceAccountName: rbd-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      priorityClassName: system-node-critical
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
          image: dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          # for stable functionality replace canary with latest release version
          image: dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
            - "--pluginpath=/var/lib/kubelet/plugins"
            - "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/pv/"
            - "--type=rbd"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--enableprofiling=false"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: CSI_ADDONS_ENDPOINT
              value: unix:///csi/csi-addons.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /run/mount
              name: host-mount
            - mountPath: /etc/selinux
              name: etc-selinux
              readOnly: true
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            #- name: ceph-csi-encryption-kms-config
            #  mountPath: /etc/ceph-csi-encryption-kms-config/
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-logdir
              mountPath: /var/log/ceph
            #- name: ceph-config
            #  mountPath: /etc/ceph/
        - name: liveness-prometheus
          securityContext:
            privileged: true
          image: dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: ceph-logdir
          hostPath:
            path: /var/log/ceph
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: etc-selinux
          hostPath:
            path: /etc/selinux
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: lib-modules
          hostPath:
            path: /lib/modules
        #- name: ceph-config
        #  configMap:
        #    name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        #- name: ceph-csi-encryption-kms-config
        #  configMap:
        #    name: ceph-csi-encryption-kms-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-rbdplugin
  # replace with non-default namespace name
  namespace: default
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680
  selector:
    app: csi-rbdplugin

Modify csi-rbdplugin-provisioner.yaml and csi-rbdplugin.yaml, commenting out the ceph-csi-encryption-kms-config and ceph-config sections:

[root@master-1 ~]# grep  "#" csi-rbdplugin-provisioner.yaml
# replace with non-default namespace name
# replace with non-default namespace name
# set it to true to use topology based provisioning
# if fstype is not specified in storageclass, ext4 is default
# for stable functionality replace canary with latest release version
# - name: KMS_CONFIGMAP_NAME
# value: encryptionConfig
# - name: ceph-csi-encryption-kms-config
# mountPath: /etc/ceph-csi-encryption-kms-config/
# - name: ceph-config
# mountPath: /etc/ceph/
# for stable functionality replace canary with latest release version
# - name: ceph-config
# mountPath: /etc/ceph/
#- name: ceph-config
# configMap:
# name: ceph-config
#- name: ceph-csi-encryption-kms-config
# configMap:
# name: ceph-csi-encryption-kms-config

Note: the images used have been retagged to the local registry; adjust according to your own network environment.

dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
dockerhub.kubekey.local/k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
dockerhub.kubekey.local/quay.io/cephcsi/cephcsi:canary

Deploy

kubectl apply -f csi-rbdplugin-provisioner.yaml
kubectl apply -f csi-rbdplugin.yaml

Check the running status

[root@master-1 ~]# kubectl get pods 
NAME READY STATUS RESTARTS AGE

csi-rbdplugin-5jb79 3/3 Running 0 22h
csi-rbdplugin-7dqd7 3/3 Running 0 22h
csi-rbdplugin-8dpnb 3/3 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-4clkc 7/7 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-lbjld 7/7 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-vpvb2 7/7 Running 0 22h
csi-rbdplugin-txjcg 3/3 Running 0 22h
csi-rbdplugin-x57d6 3/3 Running 0 22h

Using Ceph block devices

Create a StorageClass

[root@master-1 ~]# cat csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 3a2a06c7-124f-4703-b798-88eb2950361e
  pool: rbd
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
  • clusterID corresponds to the fsid from the earlier steps
  • imageFeatures determines which features the created RBD images get (a check is sketched after this list)
  • allowVolumeExpansion: true toggles online expansion
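Once a PVC has been provisioned through this class (later in this post), the feature set can be verified on the Ceph side. A sketch; csi-vol-<uuid> stands for the generated image name, which differs per volume:

# List the images ceph-csi created in the pool
rbd ls rbd
# "features: layering" should appear in the output
rbd info rbd/csi-vol-<uuid>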

Deploy

kubectl apply -f csi-rbd-sc.yaml

Check the StorageClass:

[root@master-1 ~]#  kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE

csi-rbd-sc rbd.csi.ceph.com Delete Immediate true 22h
local (default) openebs.io/local Delete WaitForFirstConsumer false 5d23h
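The local class above is still marked as the default. Optionally, csi-rbd-sc can be made the default instead via the standard annotation (a sketch; not required for the rest of this post):

kubectl patch storageclass local -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
kubectl patch storageclass csi-rbd-sc -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'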

Create a PVC

[root@master-1 ~]# cat raw-block-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc

In theory volumeMode should be set to Block here, and both the PVC and the consuming workload must specify the same mode for the volume to mount. In testing, however, the mount still failed even with Block specified on the application side as well, so the setting was dropped in favor of the default Filesystem mode (a Filesystem-mode variant is sketched below).
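For reference, a minimal Filesystem-mode variant, assuming the same StorageClass; the name fs-pvc is illustrative:

cat <<EOF > fs-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: fs-pvc              # illustrative name
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem    # also the default when omitted
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f fs-pvc.yaml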

Deploy

kubectl apply -f raw-block-pvc.yaml 

Check the PVC

[root@master-1 ~]#  kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
raw-block-pvc Bound pvc-23bb1905-2e26-4ce1-8616-2754dd36317f 1Gi RWO csi-rbd-sc 22h

Create a stateless test Pod that uses the PVC

[root@master-1 ~]# cat raw-block-pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-raw-block-volume
spec:
  containers:
    - name: fc-container
      image: fedora:26
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc

Deploy

kubectl apply -f raw-block-pod.yaml

Check

[root@master-1 ~]# kubectl get pods 
NAME READY STATUS RESTARTS AGE

csi-rbdplugin-5jb79 3/3 Running 0 22h
csi-rbdplugin-7dqd7 3/3 Running 0 22h
csi-rbdplugin-8dpnb 3/3 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-4clkc 7/7 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-lbjld 7/7 Running 0 22h
csi-rbdplugin-provisioner-66557fcc8f-vpvb2 7/7 Running 0 22h
csi-rbdplugin-txjcg 3/3 Running 0 22h
csi-rbdplugin-x57d6 3/3 Running 0 22h

pod-with-raw-block-volume 1/1 Running 0 22h
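A quick check from inside the Pod that the raw block device was actually attached; the device path matches the devicePath in the Pod spec above:

# Should show a block special file (leading "b" in the mode bits)
kubectl exec pod-with-raw-block-volume -- ls -l /dev/xvda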
Test volume expansion
kubectl edit pvc raw-block-pvc # `raw-block-pvc` is the PVC to expand; open it and change the requested capacity
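As a non-interactive alternative to kubectl edit, the same change can be applied with a patch (a sketch; 15Gi matches the edit shown below):

kubectl patch pvc raw-block-pvc -p '{"spec":{"resources":{"requests":{"storage":"15Gi"}}}}'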
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"raw-block-pvc","namespace":"default"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}},"storageClassName":"csi-rbd-sc","volumeMode":"Block"}}
    pv.kubernetes.io/bind-completed: "yes"
    pv.kubernetes.io/bound-by-controller: "yes"
    volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
  creationTimestamp: "2022-01-10T04:01:31Z"
  finalizers:
  - kubernetes.io/pvc-protection
  name: raw-block-pvc
  namespace: default
  resourceVersion: "1142767"
  uid: 18eb2ee1-3eac-4567-9d07-a449ce0ac675
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 15Gi # change the capacity here, then save and exit
  storageClassName: csi-rbd-sc
  volumeMode: Block
  volumeName: pvc-18eb2ee1-3eac-4567-9d07-a449ce0ac675
status:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 15Gi
  phase: Bound
Check the PVC
[root@master-1 ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data-csi-mysql-0 Bound pvc-e55185b9-fa17-48ad-b125-929d7b01e5a0 5Gi RWO csi-rbd-sc 24m
raw-block-pvc Bound pvc-18eb2ee1-3eac-4567-9d07-a449ce0ac675 15Gi RWO csi-rbd-sc 102m
rbd-pvc-bak Bound pvc-6ff9dc5c-b39e-410d-909c-bdd01db765a1 1Gi RWO csi-rbd-sc-pv 164m

Expansion complete.

Create a stateful test application that uses a PVC

vim mysql-statefulset-static.yaml
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: csi-mysql
  namespace: default
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - name: mysql
          image: mysql:5.7
          env:
            - name: MYSQL_ALLOW_EMPTY_PASSWORD
              value: "1"
            - name: MYSQL_ROOT_PASSWORD
              value: "dlw123"
          ports:
            - name: mysql
              containerPort: 3306
          volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
          resources:
            requests:
              cpu: 500m
              memory: 1Gi
      # volumes:
      #   - name: data
      #     persistentVolumeClaim:
      #       claimName: csi-rbd-sc
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "csi-rbd-sc"
        resources:
          requests:
            storage: 5Gi

For a stateful service, using volumes directly (as in the commented-out block above) fails when scaling dynamically: every Pod would use one and the same PVC and conflict with the others. Use volumeClaimTemplates instead, so a PV is created per replica, as the sketch below shows.
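A sketch of what that buys you: scaling the StatefulSet up creates one claim per replica, following the <template-name>-<statefulset-name>-<ordinal> pattern:

kubectl apply -f mysql-statefulset-static.yaml
kubectl scale statefulset csi-mysql --replicas=2
# Expect data-csi-mysql-0 and data-csi-mysql-1, each bound to its own PV
kubectl get pvc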

Test volume expansion
kubectl edit pvc data-csi-mysql-0 # `data-csi-mysql-0` is the PVC to expand; open it and change the requested capacity
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    pv.kubernetes.io/bind-completed: "yes"
    pv.kubernetes.io/bound-by-controller: "yes"
    volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
  creationTimestamp: "2022-01-10T05:19:37Z"
  finalizers:
  - kubernetes.io/pvc-protection
  labels:
    app: mysql-bak
  name: data-csi-mysql-0
  namespace: default
  resourceVersion: "1147968"
  uid: e55185b9-fa17-48ad-b125-929d7b01e5a0
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi # change the capacity here, then save and exit
  storageClassName: csi-rbd-sc
  volumeMode: Filesystem
  volumeName: pvc-e55185b9-fa17-48ad-b125-929d7b01e5a0
status:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 5Gi
  phase: Bound
Check the expansion status
[root@master-1 ~]# kubectl describe pvc data-csi-mysql-0
Name: data-csi-mysql-0
Namespace: default
StorageClass: csi-rbd-sc
Status: Bound
Volume: pvc-e55185b9-fa17-48ad-b125-929d7b01e5a0
Labels: app=mysql-bak
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 5Gi
Access Modes: RWO
VolumeMode: Filesystem
Used By: csi-mysql-0
Conditions:
Type Status LastProbeTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
FileSystemResizePending True Mon, 01 Jan 0001 00:00:00 +0000 Mon, 10 Jan 2022 13:52:21 +0800 Waiting for user to (re-)start a pod to finish file system resize of volume on node.
···

The resize stays pending until the Pod is redeployed; restart it for the change to take effect.

Update the Pod

Check the workloads

kubectl get StatefulSet # stateful workloads
kubectl get Deployment # stateless workloads

Scale the replicas

kubectl scale StatefulSet csi-mysql --replicas 0 # scale replicas down
kubectl scale StatefulSet csi-mysql --replicas 1 # scale replicas back up
Check the expansion status
[root@master-1 ~]# kubectl describe pvc data-csi-mysql-0
Name: data-csi-mysql-0
Namespace: default
StorageClass: csi-rbd-sc
Status: Bound
Volume: pvc-e55185b9-fa17-48ad-b125-929d7b01e5a0
Labels: app=mysql
Annotations: pv.kubernetes.io/bind-completed: yes
pv.kubernetes.io/bound-by-controller: yes
volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 10Gi
Access Modes: RWO
VolumeMode: Filesystem
Used By: csi-mysql-0
Events:
···
Check the PVC
[root@master-1 ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
data-csi-mysql-0 Bound pvc-e55185b9-fa17-48ad-b125-929d7b01e5a0 5Gi RWO csi-rbd-sc 24m
raw-block-pvc Bound pvc-18eb2ee1-3eac-4567-9d07-a449ce0ac675 15Gi RWO csi-rbd-sc 102m
rbd-pvc-bak Bound pvc-6ff9dc5c-b39e-410d-909c-bdd01db765a1 1Gi RWO csi-rbd-sc-pv 164m

Expansion complete.