
# 一、获取ceph集群信息和admin用户的key
## 1.1 首先进入ceph shell
[root@ceph1 ~]# python3.6 /usr/sbin/cephadm shell
Inferring fsid 4b5468be-7731-11ee-9c33-000c295baf08
Using recent ceph image quay.io/ceph/ceph@sha256:4924a393f5ef4c00e133c13cb8297558f4d1f52731eb16841906519e5de60063
[ceph: root@ceph1 /]#
## 1.2 获取集群信息
```
[ceph: root@ceph1 /]# ceph mon dump
epoch 3
fsid 4b5468be-7731-11ee-9c33-000c295baf08
last_changed 2023-10-30T14:57:06.178543+0000
created 2023-10-30T14:33:31.488067+0000
min_mon_release 16 (pacific)
election_strategy: 1
0: [v2:192.168.1.41:3300/0,v1:192.168.1.41:6789/0] mon.ceph1
1: [v2:192.168.1.43:3300/0,v1:192.168.1.43:6789/0] mon.ceph3
2: [v2:192.168.1.42:3300/0,v1:192.168.1.42:6789/0] mon.ceph2
**说明:创建config-map.yaml中clusterid需要用到4b5468be-7731-11ee-9c33-000c295baf08**
## 1.3 获取admin用户key
[ceph: root@ceph1 /]# ceph auth get-key client.admin ; echo
AQC7vj9lfK50BxAAWt/ULQZzRlaCZaVNrbsBFQ==
**说明:创建secret.yaml中userKey需要用到AQC7vj9lfK50BxAAWt/ULQZzRlaCZaVNrbsBFQ==**
# 二、下载并导入镜像
## 2.1 在master01节点上下载镜像
[root@master01 ~]# wget -P /tmp/ https://d.frps.cn/file/tools/ceph-csi/k8s_1.24_ceph-csi.tar
## 2.2 将master01节点上的镜像拷贝到其他四个节点
[root@master01 ~]# for i in master02 master03 node01 node02 ; do scp /tmp/k8s_1.24_ceph-csi.tar $i:/tmp/; done
## 2.3 所有节点导入镜像
$ ctr -n k8s.io i import /tmp/k8s_1.24_ceph-csi.tar
# 三、创建ceph的 provisioner
## 3.1 在master01节点上创建ceph目录,后续将所有yaml文件放到该目录下
[root@master01 ~]# mkdir ceph
[root@master01 ~]# cd ceph
## 3.2 在master01节点上创建secret.yaml
cat > secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: admin
  userKey: AQC7vj9lfK50BxAAWt/ULQZzRlaCZaVNrbsBFQ== #这串上面已经获取
EOF
## 3.3 在master01节点上创建config-map.yaml
cat > csi-config-map.yaml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: "ceph-csi-config"
data:
  config.json: |-
    [
      {
        "clusterID": "4b5468be-7731-11ee-9c33-000c295baf08",
        "monitors": [
          "192.168.1.41:6789",
          "192.168.1.42:6789",
          "192.168.1.43:6789"
        ]
      }
    ]
EOF
## 3.4 在master01节点上创建ceph-conf.yaml
cat > ceph-conf.yaml <<EOF
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
EOF
## 3.5 在master01节点上创建csi-kms-config-map.yaml(该config内容为空)
cat > csi-kms-config-map.yaml <<EOF
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    {}
metadata:
  name: ceph-csi-encryption-kms-config
EOF
## 3.6 在master01节点上下载其余rbac以及provisioner相关yaml
wget https://d.frps.cn/file/tools/ceph-csi/csi-provisioner-rbac.yaml
wget https://d.frps.cn/file/tools/ceph-csi/csi-nodeplugin-rbac.yaml
wget https://d.frps.cn/file/tools/ceph-csi/csi-rbdplugin.yaml
wget https://d.frps.cn/file/tools/ceph-csi/csi-rbdplugin-provisioner.yaml
## 3.7 在master01节点上的ceph目录下应用所有yaml
for f in *.yaml; do echo "$f"; kubectl apply -f "$f"; done
## 3.8 检查provisioner的pod,状态为running才对
[root@master01 ceph]# kubectl get po
NAME READY STATUS RESTARTS AGE
csi-rbdplugin-4gsvz 3/3 Running 0 10s
csi-rbdplugin-g9972 3/3 Running 0 10s
csi-rbdplugin-jfd87 3/3 Running 0 10s
csi-rbdplugin-mdzdh 3/3 Running 0 10s
csi-rbdplugin-provisioner-5c7d54dd8d-2w944 7/7 Running 0 10s
csi-rbdplugin-provisioner-5c7d54dd8d-p67sj 7/7 Running 0 10s
csi-rbdplugin-provisioner-5c7d54dd8d-s8mpm 7/7 Running 0 10s
csi-rbdplugin-xq2n4 3/3 Running 0 10s
# 四、创建storageclass
## 4.1 在k8s上的Master01节点上创建ceph-sc.yaml
cat > ceph-sc.yaml <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc #storageclass名称
provisioner: rbd.csi.ceph.com #驱动器
parameters:
  clusterID: 4b5468be-7731-11ee-9c33-000c295baf08 #ceph集群id
  pool: zq #pool空间
  imageFeatures: layering #rbd特性
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete #pvc回收机制
allowVolumeExpansion: true #对扩展卷进行扩展
mountOptions: #StorageClass 动态创建的 PersistentVolume 将使用类中 mountOptions 字段指定的挂载选项
  - discard
EOF
## 4.2 在k8s上的Master01节点上应用yaml
[root@master01 ceph]# kubectl apply -f ceph-sc.yaml
# 五、创建pvc
## 5.1 在k8s上的Master01节点上创建ceph-pvc.yaml
cat > ceph-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-pvc #pvc名称
spec:
  accessModes:
    - ReadWriteOnce #访问模式
  resources:
    requests:
      storage: 1Gi #存储空间
  storageClassName: csi-rbd-sc
EOF
## 5.2 在k8s上的Master01节点上应用yaml
[root@master01 ceph]# kubectl apply -f ceph-pvc.yaml
# 六、创建pod使用ceph存储
## 6.1 在k8s上的Master01节点上创建ceph-pod.yaml
cat > ceph-pod.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod
spec:
  containers:
    - name: ceph-ng
      image: registry.cn-hangzhou.aliyuncs.com/zq-demo/nginx:1.14.2
      volumeMounts:
        - name: ceph-mnt
          mountPath: /mnt
          readOnly: false
  volumes:
    - name: ceph-mnt
      persistentVolumeClaim:
        claimName: ceph-pvc
EOF
## 6.2 在k8s上的Master01节点上应用yaml
[root@master01 ceph]# kubectl apply -f ceph-pod.yaml
# 七、验证查看
## 7.1 查看pv
[root@master01 ceph]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-e5904fd9-02c3-40e4-8f61-cc4a37fc0fac 1Gi RWO Delete Bound default/ceph-pvc csi-rbd-sc 3m56s
## 7.2 在ceph这边查看rbd
[ceph: root@ceph1 /]# rbd ls zq
csi-vol-a178125e-7911-11ee-af0e-421bef2176f8
## 7.3 在pod里查看挂载情况
[root@master01 ceph]# kubectl exec -it ceph-pod -- df
Filesystem 1K-blocks Used Available Use% Mounted on
overlay 36805060 10357760 26447300 28% /
tmpfs 65536 0 65536 0% /dev
tmpfs 2007620 0 2007620 0% /sys/fs/cgroup
/dev/rbd0 999320 2564 980372 0% /mnt
/dev/mapper/centos-root
36805060 10357760 26447300 28% /etc/hosts
/dev/mapper/centos-root
36805060 10357760 26447300 28% /dev/termination-log
/dev/mapper/centos-root
36805060 10357760 26447300 28% /etc/hostname
/dev/mapper/centos-root
36805060 10357760 26447300 28% /etc/resolv.conf
shm 65536 0 65536 0% /dev/shm
tmpfs 3912840 12 3912828 0% /run/secrets/kubernetes.io/serviceaccount
tmpfs 2007620 0 2007620 0% /proc/acpi
tmpfs 65536 0 65536 0% /proc/kcore
tmpfs 65536 0 65536 0% /proc/keys
tmpfs 65536 0 65536 0% /proc/timer_list
tmpfs 65536 0 65536 0% /proc/sched_debug
tmpfs 2007620 0 2007620 0% /proc/scsi
tmpfs 2007620 0 2007620 0% /sys/firmware
```





暂无评论内容