1、安装nfs服务
yum install nfs-utils -y
2、准备一个共享目录
mkdir /opt/data/mysql -p
3、将共享目录以读写权限暴露给10.0.19.0/24网段中的所有主机
cat > /etc/exports <<EOF
/opt/data/mysql 10.0.19.0/24(rw,no_root_squash)
EOF
4、启动nfs
systemctl start nfs
二、接下来,要在每个k8s的node节点都安装nfs,这样的目的是为了node节点可以驱动nfs设备
yum install nfs-utils -y
三、使用nfs挂载
1、接下来创建deployment.yaml 来应用nfs
[root@k8s-master1 ~]# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: mysql  #为该Deployment设置key为app,value为mysql的标签
  name: mysql
  namespace: test
spec:
  replicas: 1  #副本数量
  selector:  #标签选择器,与上面的标签共同作用
    matchLabels:  #选择包含标签app:mysql的资源
      app: mysql
  template:  #这是选择或创建的Pod的模板
    metadata:  #Pod的元数据
      labels:  #Pod的标签,上面的selector即选择包含标签app:mysql的Pod
        app: mysql
    spec:  #期望Pod实现的功能(即在pod中部署)
      containers:  #生成container,与docker中的container是同一种
      - name: mysql
        image: mysql:5.7  #使用镜像mysql:5.7创建container,该container默认3306端口可访问
        ports:
        - containerPort: 3306  # 开启本容器的3306端口可访问
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: hjj123456
        volumeMounts:  #挂载持久存储卷
        - name: mysql-data  #挂载设备的名字,与volumes[*].name 需要对应
          mountPath: /var/lib/mysql  #挂载到容器的某个路径下
      volumes:
      - name: mysql-data  #和上面保持一致 这是本地的文件路径,上面是容器内部的路径
        nfs:
          server: 10.0.19.127  #nfs服务器地址
          path: /opt/data/mysql  #此路径需要事先创建
2、然后应用到k8s中
kubectl apply -f deployment.yaml
3、进入nfs服务器,查看nfs共享目录是否有数据
[root@k8s-master1 ~]# ls /opt/data/mysql/ auto.cnf ca.pem client-key.pem ibdata1 ib_logfile1 performance_schema public_key.pem server-key.pem test ca-key.pem client-cert.pem ib_buffer_pool ib_logfile0 mysql private_key.pem server-cert.pem sys四、测试1、查看mysql的pod在那个节点上
[root@k8s-master1 ~]# kubectl get pod -n test -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES mysql-54cdbb65d-mt645 1/1 Running 0 2m55s 10.244.1.51 k8s-node22、进入mysql创建数据库
[root@k8s-master1 ~]# kubectl exec -it mysql-54cdbb65d-mt645 sh -n test # mysql -uroot -phjj123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or g. Your MySQL connection id is 2 Server version: 5.7.37 MySQL Community Server (GPL) Copyright (c) 2000, 2022, Oracle and/or its affiliates. Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners. Type 'help;' or 'h' for help. Type 'c' to clear the current input statement. mysql> create database test; Query OK, 1 row affected (0.01 sec) mysql> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | mysql | | performance_schema | | sys | | test | +--------------------+ 5 rows in set (0.01 sec)3、进入node2,停掉node2节点测试数据库是否存在,pod迁移需要5,6分钟左右k8s默认的
[root@k8s-node2 ~]# init 0 [root@k8s-master1 ~]# kubectl get pod -n test -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES mysql-54cdbb65d-dm6zn 0/1 Terminating 0 8m51s 10.244.1.52 k8s-node2二、使用pv/pvc(NFS挂载) 一、准备nfs环境mysql-54cdbb65d-gmvws 1/1 Running 0 62s 10.244.0.49 k8s-node1 [root@k8s-master1 ~]# kubectl exec -it mysql-54cdbb65d-gmvws sh -n test # mysql -uroot -phjj123456 mysql: [Warning] Using a password on the command line interface can be insecure. Welcome to the MySQL monitor. Commands end with ; or g. Your MySQL connection id is 2 Server version: 5.7.37 MySQL Community Server (GPL) Copyright (c) 2000, 2022, Oracle and/or its affiliates. Oracle is a registered trademark of Oracle Corporation and/or its affiliates. Other names may be trademarks of their respective owners. Type 'help;' or 'h' for help. Type 'c' to clear the current input statement. mysql> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | mysql | | performance_schema | | sys | | test | +--------------------+ 5 rows in set (0.00 sec) 1、创建目录
mkdir /opt/data/{mysql,nginx} -pv
2、将共享目录以读写权限暴露给10.0.19.0/24网段中的所有主机
cat > /etc/exports <<EOF
/opt/data/mysql 10.0.19.0/24(rw,no_root_squash)
/opt/data/nginx 10.0.19.0/24(rw,no_root_squash)
EOF
3、重启nfs服务器
systemctl restart nfs
二、创建pv
1、编写pv.yaml文件
apiVersion: v1 kind: PersistentVolume metadata: name: pv1 # pv 名字 spec: capacity: storage: 5Gi # 定义容量 accessModes: - ReadWriteMany # 访问模式 persistentVolumeReclaimPolicy: Retain # 回收策略 storageClassName: mysql # 定义 storageClassName 只有相同名字的才能绑定在一起 nfs: path: /opt/data/mysql #共享目录 server: 10.0.19.127 #nfs服务器地址 --- apiVersion: v1 kind: PersistentVolume metadata: name: pv2 # pv 名字 spec: capacity: storage: 5Gi # 定义容量 accessModes: - ReadWriteMany # 访问模式 persistentVolumeReclaimPolicy: Retain # 回收策略 storageClassName: nginx # 定义 storageClassName 只有相同名字的才能绑定在一起 nfs: path: /opt/data/nginx #共享目录 server: 10.0.19.127 #nfs服务器地址2、创建pv1,pv2并查看
[root@k8s-master1 ~]# kubectl apply -f pv.yaml persistentvolume/pv1 created persistentvolume/pv2 created [root@k8s-master1 ~]# kubectl get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE pv1 5Gi RWX Retain Available mysql 10s pv2 5Gi RWX Retain Available nginx 10s [root@k8s-master1 ~]#三、创建pvc,申请pv1、编写pvc.yaml文件
kind: PersistentVolumeClaim apiVersion: v1 metadata: name: pvc1 # pvc 名字 namespace: dev spec: storageClassName: mysql #定义 storageClassName 只有相同名字的才能绑定在一起 accessModes: - ReadWriteMany resources: requests: storage: 5Gi # 用户需要多少容量 --- kind: PersistentVolumeClaim apiVersion: v1 metadata: name: pvc3 # pvc 名字 namespace: dev spec: storageClassName: nginx #定义 storageClassName 只有相同名字的才能绑定在一起 accessModes: - ReadWriteMany resources: requests: storage: 5Gi # amout of Storage request2、创建pvc1,pvc3并查看
[root@k8s-master1 ~]# kubectl create namespace dev namespace/dev created [root@k8s-master1 ~]# kubectl apply -f pvc.yaml persistentvolumeclaim/pvc1 created persistentvolumeclaim/pvc3 created [root@k8s-master1 ~]# kubectl get pvc -n dev NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE pvc1 Bound pv1 5Gi RWX mysql 10s pvc3 Bound pv2 5Gi RWX nginx 10s四、使用pvc挂载1、接下创建deployment.yaml 来应用pvc
apiVersion: apps/v1 kind: Deployment metadata: labels: app: mysql #为该Deployment设置key为app,value为mysql的标签 name: lnmp namespace: dev #和pvc在同一个namespace spec: replicas: 1 #副本数量 selector: #标签选择器,与上面的标签共同作用 matchLabels: #选择包含标签app:lnmp的资源 app: lnmp template: #这是选择或创建的Pod的模板 metadata: #Pod的元数据 labels: #Pod的标签,上面的selector即选择包含标签app:lnmp的Pod app: lnmp spec: #期望Pod实现的功能(即在pod中部署) containers: #生成container,与docker中的container是同一种 - name: mysql image: mysql:5.7 #使用镜像mysql: 创建container,该container默认3306端口可访问 ports: - containerPort: 3306 # 开启本容器的3306端口可访问 env: - name: MYSQL_ROOT_PASSWORD value: hjj123456 volumeMounts: #挂载持久存储卷 - name: mysql-data #挂载设备的名字,与volumes[*].name 需要对应 mountPath: /var/lib/mysql #挂载到容器的某个路径下 - name: nginx image: nginx:1.8 #使用镜像mysql: 创建container,该container默认3306端口可访问 ports: - containerPort: 80 # 开启本容器的80端口可访问 volumeMounts: #挂载持久存储卷 - name: nginx-data #挂载设备的名字,与volumes[*].name 需要对应 mountPath: /var/log/nginx #挂载到容器的某个路径下 volumes: - name: mysql-data #和上面保持一致 这是本地的文件路径,上面是容器内部的路径 persistentVolumeClaim: claimName: pvc1 #pvc名称 readOnly: false #设置成false可读可写,设成true表示只读 - name: nginx-data #和上面保持一致 这是本地的文件路径,上面是容器内部的路径 persistentVolumeClaim: claimName: pvc3 #pvc名称 readOnly: false #设置成false可读可写,设成true表示只读2、然后应用到k8s中
kubectl apply -f deployment.yaml
3、进入nfs服务器,查看nfs共享目录是否有数据
[root@k8s-master1 ~]# ls /opt/data/mysql/ auto.cnf ca.pem client-key.pem ibdata1 ib_logfile1 performance_schema public_key.pem server-key.pem test ca-key.pem client-cert.pem ib_buffer_pool ib_logfile0 mysql private_key.pem server-cert.pem sys [root@k8s-master1 ~]# ls /opt/data/nginx/ access.log error.log五、测试选择mysql(这里和上面直接nfs效果一样)[root@k8s-master1 ~]# kubectl get pod -n dev -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES lnmp-7d76dd865d-6qdpm 2/2 Running 0 5m38s 10.244.0.59 k8s-node1[root@k8s-node2 ~]# mysql -uroot -phjj123456 -h10.244.0.59 MySQL [(none)]> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | mysql | | performance_schema | | sys | | test | +--------------------+ 5 rows in set (0.01 sec) 停掉k8s-node1节点在测试,pod迁移需要5,6分钟左右k8s默认的
[root@k8s-master1 ~]# kubectl get pod -n dev -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES lnmp-7d76dd865d-6qdpm 2/2 Terminating 0 9m44s 10.244.0.59 k8s-node1lnmp-7d76dd865d-hbwx7 2/2 Running 0 42s 10.244.1.60 k8s-node2 [root@k8s-node2 ~]# mysql -uroot -phjj123456 -h 10.244.1.60 Welcome to the MariaDB monitor. Commands end with ; or g. Your MySQL connection id is 4 Server version: 5.7.37 MySQL Community Server (GPL) Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. Type 'help;' or 'h' for help. Type 'c' to clear the current input statement. MySQL [(none)]> show databases; +--------------------+ | Database | +--------------------+ | information_schema | | mysql | | performance_schema | | sys | | test | +--------------------+ 5 rows in set (0.01 sec) 以上就是通过pv和pvc挂载
参数介绍
AccessModes 是用来对 PV 进行访问模式的设置,用于描述用户应用对存储资源的访问权限,访问权限包括下面几种方式:
- ReadWriteOnce(RWO):读写权限,但是只能被单个节点挂载
- ReadOnlyMany(ROX):只读权限,可以被多个节点挂载
- ReadWriteMany(RWX):读写权限,可以被多个节点挂载
persistentVolumeReclaimPolicy(回收策略) 这里指定的 PV 的回收策略为 Retain,目前 PV 支持的策略有三种:
- Retain(保留)- 保留数据,需要管理员手工清理数据
- Recycle(回收)- 清除 PV 中的数据,效果相当于执行 rm -rf /thevolume/*
- Delete(删除)- 与 PV 相连的后端存储完成 volume 的删除操作,当然这常见于云服务商的存储服务,比如 AWS EBS。
不过需要注意的是,目前只有 NFS 和 HostPath 两种类型支持回收策略。当然一般来说还是设置为 Retain 这种策略保险一点。
PV 的生命周期中的状态,可能会处于4种不同的阶段:
- Available(可用):表示可用状态,还未被任何 PVC 绑定
- Bound(已绑定):表示 PV 已经被 PVC 绑定
- Released(已释放):PVC 被删除,但是资源还未被集群重新声明
- Failed(失败): 表示该 PV 的自动回收失败
StorageClassName 相同的PV和PVC才能够绑定到一起。 例如:PVC 的StorageClassName设置为 "nfs",那么它只会和 StorageClassName 也为 "nfs" 的 PV 绑定到一起。
三、使用持久化存储StorageClass(NFS挂载)
一、准备好NFS服务器确保nfs可以正常工作,创建持久化需要的目录。【前面的验证过程中已部署好NFS服务器这里直接引用】
path: /opt/data/nginx server: 10.0.19.127二、开启rbac权限RBAC基于角色的访问控制–全拼Role-Based Access Control
根据rbac.yaml 文件创建Service AccountapiVersion: v1 kind: ServiceAccount #创建一个账户,主要用来管理NFS provisioner在k8s集群中运行的权限 metadata: name: nfs-client-provisioner namespace: kube-system --- kind: ClusterRole #创建集群角色 apiVersion: rbac.authorization.k8s.io/v1 metadata: name: nfs-client-provisioner-runner #角色名 rules: #角色权限 - apiGroups: [""] resources: ["persistentvolumes"] # 操作的资源 verbs: ["get", "list", "watch", "create", "delete"] # 对该资源的操作权限 - apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["endpoints"] verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: ClusterRoleBinding # 集群角色绑定 apiVersion: rbac.authorization.k8s.io/v1 metadata: name: run-nfs-client-provisioner subjects: # 角色绑定对象 - kind: ServiceAccount name: nfs-client-provisioner namespace: kube-system roleRef: kind: ClusterRole # 集群角色 name: nfs-client-provisioner-runner apiGroup: rbac.authorization.k8s.io这个文件是创建授权账户。为什么要授权?在K8S中,我们知道有 ApiServer 组件,它可以管理我们创建的 deployment, pod,service等资源,但是有些资源它是管不到的,比如说 K8S本身运行需要的组件等等,同样StorageClass这种资源它也管不到,所以,需要授权账户。
我们在master节点执行
[root@k8s-master1 ~]# kubectl apply -f rbac.yaml serviceaccount/nfs-client-provisioner created clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created三、 创建StorageClass,指定provisioner创建nfs-client-provisioner.yaml文件
kind: Deployment apiVersion: apps/v1 metadata: name: nfs-provisioner-01 namespace: kube-system #与RBAC文件中的namespace保持一致 spec: replicas: 1 strategy: type: Recreate selector: matchLabels: app: nfs-provisioner-01 template: metadata: labels: app: nfs-provisioner-01 spec: serviceAccountName: nfs-client-provisioner # 指定serviceAccount! containers: - name: nfs-client-provisioner image: jmgao1983/nfs-client-provisioner:latest #镜像地址 imagePullPolicy: IfNotPresent volumeMounts: # 挂载数据卷到容器指定目录 - name: nfs-client-root mountPath: /persistentvolumes #不需要修改 env: - name: PROVISIONER_NAME value: nfs-provisioner-01 # 此处供应者名字供storageclass调用 - name: NFS_SERVER value: 10.0.19.127 # 填入NFS的地址 - name: NFS_PATH value: /opt/data/nginx # 填入NFS挂载的目录 volumes: - name: nfs-client-root nfs: server: 10.0.19.127 # 填入NFS的地址 path: /opt/data/nginx # 填入NFS挂载的目录 --- apiVersion: storage.k8s.io/v1 kind: StorageClass # 创建StorageClass metadata: name: nfs-boge provisioner: nfs-provisioner-01 #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致 # Supported policies: Delete、 Retain , default is Delete reclaimPolicy: Retain #清除策略PS:nfs-client-provisioner这个镜像的作用,它通过k8s集群内置的NFS驱动,挂载远端的NFS服务器到本地目录,然后将自身作为storageprovisioner,然后关联到storageclass资源。
在master上创建
[root@k8s-master1 ~]# kubectl apply -f nfs-client-provisioner1.yaml deployment.apps/nfs-provisioner-01 created storageclass.storage.k8s.io/nfs-boge created四、基于StorageClass创建一个pvc,看看动态生成的pv是什么效果:[root@k8s-master1 ~]# cat pvc-sc.yaml kind: PersistentVolumeClaim apiVersion: v1 metadata: name: nginx spec: storageClassName: nfs-boge accessModes: - ReadWriteMany resources: requests: storage: 5Gi [root@k8s-master1 ~]# kubectl apply -f pvc-sc.yaml persistentvolumeclaim/nginx created [root@k8s-master1 ~]# kubectl get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE nginx Bound pvc-7fc6a049-79b9-4561-9d35-4d271327d4eb 5Gi RWX nfs-boge 10s [root@k8s-master1 ~]# kubectl get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE pvc-7fc6a049-79b9-4561-9d35-4d271327d4eb 5Gi RWX Retain Bound default/nginx nfs-boge 11s五、创建pod测试创建pod,申明PVC进行测试
[root@k8s-master1 ~]# cat nginx.yaml apiVersion: apps/v1 kind: Deployment metadata: labels: app: nginx name: nginx spec: replicas: 1 selector: matchLabels: app: nginx template: metadata: labels: app: nginx spec: containers: - image: nginx name: nginx volumeMounts: # 我们这里将nginx容器默认的页面目录挂载 - name: html-files mountPath: "/usr/share/nginx/html" volumes: - name: html-files persistentVolumeClaim: claimName: nginx #pvc的name清空前面实验的pvc,由于pvc绑定了pv,直接删除pv删除不掉:kubectl delete pvc --all 先删pvc再删pv
如果有pod绑定,则需要先删除pod然后删除pvc,再删除pv,如果还需要清理后端存储,则最后根据情况删除后端存储数据
测试
在master上创建[root@k8s-master1 ~]# kubectl apply -f nginx.yaml deployment.apps/nginx unchanged [root@k8s-master1 ~]# kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-5bf46c4cc6-snx2d 1/1 Running 0 5m45s 10.244.0.70 k8s-node1[root@k8s-node1 ~]# curl 10.244.0.70 #访问 403 Forbidden 403 Forbidden
nginx/1.21.6 [root@k8s-master1 ~]# echo "node1" > /opt/data/nginx/default-nginx-pvc-7fc6a049-79b9-4561-9d35-4d271327d4eb/index.html #修改挂载文件 [root@k8s-node1 ~]# curl 10.244.0.70 #访问 node1



