备注:OSD 数量与存储池 PG 数的参考值——OSD 为 0-5 个时 PG 取 128,5-10 个时取 512,10-50 个时取 4096。删除存储池的方法参考:https://www.cnblogs.com/leoshi/p/13654244.html 。用 lsblk -f 可查看磁盘分区情况;Linux 磁盘分区、挂载参考:https://www.cnblogs.com/superlsj/p/11610517.html

# Ceph部署
部署工具:ceph-deploy
部署版本:Mimic 13版
虚拟机:四台
| ip | 主机名 | 角色 |
|---|---|---|
| 192.168.0.154 | ceph1 | mgr管理节点,osd存储节点,mon |
| 192.168.0.175 | ceph2 | osd存储节点,mon |
| 192.168.0.125 | ceph3 | osd存储节点,mon |
| 192.168.0.164 | client | 客户机挂载节点 |
# vi /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
yum clean all && yum list
虚拟机准备工作关闭selinux和防火墙
或
开启防火墙端口
firewall-cmd --permanent --add-port=6789/tcp #admin
firewall-cmd --permanent --add-port=6800-8000/tcp #osd
firewall-cmd --reload
修改主机名称
hostnamectl set-hostname <主机名>   # 例如:hostnamectl set-hostname ceph1
同步时间
yum -y install ntpdate ntp
ntpdate cn.ntp.org.cn
systemctl restart ntpd && systemctl enable ntpd
配置本地解析
修改/etc/hosts
192.168.0.154 ceph1
192.168.0.175 ceph2
192.168.0.125 ceph3
192.168.0.164 client
ssh-keygen
ssh-copy-id ceph1
ssh-copy-id ceph2
ssh-copy-id ceph3
ssh-copy-id client
ceph1执行
yum install ceph-deploy python-setuptools python2-subprocess32 ceph-common
创建ceph目录
mkdir ceph && cd ceph
创建集群,会在当前目录生成配置文件。ceph-deploy --cluster {cluster-name} new ceph1 ceph2 ceph3 #创建一个自定义集群名称的ceph集群,默认名称为ceph
ceph-deploy new ceph1 ceph2 ceph3
修改配置文件
vim ceph.conf
[global]
fsid = 36ea8944-9e18-41d4-8169-401e481f8bc5
mon_initial_members = ceph1, ceph2, ceph3
mon_host = 192.168.0.154,192.168.0.175,192.168.0.125
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
rbd_default_features = 1
mon clock drift allowed = 2
mon clock drift warn backoff = 30
public_network = 192.168.0.0/24

[mon]
mon allow pool delete = true
安装Ceph软件包
yum -y install ceph ceph-radosgw #都需要执行,或者管理端执行ceph-deploy install ceph1 ceph2 ceph3
初始化monitor(s),并收集密钥
ceph-deploy mon create-initial
使用ceph-deploy把配置文件和admin密钥拷贝到所有节点,以便每次执行Ceph命令行时无需指定monitor地址和ceph.client.admin.keyring
ceph-deploy admin ceph1 ceph2 ceph3 client
确定管理节点
L版本之后Ceph,必须要有一个mgr节点,所以我们在管理节点执行:
su - cephadm
cd ceph-cluster
ceph-deploy mgr create ceph1
检查集群状态
ceph -s #health: HEALTH_OK就表明成功
在VM里给三台节点虚拟机增加新硬盘
lsblk
查看新硬盘的名称 我的是sdb,磁盘名称是从sda,sdb,sdc排序的
创建OSD
ceph-deploy osd create ceph1 --data /dev/sdb
ceph-deploy osd create ceph2 --data /dev/sdb
ceph-deploy osd create ceph3 --data /dev/sdb
建一个存储池,要想使用ceph的存储功能,必须先创建存储池
ceph osd pool create rbd 128 128
初始化存储池
rbd pool init -p rbd
升级client的虚拟机内核到5版本
修改client下文件权限
chmod +r /etc/ceph/ceph.client.admin.keyring
client节点创建块设备镜像(名称与后面映射的foo一致),--size的单位是M
rbd create foo --size 4096 --pool rbd
client节点映射镜像到主机:
rbd map foo --name client.admin
client节点格式化块设备
mkfs.ext4 -m 0 /dev/rbd/rbd/foo
client节点挂载mount块设备
mkdir /mnt/ceph-block-device
mount /dev/rbd/rbd/foo /mnt/ceph-block-device



