1、重新设置主机节点名字,方便部署和管理,以及后期添加扩展,然后重启服务器。
#hostnamectl set-hostname cephmon #cat >> /etc/hosts << 'EOF' 192.168.X.X cephmon EOF #重启 #reboot
2、关闭主机防火墙
#systemctl stop firewalld #systemctl disable firewalld #sed -i -e 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config #setenforce 0
3、配置安装ceph需要的源
#可以先对原有的yum源做一个备份 /etc/yum.repos.d/所有内容 #curl -o /etc/yum.repos.d/CentOS-base.repo https://mirrors.aliyun.com/repo/Centos-7.repo #curl -o /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo #cat > /etc/yum.repos.d/ceph.repo << 'EOF' [ceph] name=ceph baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/ gpgcheck=0 priority=1 [ceph-noarch] name=cephnoarch baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/ gpgcheck=0 priority=1 [ceph-source] name=cephsource baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS/ gpgcheck=0 priority=1 EOF #yum makecache fast
4、安装ceph包
#yum install ceph-deploy-2.0.1 ceph-14.2.4 ceph-radosgw-14.2.4 -y
5、部署ceph集群(会在当前目录生成三个配置文件)
#cd /etc/ceph/ #ceph-deploy new $HOSTNAME
6、单节点需要对ceph配置文件部分修改(值不唯一,根据实际需求情况配置)
#cat >> /etc/ceph/ceph.conf << EOF osd pool default size = 1 osd crush chooseleaf type = 0 osd max object name len = 256 osd journal size = 128 EOF
7、开始安装ceph节点
#export CEPH_DEPLOY_REPO_URL=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/ #export CEPH_DEPLOY_GPG_URL=http://mirrors.aliyun.com/ceph/keys/release.asc #ceph-deploy install $HOSTNAME
8、初始化监视节点和同步所有配置文件到 /etc/ceph/ 下
#ceph-deploy mon create-initial #ceph-deploy admin $HOSTNAME
9、节点下创建 manager
#ceph-deploy mgr create $HOSTNAME
10、添加OSD,新盘直接添加,其他盘最好先清空分区再选择添加
#ceph-deploy disk zap $HOSTNAME /dev/sdX #ceph-deploy osd create --data /dev/sdX $HOSTNAME
注(如果无法格式化分区):
查看并且删除dm信息
#dmsetup status //查看是否有输出
#dmsetup remove_all //然后再尝试分区
11、部署完成,检查单节点ceph部署情况
#ceph -s
cluster:
id: 272ac8d0-c758-42db-9a51-59e826f96ac2
health: HEALTH_OK
services:
mon: 1 daemons, quorum cephmon (age 8m)
mgr: cephmon(active, since 5h)
osd: 2 osds: 2 up (since 5h), 2 in (since 22h)
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 3.3 GiB used, 7.3 TiB / 7.3 TiB avail
pgs:
注:这里我加了两个OSD,接下来就可以使用我们的ceph存储池了



