Every Ceph deployment begins with a Ceph Storage Cluster. A Ceph cluster can contain thousands of storage nodes; a minimal system needs at least one monitor and two OSDs to replicate data. The Ceph File System, Ceph Object Storage, and Ceph Block Device all read data from and write data to the Ceph Storage Cluster.
(Figure: Ceph architecture diagram)
1. Cluster configuration

Node   | IP           | Role
ceph01 | 172.16.100.5 | deploy, mon, osd*2, mds
ceph02 | 172.16.100.6 | mon, osd*2
ceph03 | 172.16.100.7 | mon, osd*2
2. OS version
#cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
3. Attach two additional 20G disks to each of the three nodes
#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
fd0 2:0 1 4K 0 disk
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 476M 0 part /boot
└─sda2 8:2 0 19.5G 0 part /
sdb 8:16 0 20G 0 disk
sdc 8:32 0 23G 0 disk
sr0 11:0 1 1024M 0 rom
4. Disable SELinux and the firewall
#vim /etc/selinux/config
SELINUX=disabled
#setenforce 0
#systemctl stop firewalld
#systemctl disable firewalld
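It is worth confirming both changes before moving on; setenforce only affects the running system, while the config edit takes effect after a reboot. A minimal check (expected output: Permissive or Disabled, and inactive):
#getenforce
#systemctl is-active firewalld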
5. Add hosts entries
#vim /etc/hosts
172.16.100.5 ceph01
172.16.100.6 ceph02
172.16.100.7 ceph03
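The same entries are needed in /etc/hosts on every node, not just the deploy node. Assuming the file was edited on ceph01 first, it can simply be copied to the others:
#scp /etc/hosts ceph02:/etc/hosts
#scp /etc/hosts ceph03:/etc/hosts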
6. Set up passwordless SSH login
#ssh-keygen
#ssh-copy-id ceph01
#ssh-copy-id ceph02
#ssh-copy-id ceph03
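A quick loop from the deploy node confirms that every node can be reached without a password prompt:
#for h in ceph01 ceph02 ceph03; do ssh $h hostname; done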
7. Synchronize the clocks
#yum install -y ntp ntpdate
#ntpdate pool.ntp.org
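ntpdate is only a one-shot correction; to keep the clocks aligned afterwards, the ntpd service installed above can be enabled on each node:
#systemctl enable ntpd
#systemctl start ntpd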
8. Add the Ceph yum repository
#vim /etc/yum.repos.d/ceph.repo
[ceph]
name=Ceph packages for $basearch
baseurl=http://download.ceph.com/rpm-jewel/el7/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-jewel/el7/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-jewel/el7/SRPMS
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
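Assuming the repo file has been copied to all three nodes, a metadata refresh confirms the repository is reachable:
#yum clean all
#yum makecache
#yum repolist | grep -i ceph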
9. Import the release key
#rpm --import 'https://download.ceph.com/keys/release.asc'
10. Install the Ceph packages
#yum install -y ceph ceph-radosgw rdate
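A version check on each node is a quick way to confirm the Jewel packages came from the new repository (the exact 10.2.x build string will vary):
#ceph --version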
11. Install ceph-deploy
#yum update -y
#yum install -y ceph-deploy
12. Create the cluster
#mkdir -pv /opt/cluster
#cd /opt/cluster
#ceph-deploy new ceph01 ceph02 ceph03
#ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
13. Edit the configuration file: add public_network and slightly raise the allowed clock drift between monitors (default 0.05s, set to 2s here)
#vim ceph.conf
[global]
fsid = 6fb69a7a-647a-4cb6-89ad-583729eb0406
mon_initial_members = ceph01, ceph02, ceph03
mon_host = 172.16.100.5,172.16.100.6,172.16.100.7
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 172.16.100.0/24
mon_clock_drift_allowed = 2
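ceph-deploy mon create-initial in the next step distributes this ceph.conf to the nodes automatically; if the file is edited again later, it can be pushed out from the deploy directory with:
#ceph-deploy --overwrite-conf config push ceph01 ceph02 ceph03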
14. Deploy the MONs
#ceph-deploy mon create-initial
#ceph -s
cluster 6fb69a7a-647a-4cb6-89ad-583729eb0406
health HEALTH_ERR
no osds
monmap e1: 3 mons at {ceph01=172.16.100.5:6789/0,ceph02=172.16.100.6:6789/0,ceph03=172.16.100.7:6789/0}
election epoch 8, quorum 0,1,2 ceph01,ceph02,ceph03
osdmap e1: 0 osds: 0 up, 0 in
flags sortbitwise,require_jewel_osds
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
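The monitor quorum can also be inspected directly, independent of the overall health summary:
#ceph mon stat
#ceph quorum_status --format json-pretty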
15. Deploy the OSDs
#ceph-deploy disk zap ceph01:sdb ceph01:sdc
#ceph-deploy disk zap ceph02:sdb ceph02:sdc
#ceph-deploy disk zap ceph03:sdb ceph03:sdc
#ceph-deploy osd prepare ceph01:sdb:sdc
#ceph-deploy osd prepare ceph02:sdb:sdc
#ceph-deploy osd prepare ceph03:sdb:sdc
#ceph-deploy osd activate ceph01:sdb1:sdc1
#ceph-deploy osd activate ceph02:sdb1:sdc1
#ceph-deploy osd activate ceph03:sdb1:sdc1
#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
fd0 2:0 1 4K 0 disk
sda 8:0 0 20G 0 disk
├─sda1 8:1 0 476M 0 part /boot
└─sda2 8:2 0 19.5G 0 part /
sdb 8:16 0 20G 0 disk
└─sdb1 8:17 0 20G 0 part /var/lib/ceph/osd/ceph-0
sdc 8:32 0 23G 0 disk
└─sdc1 8:33 0 5G 0 part
sr0 11:0 1 1024M 0 rom
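ceph osd tree shows how the new OSDs map onto the hosts in the CRUSH hierarchy and whether each one is up and in:
#ceph osd tree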
16. Check the cluster status again; once all PGs are active+clean the cluster is healthy.
#ceph -s
cluster 6fb69a7a-647a-4cb6-89ad-583729eb0406
health HEALTH_OK
monmap e1: 3 mons at {ceph01=172.16.100.5:6789/0,ceph02=172.16.100.6:6789/0,ceph03=172.16.100.7:6789/0}
election epoch 8, quorum 0,1,2 ceph01,ceph02,ceph03
osdmap e15: 3 osds: 3 up, 3 in
flags sortbitwise,require_jewel_osds
pgmap v32: 64 pgs, 1 pools, 0 bytes data, 0 objects
101 MB used, 61305 MB / 61406 MB avail
64 active+clean
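As a simple functional test, an object can be written into a pool and read back with rados; the pool name test and object name hello below are just examples:
#ceph osd pool create test 64
#echo hello > /tmp/hello.txt
#rados -p test put hello /tmp/hello.txt
#rados -p test ls
#rados -p test get hello /tmp/hello.out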