Environment Preparation
Note: passwordless SSH (mutual trust) is configured between nodes, the automation tool ansible is installed on the deploy node, the Ceph yum repository is configured, and hostnames have been set.
# CentOS 7.5; each machine has two data disks
10.100.2.50 deploy
10.100.2.51 mon1
10.100.2.52 mon2
10.100.2.53 mon3
10.100.2.54 osd1
10.100.2.61 osd2
10.100.2.62 osd3
10.100.2.71 osd4
Configure the Environment
Use ansible to batch-change hostnames and IP addresses:
vim hostReset.yml
---
- hosts: openstack
  remote_user: root
  tasks:
    - name: change hostname
      raw: "echo {{ hostname | quote }} > /etc/hostname"
    - name: copy ens161 config
      template: src="/etc/sysconfig/network-scripts/ifcfg-ens161" dest="/etc/sysconfig/network-scripts/"
    - name: change ens161 ip address
      shell: |
        sed -i "s#IPADDR=10.100.2.10#IPADDR={{ ens161_ipaddr }}#g" /etc/sysconfig/network-scripts/ifcfg-ens161
    - name: change ens224 ip address
      shell: |
        sed -i "s#BOOTPROTO=dhcp#BOOTPROTO=static#g" /etc/sysconfig/network-scripts/ifcfg-ens224
        echo "IPADDR={{ ens224_ipaddr }}" >> /etc/sysconfig/network-scripts/ifcfg-ens224
        echo "NETMASK=255.255.255.0" >> /etc/sysconfig/network-scripts/ifcfg-ens224
        sed -i "s#ONBOOT=no#ONBOOT=yes#g" /etc/sysconfig/network-scripts/ifcfg-ens224
    - name: change ens256 network config
      shell: |
        sed -i "s#BOOTPROTO=dhcp#BOOTPROTO=none#g" /etc/sysconfig/network-scripts/ifcfg-ens256
        sed -i "s#ONBOOT=no#ONBOOT=yes#g" /etc/sysconfig/network-scripts/ifcfg-ens256
    - name: copy ceph repo
      template: src="/etc/yum.repos.d/ceph.repo" dest="/etc/yum.repos.d/"
    - name: install ceph and ceph-radosgw
      shell: |
        yum clean all
        yum makecache fast
        yum install -y ceph ceph-radosgw
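The playbook expects per-host variables (hostname, ens161_ipaddr, ens224_ipaddr). One way to supply them is as host variables in the inventory; a minimal sketch, with illustrative values for this cluster:

cat > /etc/ansible/hosts <<'EOF'
[openstack]
10.100.2.51 hostname=mon1 ens161_ipaddr=10.100.2.51 ens224_ipaddr=10.100.3.51
10.100.2.52 hostname=mon2 ens161_ipaddr=10.100.2.52 ens224_ipaddr=10.100.3.52
10.100.2.54 hostname=osd1 ens161_ipaddr=10.100.2.54 ens224_ipaddr=10.100.3.54
EOF
ansible-playbook hostReset.yml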
Install the NTP Service
vim ntp.yml
---
- hosts: openstack
  remote_user: root
  tasks:
    - name: install ntp and enable ntpd service
      shell: |
        yum install -y ntp
        systemctl start ntpd.service
        systemctl enable ntpd.service
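Run the playbook and spot-check that ntpd is active everywhere (assuming the same openstack inventory group):

ansible-playbook ntp.yml
ansible openstack -m shell -a 'systemctl is-active ntpd && ntpq -p'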
Install the Ceph Cluster
On the deploy node:
yum install -y ceph-deploy
mkdir -p /etc/ceph
cd /etc/ceph
ceph-deploy new controller1 controller2 controller3 --cluster-network 10.100.3.0/24 --public-network 10.100.2.0/24
ceph-deploy mon create-initial
ceph-deploy admin controller1 controller2 controller3 mysql-rabbit network1 network2 compute1
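The status output further below also shows active/standby mgr daemons. On Luminous these are not created by the steps above, so a mgr creation step is needed as well, presumably along the lines of:

ceph-deploy mgr create controller1 controller2 controller3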
# Deploy OSDs
vim osd_deploy.yml
---
- hosts: openstack
  remote_user: root
  tasks:
    - name: deploy osd
      # ceph-deploy is installed only on the deploy node, so delegate the
      # task there; {{ ansible_hostname }} still resolves to each target host
      delegate_to: localhost
      shell: |
        ceph-deploy osd create --data /dev/sdb {{ ansible_hostname }}
        ceph-deploy osd create --data /dev/sdc {{ ansible_hostname }}
      args:
        chdir: /etc/ceph
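Run the playbook from the deploy node, then check the cluster state with ceph -s:

ansible-playbook osd_deploy.yml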
[root@kolla ceph]# ceph -s
  cluster:
    id:     84225393-2c25-46f3-93ee-e2c621323572
    health: HEALTH_WARN
            application not enabled on 1 pool(s)
            too few PGs per OSD (27 < min 30)

  services:
    mon: 3 daemons, quorum controller1,controller2,controller3
    mgr: controller1(active), standbys: controller2, controller3
    osd: 14 osds: 14 up, 14 in

  data:
    pools:   1 pools, 128 pgs
    objects: 5 objects, 709 B
    usage:   14 GiB used, 210 GiB / 224 GiB avail
    pgs:     128 active+clean
Ceph Cluster Providing Block Storage (RBD)
[root@kolla ceph]# ceph osd pool create test 128 128
pool 'test' created
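A HEALTH_WARN like the "application not enabled on 1 pool(s)" seen earlier can be cleared by tagging the pool with its intended application:

ceph osd pool application enable test rbd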
# For how to size pg_num/pgp_num, see "Ceph distributed storage architecture and working principles"
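As a rough rule of thumb (the referenced article covers the full reasoning): total PGs ≈ (number of OSDs × 100) / replica count, rounded to a power of two, then divided among the pools:

# 14 OSDs, 3 replicas: 14 * 100 / 3 ≈ 466 -> round to 512 PGs across all pools
echo $(( 14 * 100 / 3 ))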
[root@kolla ceph]# rbd create disk01 --size 10G --pool test
[root@kolla ceph]# rbd map -p test --image disk01
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable test/disk01 object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
Note: the CentOS 7 kernel is relatively old, so some RBD image features are unsupported by the kernel client and must be disabled manually before the image can be mapped:
rbd feature disable test/disk01 exclusive-lock object-map fast-diff deep-flatten
[root@kolla ceph]# rbd map test/disk01
/dev/rbd0
[root@kolla ceph]# rbd showmapped
id pool image snap device
0 test disk01 - /dev/rbd0
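The mapped device can now be used like any block device; for example, create a filesystem and mount it (the mount point here is arbitrary):

mkfs.xfs /dev/rbd0
mkdir -p /mnt/rbd
mount /dev/rbd0 /mnt/rbd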
Ceph Cluster Providing the CephFS File System
[root@kolla ceph]# yum install -y ceph-fuse
[root@kolla ceph]# ceph-deploy mds create controller1 controller2 controller3
[root@kolla ceph]# ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it
Error EPERM: pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool
Solution:
ceph tell mon.\* injectargs '--mon-allow-pool-delete=true'
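Note that injectargs only changes the running daemons; to make the setting survive mon restarts, it would also need to go into ceph.conf on the mon nodes, roughly:

cat >> /etc/ceph/ceph.conf <<'EOF'
[mon]
mon allow pool delete = true
EOF
# restart only needed if relying on the file rather than injectargs
systemctl restart ceph-mon.target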
ceph osd pool create cephfs_data 128 128
ceph osd pool create cephfs_metadata 128 128
ceph fs new cephfs cephfs_metadata cephfs_data
[root@kolla ceph]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@kolla ceph]# ceph mds stat
cephfs-1/1/1 up {0=controller3=up:active}, 2 up:standby
[root@kolla ceph]# ceph-authtool -p /etc/ceph/ceph.client.admin.keyring
AQBRYwJdezDNCxAAazam8HXAJwmHM8PUn1fp3g==
[root@kolla ceph]# mount -t ceph controller1:6789,controller2:6789,controller3:6789:/ /mnt -o name=admin,secret=AQBRYwJdezDNCxAAazam8HXAJwmHM8PUn1fp3g==
[root@kolla ceph]# df -Th
Filesystem                                            Type      Size   Used  Avail  Use%  Mounted on
/dev/mapper/centos-root                               xfs        50G    29G    22G   58%  /
devtmpfs                                              devtmpfs   32G    12K    32G    1%  /dev
tmpfs                                                 tmpfs      32G      0    32G    0%  /dev/shm
tmpfs                                                 tmpfs      32G   9.0M    32G    1%  /run
tmpfs                                                 tmpfs      32G      0    32G    0%  /sys/fs/cgroup
/dev/mapper/centos-home                               xfs       129G    33M   129G    1%  /home
/dev/sda1                                             xfs      1014M   142M   873M   14%  /boot
tmpfs                                                 tmpfs     6.3G      0   6.3G    0%  /run/user/0
10.100.2.51:6789,10.100.2.52:6789,10.100.2.53:6789:/  ceph       67G      0    67G    0%  /mnt
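Since ceph-fuse was installed above, the same filesystem can also be mounted through FUSE instead of the kernel client; it picks up the admin keyring from /etc/ceph automatically:

ceph-fuse -m controller1:6789,controller2:6789,controller3:6789 /mnt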
Enable the Ceph Dashboard
ceph mgr module enable dashboard
[root@controller1 ~]# ceph dashboard create-self-signed-cert
Self-signed certificate created
[root@kolla ceph]# ceph dashboard set-login-credentials admin admin
Error EIO: Module 'dashboard' has experienced an error and cannot handle commands: No module named 'requests.packages.urllib3'
Unresolved on CentOS 7.5 despite trying various approaches; the likely cause is that the RPM-packaged python-requests on CentOS 7 unbundles urllib3, so the requests.packages.urllib3 import path does not exist. Disable the module for now:
ceph mgr module disable dashboard
Ceph Cluster Providing Object Storage (RGW)
# 1. Installation
[root@kolla ceph]# ceph-deploy rgw create controller1 controller2 controller3
[root@kolla ceph]# ceph -s
  cluster:
    id:     84225393-2c25-46f3-93ee-e2c621323572
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum controller1,controller2,controller3
    mgr: controller2(active), standbys: controller3
    mds: cephfs-1/1/1 up {0=controller3=up:active}, 1 up:standby
    osd: 14 osds: 14 up, 14 in
    rgw: 3 daemons active

  data:
    pools:   7 pools, 416 pgs
    objects: 215 objects, 4.4 KiB
    usage:   14 GiB used, 210 GiB / 224 GiB avail
    pgs:     416 active+clean
# 2. Test access
[root@kolla ceph]# curl -XGET http://controller1:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
[root@kolla ceph]# curl -XGET http://controller2:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
# 3. Using object storage via the S3 API
[root@kolla ~]# radosgw-admin user create --uid="rgwuser" --display-name="This is first rgw test user"
{
    "user_id": "rgwuser",
    "display_name": "This is first rgw test user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [],
    "keys": [
        {
            "user": "rgwuser",
            "access_key": "0N5JFNC9QF1L7XGP1MH1",
            "secret_key": "YHm61QgE8NQpcGCVWKKENSYWx4rwdWnIB0iovdek"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
"access_key": "0N5JFNC9QF1L7XGP1MH1",
"secret_key": "YHm61QgE8NQpcGCVWKKENSYWx4rwdWnIB0iovdek"
vim s3.py
#!/usr/bin/env python
# Python 2 script (the CentOS 7 default); talks to RGW over the S3 API.
import boto
import boto.s3.connection

access_key = "0N5JFNC9QF1L7XGP1MH1"
secret_key = "YHm61QgE8NQpcGCVWKKENSYWx4rwdWnIB0iovdek"

conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='controller1', port=7480,
    is_secure=False,  # RGW is plain HTTP on port 7480
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)

# Create a bucket, then list all buckets owned by this user.
bucket = conn.create_bucket('my-first-s3-bucket')
for bucket in conn.get_all_buckets():
    print "{name}\t{created}".format(
        name=bucket.name,
        created=bucket.creation_date,
    )
[root@kolla ~]# python s3.py
my-first-s3-bucket 2019-06-14T14:20:17.512Z
# 4. Using object storage via the Swift API
To create a subuser (for the Swift interface), you must specify the parent user's ID (--uid={username}), the subuser ID, and the access level:
[root@kolla ~]# radosgw-admin subuser create --uid=rgwuser --subuser=rgwuser:swift --access=full
{
    "user_id": "rgwuser",
    "display_name": "This is first rgw test user",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "auid": 0,
    "subusers": [
        {
            "id": "rgwuser:swift",
            "permissions": "full-control"
        }
    ],
    "keys": [
        {
            "user": "rgwuser",
            "access_key": "0N5JFNC9QF1L7XGP1MH1",
            "secret_key": "YHm61QgE8NQpcGCVWKKENSYWx4rwdWnIB0iovdek"
        }
    ],
    "swift_keys": [
        {
            "user": "rgwuser:swift",
            "secret_key": "Zmbg3nReOj0kerEVX2GQ8ziApXaO7SPbr4KACyTW"
        }
    ],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
[root@kolla ~]# pip install python-swiftclient
[root@kolla ~]# swift -A http://controller1:7480/auth/1.0 -U rgwuser:swift -K "Zmbg3nReOj0kerEVX2GQ8ziApXaO7SPbr4KACyTW" list
my-first-s3-bucket
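With the same credentials, containers and objects can be managed from the CLI as well; a quick sketch (the container and file names here are arbitrary):

swift -A http://controller1:7480/auth/1.0 -U rgwuser:swift -K "Zmbg3nReOj0kerEVX2GQ8ziApXaO7SPbr4KACyTW" upload my-container /etc/hosts
swift -A http://controller1:7480/auth/1.0 -U rgwuser:swift -K "Zmbg3nReOj0kerEVX2GQ8ziApXaO7SPbr4KACyTW" list my-container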
References:
https://www.cnblogs.com/kuku0223/p/8257813.html
https://www.cnblogs.com/itzgr/p/10449791.html
https://blog.csdn.net/wylfengyujiancheng/article/details/85613361