1. ceph -s: check the cluster status
[root@admin-node ~]# ceph -s
    cluster 99f00338-a334-4f90-a579-496a934f25c0
     health HEALTH_WARN
            109 pgs degraded
            9 pgs recovering
            96 pgs recovery_wait
            109 pgs stuck unclean
            recovery 38892/105044 objects degraded (37.024%)
     monmap e1: 1 mons at {admin-node=192.168.13.171:6789/0}
            election epoch 3, quorum 0 admin-node
     osdmap e146: 20 osds: 20 up, 20 in
            flags sortbitwise
      pgmap v6669: 320 pgs, 3 pools, 205 GB data, 52522 objects
            424 GB used, 74059 GB / 74484 GB avail
            38892/105044 objects degraded (37.024%)
                 211 active+clean
                  96 active+recovery_wait+degraded
                   9 active+recovering+degraded
                   4 active+degraded
recovery io 95113 kB/s, 23 objects/s
  client io 215 MB/s rd, 53 op/s rd, 0 op/s wr
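To follow a recovery like the one above as it progresses, two standard invocations (not shown in the original transcript) are handy: ceph -w prints the same summary and then streams cluster status changes live, and --format json-pretty makes the status machine-readable:

[root@admin-node ~]# ceph -w
[root@admin-node ~]# ceph -s --format json-pretty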
2. ceph health: check the cluster health
[root@admin-node ~]# ceph health
HEALTH_WARN 104 pgs degraded; 7 pgs recovering; 93 pgs recovery_wait; 104 pgs stuck unclean; recovery 36306/105044 objects degraded (34.563%)
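The one-line summary can be expanded with ceph health detail (a standard option, not used in the original article), which lists each degraded or stuck PG individually:

[root@admin-node ~]# ceph health detail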
3. ceph osd tree: check the OSDs in the CRUSH map
[root@admin-node ~]# ceph osd tree
ID WEIGHT   TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 20.00000 root default
-2 10.00000     host node2
 0  1.00000         osd.0       up  1.00000          1.00000
 1  1.00000         osd.1       up  1.00000          1.00000
 2  1.00000         osd.2       up  1.00000          1.00000
 3  1.00000         osd.3       up  1.00000          1.00000
 4  1.00000         osd.4       up  1.00000          1.00000
 5  1.00000         osd.5       up  1.00000          1.00000
 6  1.00000         osd.6       up  1.00000          1.00000
 7  1.00000         osd.7       up  1.00000          1.00000
 8  1.00000         osd.8       up  1.00000          1.00000
 9  1.00000         osd.9       up  1.00000          1.00000
-3 10.00000     host node3
12  1.00000         osd.12      up  1.00000          1.00000
13  1.00000         osd.13      up  1.00000          1.00000
14  1.00000         osd.14      up  1.00000          1.00000
15  1.00000         osd.15      up  1.00000          1.00000
16  1.00000         osd.16      up  1.00000          1.00000
17  1.00000         osd.17      up  1.00000          1.00000
18  1.00000         osd.18      up  1.00000          1.00000
19  1.00000         osd.19      up  1.00000          1.00000
20  1.00000         osd.20      up  1.00000          1.00000
21  1.00000         osd.21      up  1.00000          1.00000
[root@admin-node ~]#
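Beyond the tree view, the full CRUSH map can be exported and decompiled for inspection with the standard getcrushmap/crushtool pair; the file names below are just placeholders:

[root@admin-node ~]# ceph osd getcrushmap -o crushmap.bin
[root@admin-node ~]# crushtool -d crushmap.bin -o crushmap.txt
[root@admin-node ~]# cat crushmap.txt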
4. ceph df: check the cluster usage
[root@admin-node ~]# ceph df
GLOBAL:
    SIZE       AVAIL      RAW USED     %RAW USED
    74484G     74060G         423G          0.57
POOLS:
    NAME      ID     USED       %USED     MAX AVAIL     OBJECTS
    rbd       0       6168M      0.01        36535G        1542
    pool1     1      89872M      0.18        36535G       22469
    pool2     2        111G      0.21        36535G       28510
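For a finer-grained view, two standard variants (not part of the original article) break the usage down further: ceph df detail adds per-pool read/write and raw-used columns, and ceph osd df reports utilization per OSD, which helps spot unbalanced disks:

[root@admin-node ~]# ceph df detail
[root@admin-node ~]# ceph osd df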
5. View the Ceph storage pools
[root@node3 bin]# ceph osd lspools
0 rbd,1 pool1,2 pool2,
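The same information is available in friendlier forms; rados lspools prints one pool per line, and ceph osd dump includes each pool's replica size and pg_num (both are standard commands, added here as a supplement):

[root@node3 ~]# rados lspools
[root@node3 ~]# ceph osd dump | grep -i pool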
6. Check the Ceph monitor quorum status
[root@node3 ~]# ceph quorum_status --format json-pretty
{
    "election_epoch": 4,
    "quorum": [
        0
    ],
    "quorum_names": [
        "admin-node"
    ],
    "quorum_leader_name": "admin-node",
    "monmap": {
        "epoch": 1,
        "fsid": "99f00338-a334-4f90-a579-496a934f25c0",
        "modified": "2016-09-22 04:34:19.863934",
        "created": "2016-09-22 04:34:19.863934",
        "mons": [
            {
                "rank": 0,
                "name": "admin-node",
                "addr": "192.168.13.171:6789\/0"
            }
        ]
    }
}
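When only a quick answer is needed, ceph mon stat (a standard command, added here for reference) condenses the quorum information into a single line:

[root@node3 ~]# ceph mon stat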
7. Dump the Ceph monitor information
[root@node3 ~]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 99f00338-a334-4f90-a579-496a934f25c0
last_changed 2016-09-22 04:34:19.863934
created 2016-09-22 04:34:19.863934
0: 192.168.13.171:6789/0 mon.admin-node
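The monmap can also be exported in binary form and inspected offline with monmaptool; both are standard tools, and the path below is just an example:

[root@node3 ~]# ceph mon getmap -o /tmp/monmap
[root@node3 ~]# monmaptool --print /tmp/monmap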
8. Check the OSD and PG status
[root@node3 ~]# ceph osd stat
osdmap e277: 20 osds: 20 up, 20 in
flags sortbitwise
[root@node3 ~]#
[root@node3 ~]# ceph pg stat
v19994: 320 pgs: 320 active+clean; 2030 GB data, 658 GB used, 73826 GB / 74484 GB avail
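To drill into an individual placement group, ceph pg map shows which OSDs a PG is stored on, and ceph pg dump_stuck lists PGs stuck in a given state; both are standard commands, and the PG id 0.1f below is only an example:

[root@node3 ~]# ceph pg map 0.1f
[root@node3 ~]# ceph pg dump_stuck unclean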
9. List the Ceph storage pools
[root@node3 ~]# ceph osd lspools
0 rbd,1 pool1,2 pool2,
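Individual pool parameters can be read with ceph osd pool get (a standard command, shown here against the pool1 pool from the listing above):

[root@node3 ~]# ceph osd pool get pool1 size
[root@node3 ~]# ceph osd pool get pool1 pg_num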
10. List the cluster's authentication keys
[root@node3 ~]# ceph auth list
installed auth entries:
osd.0
        key: AQCcmONX78yyGBAARd1khxgyH4sWvQZ8MzOK3w==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.1
        key: AQA3h+RXLpxgNhAA66aGpLH3BwgCTuedzqEV3g==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.12
        key: AQC9meNXRJrlIBAAuwj7upKKqLEt/L1x0Rfg1w==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.13
        key: AQD4nONXCmlEExAAekslqtPJNkUuGdTbUrXzLg==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.14
        key: AQCfneNXmGkEERAASfUPnf3BV/twV1Ny/Sr5Pg==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.15
        key: AQCineNXd6l5LxAA7Oi5dIdVneuRK6vLuaL75g==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.16
        key: AQClneNXPLdlNxAA1Sh9494cgCyOc9Kdu//GOg==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.17
        key: AQCpneNXj0UnNhAAC7PwDYhbB/XX9EHQIB+HDQ==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.18
        key: AQCsneNXyHthOhAAJCdCkMJDT6LT/76wgckD/Q==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.19
        key: AQCwneNXlAjbDRAASTEjJLSKHTVM25GdZ0iWxg==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.2
        key: AQBDh+RX8z5/KRAARehONU5MOVHvXK0A8NE4tw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.20
        key: AQCzneNX4B0fNRAAE9mptVaMXf0s5xNXWCCqsw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.21
        key: AQC2neNXbpWOOxAADvLSlnIroJTkfcVai9ZxBw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.3
        key: AQBPh+RXJTmxBBAAxhEkPfPPpDecn28z73D2wQ==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.4
        key: AQBah+RXeZC8DhAARDrqMO1Wip9Qhq/CuHxCOQ==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.5
        key: AQBlh+RXXfwgIRAAs65Qf1c5brueHPhpDMAv2w==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.6
        key: AQBxh+RXOxDjMhAA0YoZvHQSx6frea6kwvzE7g==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.7
        key: AQB/h+RX5o0WNxAAB68I7SY6Ek19fSElWdPA+A==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.8
        key: AQCPh+RXqSrXKBAAxLt0IIOKzAkwe5+gPJg/Sw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.9
        key: AQCfh+RXSwc9FBAAt6cJoPICbo/RqKGhIuTKLw==
        caps: [mon] allow profile osd
        caps: [osd] allow *
client.admin
        key: AQBDl+NX6NHFERAAPIruW10XwUpHdUSighhfmg==
        auid: 0
        caps: [mds] allow
        caps: [mon] allow *
        caps: [osd] allow *
[root@node3 ~]#
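A single entity's key can be fetched with ceph auth get (also a standard command); for example, the client.admin entry shown above, whose keyring the CLI normally reads from /etc/ceph/ceph.client.admin.keyring:

[root@node3 ~]# ceph auth get client.admin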
Reprinted from the OpenStack2015 blog. Original link: http://blog.51cto.com/andyliu/1856475. To republish, please contact the original author.