Ceph Pool Management

# List Ceph pools
ceph osd lspools

# Create a pool
osd pool create <poolname> <int[0-]> {<int[0-]>} {replicated|erasure} {<erasure_code_profile>} {<rule>} {<int>} {<int>} {<int[0-]>} {<int[0-]>} {<float[0.0-1.0]>} :  create pool

[root@node1 ~]# ceph osd pool create pool_demo 16 16
pool 'pool_demo' created
[root@node1 ~]# ceph osd lspools
1 ceph-demo
2 .rgw.root
3 default.rgw.control
4 default.rgw.meta
5 default.rgw.log
6 cephfs_data
7 cephfs_metadata
8 pool_demo
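
The usage above also accepts an erasure type; a minimal sketch of creating an erasure-coded pool with the default erasure-code profile (the pool name pool_ec is illustrative, not from the transcript above):

# illustrative: erasure-coded pool using the default erasure-code profile
ceph osd pool create pool_ec 16 16 erasure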

# View and modify pool attributes
[root@node1 ~]# ceph osd pool get pool_demo size
size: 3
[root@node1 ~]# ceph osd pool get pool_demo pg_num
pg_num: 16
[root@node1 ~]# ceph osd pool get pool_demo pgp_num
pgp_num: 16
[root@node1 ~]# ceph osd pool set pool_demo pg_num  32
set pool 8 pg_num to 32
[root@node1 ~]# ceph osd pool set pool_demo pgp_num  32
set pool 8 pgp_num to 32
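
Other attributes follow the same get/set pattern; for example, the replica count could be changed like this (a sketch only; the values are illustrative):

# illustrative: set the replica count and the minimum replicas required to serve I/O
ceph osd pool set pool_demo size 2
ceph osd pool set pool_demo min_size 1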

# Associate the pool with an application
ceph osd pool application enable {pool-name} {application-name}
# application-name=rbd|cephfs|rgw

[root@node1 ~]# ceph osd pool application enable pool_demo rbd
enabled application 'rbd' on pool 'pool_demo'
[root@node1 ~]# ceph osd pool application get pool_demo
{
    "rbd": {}
}
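
With the rbd application enabled, the pool can hold RBD images; a minimal sketch (the image name demo.img and the 1 GiB size are illustrative):

# illustrative: create a 1 GiB image in the pool (size is in MB), then list images
rbd create pool_demo/demo.img --size 1024
rbd ls pool_demo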

# Set pool quotas: limit the maximum number of bytes and/or the maximum number of objects per pool
[root@node1 ~]# ceph osd pool get-quota pool_demo
quotas for pool 'pool_demo':
  max objects: N/A
  max bytes  : N/A
[root@node1 ~]# ceph osd pool set-quota pool_demo max objects 100
Invalid command: max not in max_objects|max_bytes
osd pool set-quota <poolname> max_objects|max_bytes <val> :  set object or byte limit on pool
Error EINVAL: invalid command
[root@node1 ~]# ceph osd pool set-quota pool_demo max_objects 100
set-quota max_objects = 100 for pool pool_demo
[root@node1 ~]# ceph osd pool get-quota pool_demo
quotas for pool 'pool_demo':
  max objects: 100 objects
  max bytes  : N/A
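
A byte quota works the same way, and setting a quota back to 0 removes it (the 10 GiB value below is illustrative):

# illustrative: limit the pool to 10 GiB (value is in bytes)
ceph osd pool set-quota pool_demo max_bytes 10737418240
# setting a quota to 0 removes it
ceph osd pool set-quota pool_demo max_objects 0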

# Delete a pool
ceph osd pool delete {pool-name} [{pool-name} --yes-i-really-really-mean-it]

To delete a pool, the mon_allow_pool_delete flag must be set to true on the monitors; otherwise they will refuse to delete the pool.
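
A minimal sketch of the full procedure, assuming a release where the centralized config database (ceph config set) is available:

# illustrative: allow pool deletion, delete the pool, then disable the flag again
ceph config set mon mon_allow_pool_delete true
ceph osd pool delete pool_demo pool_demo --yes-i-really-really-mean-it
ceph config set mon mon_allow_pool_delete false
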
If you created your own CRUSH rules for a pool, consider deleting them when the pool is no longer needed:

ceph osd pool get {pool-name} crush_rule
For example, if the rule is "123", you can check whether any other pool uses it:

ceph osd dump | grep "^pool" | grep "crush_rule 123"
If no other pool uses that custom rule, it is safe to delete the rule from the cluster.
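
Removing such a rule could then look like this (the rule name my_custom_rule is illustrative; ceph osd crush rule ls shows the existing names):

ceph osd crush rule ls
ceph osd crush rule rm my_custom_rule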

If you created users whose permissions are tied to a pool that no longer exists, consider deleting those users as well:

ceph auth ls | grep -C 5 {pool-name}
ceph auth del {user}

# Rename a pool
ceph osd pool rename {current-pool-name} {new-pool-name}
If you rename a pool and an authenticated user has per-pool capabilities, you must update that user's capabilities (i.e., caps) to reference the new pool name.
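
A sketch of updating such caps (the user name client.demo and the capability strings are illustrative):

# illustrative: point the user's osd capability at the renamed pool
ceph auth caps client.demo mon 'profile rbd' osd 'profile rbd pool=new_pool_name'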

# Show pool statistics
rados df
ceph osd pool stats [{pool-name}]

[root@node1 ~]# rados df
POOL_NAME              USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS      RD WR_OPS      WR USED COMPR UNDER COMPR
.rgw.root           768 KiB       4      0     12                  0       0        0      0     0 B      4   4 KiB        0 B         0 B
ceph-demo           653 MiB      79      0    237                  0       0        0   1684 5.6 MiB    285 225 MiB        0 B         0 B
cephfs_data             0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B
cephfs_metadata     1.5 MiB      22      0     66                  0       0        0      0     0 B     49  17 KiB        0 B         0 B
default.rgw.control     0 B       8      0     24                  0       0        0      0     0 B      0     0 B        0 B         0 B
default.rgw.log         0 B     175      0    525                  0       0        0  63063  61 MiB  42053     0 B        0 B         0 B
default.rgw.meta        0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B
pool_demo               0 B       0      0      0                  0       0        0      0     0 B      0     0 B        0 B         0 B

total_objects    288
total_used       6.7 GiB
total_avail      143 GiB
total_space      150 GiB
[root@node1 ~]# ceph osd pool stats pool_demo
pool pool_demo id 8
  nothing is going on