Configuring a Ceph Client to Auto-Mount RBD Storage

1. Install the Ceph client packages
ceph-common provides the ceph and rbd command-line tools; the RBD kernel module itself ships with the kernel. Pick the command for your distribution:
CentOS:
yum install -y ceph-common
Ubuntu:
apt install -y ceph-common
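A quick sanity check that the client tooling is in place (the version string will vary with your release):
ceph --version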

2. Copy the cluster config and admin keyring to the client
cephadmin@ceph-deploy:~/ceph-cluster$ sudo scp ceph.conf ceph.client.admin.keyring root@<client IP>:/etc/ceph
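If /etc/ceph does not exist on the client yet, create it before copying, and consider tightening the keyring permissions afterwards (a minimal sketch; <client IP> is a placeholder):
ssh root@<client IP> "mkdir -p /etc/ceph"
ssh root@<client IP> "chmod 600 /etc/ceph/ceph.client.admin.keyring"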

Test that the client can retrieve the cluster status:
[root@ceph-client-centos ~]# ceph -s
  cluster:
    id:     fbcd7dfd-c0b1-420e-a1c3-5eb5002c0cd3
    health: HEALTH_WARN
            clock skew detected on mon.ceph-mon02
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 43m)
    mgr: ceph-mgr01(active, since 25h), standbys: ceph-mgr02
    osd: 8 osds: 8 up (since 44m), 8 in (since 31h)
 
  data:
    pools:   3 pools, 97 pgs
    objects: 39 objects, 30 MiB
    usage:   284 MiB used, 2.3 TiB / 2.3 TiB avail
    pgs:     97 active+clean
If the status comes back like this, the client is talking to the cluster correctly.


3. Create storage resources and configure image features
Create a block device image named myimg1 in the myrbd1 pool:
cephadmin@ceph-deploy:~$ rbd create myimg1 --size 5G --pool myrbd1
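This assumes the myrbd1 pool already exists. If it does not, it can be created first (a sketch; the PG count of 64 matches the pool shown later in ceph df, but tune it for your cluster):
ceph osd pool create myrbd1 64 64
rbd pool init myrbd1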

Create a second image, myimg2, restricted to the layering feature:
cephadmin@ceph-deploy:~$ rbd create myimg2 --size 3G --pool myrbd1 --image-format 2 --image-feature layering


Check:
cephadmin@ceph-deploy:~$ rbd ls --pool myrbd1
myimg1
myimg2


Relationship diagram:
Ceph cluster
  └── Pool
        └── myrbd1
              ├── myimg1 (RBD image)
              └── myimg2 (RBD image)

# Inspect a specific RBD image
cephadmin@ceph-deploy:~$ rbd --image myimg1 --pool myrbd1 info
rbd image 'myimg1':
        size 5 GiB in 1280 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 6251552d851b
        block_name_prefix: rbd_data.6251552d851b
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  # default features, since none were specified at creation
        op_features: 
        flags: 
        create_timestamp: Wed May 29 21:50:49 2024
        access_timestamp: Wed May 29 21:50:49 2024
        modify_timestamp: Wed May 29 21:50:49 2024

cephadmin@ceph-deploy:~/ceph-cluster$  rbd --image myimg2 --pool myrbd1 info
rbd image 'myimg2':
	size 3 GiB in 768 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 1deb2b0c736e
	block_name_prefix: rbd_data.1deb2b0c736e
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Wed May 29 22:29:56 2024
	access_timestamp: Wed May 29 22:29:56 2024
	modify_timestamp: Wed May 29 22:29:56 2024


Disabling features [optional in general, but needed here]:
Kernel RBD clients on older kernels (such as CentOS 7's 3.10) do not support object-map, fast-diff, or deep-flatten, so mapping myimg1 would fail until they are disabled:
cephadmin@ceph-deploy:~/ceph-cluster$ rbd feature disable myrbd1/myimg1 object-map fast-diff deep-flatten
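Confirm that layering is now the only feature left on myimg1:
rbd --image myimg1 --pool myrbd1 info | grep features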


Check current usage:
cephadmin@ceph-deploy:~$ ceph df 
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
ssd    2.3 TiB  2.3 TiB  296 MiB   296 MiB       0.01
TOTAL  2.3 TiB  2.3 TiB  296 MiB   296 MiB       0.01
--- POOLS ---
POOL    ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
.mgr     1    1  577 KiB        2  1.7 MiB      0    760 GiB
myrbd1  11   64    405 B        7   48 KiB      0    760 GiB
# The 760 GiB MAX AVAIL here is roughly the 2.3 TiB raw capacity divided by 3, because the pool keeps 3 replicas.
Optional:
Check that the pool really is 3-replica:
cephadmin@ceph-deploy:~$ ceph osd pool get myrbd1 size
size: 3

Change the replica count (3 is already the default; shown for reference):
cephadmin@ceph-deploy:~$ ceph osd pool set myrbd1 size 3
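min_size is worth checking at the same time: it is the number of replicas that must be up for the pool to keep serving I/O (an optional check, not required for this setup):
ceph osd pool get myrbd1 min_size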


4. Map the Ceph images on the client
1. Manual mapping:
Map the images from pool myrbd1 as local block devices:
[root@ceph-client-centos ~]# rbd -p myrbd1 map myimg1
/dev/rbd0

[root@ceph-client-centos ~]# rbd -p myrbd1 map myimg2
/dev/rbd1

Check the newly mapped RBD devices:
[root@ceph-client-centos ~]# lsblk 
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0  100G  0 disk 
├─sda1   8:1    0  190M  0 part /boot
└─sda2   8:2    0 99.8G  0 part /
sr0     11:0    1 1024M  0 rom  
rbd0   253:0    0    5G  0 disk
rbd1   253:16   0    3G  0 disk
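rbd showmapped prints the pool/image-to-device association directly, which is handier than lsblk when several images are mapped:
[root@ceph-client-centos ~]# rbd showmapped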

Unmap [optional]:
rbd unmap /dev/rbd1

Verify from the client:
[root@ceph-client-centos ~]# ceph osd pool ls
.mgr
myrbd1

[root@ceph-client-centos ~]# rbd ls --pool myrbd1
myimg1
myimg2


2. Automatic mapping (configured as a system service)
2.1. Create the config file: an unindented line names the pool, and each indented line below it is an "image mount_point" pair
[root@ceph-client-centos ~]# cat /etc/rbd_mount.conf
myrbd1
  myimg1 /mysql-1
  myimg2 /mysql-2

2.2. Create the mount script
[root@ceph-client-centos ~]# cat /etc/init.d/ceph-mount-rbd.sh
Contents:
#-------------------------------------------------------------------#
#!/bin/bash
# chkconfig: 345 20 80
# description: auto-mount Ceph RBD devices
log_file="/var/log/ceph-mount.log"
mapping_file="/etc/rbd_mapping"

log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$log_file"
}
log "script starting"
case "$1" in
  start)
        log "reading config file and mounting RBD devices"
        if [ ! -f "$mapping_file" ]; then
            touch "$mapping_file"
        fi
        pool=""
        declare -A mappings
        # load any existing device-to-mount-point records
        if [ -s "$mapping_file" ]; then
            while read -r line; do
                device=$(echo "$line" | awk '{print $1}')
                mount_point=$(echo "$line" | awk '{print $2}')
                mappings[$mount_point]=$device
            done < "$mapping_file"
        fi
        # truncate the mapping file; it is rebuilt below
        > "$mapping_file"
        # parse the config file
        while read -r line; do
            log "read line: $line"
            # skip comments and empty lines
            if [[ $line =~ ^# ]] || [[ -z $line ]]; then
                log "skipping line: $line"
                continue
            fi
            # a single token on a line names the pool
            if [[ $line =~ ^\ *([^\ ]+)\ *$ ]]; then
                pool=${BASH_REMATCH[1]}
                log "pool set to: $pool"
            # two tokens form an "image mount_point" pair
            elif [[ $line =~ ^\ *([^\ ]+)\ *([^\ ]+)\ *$ ]]; then
                image=${BASH_REMATCH[1]}
                mount_point=${BASH_REMATCH[2]}
                log "mounting Ceph RBD image $image at $mount_point"
                # map the image; rbd prints the device path on stdout
                mapped_device=$(rbd map -p "$pool" "$image" --id admin --keyring /etc/ceph/ceph.client.admin.keyring)
                if [ $? -ne 0 ]; then
                    log "rbd map failed"
                    exit 1
                fi
                log "mapped device: $mapped_device"
                if [ -n "$mapped_device" ]; then
                    # wait for the device node to appear in /dev
                    udevadm settle
                    if [ ! -e "$mapped_device" ]; then
                        log "device $mapped_device not present yet, waiting for udev"
                        sleep 5
                    fi

                    if [ ! -e "$mapped_device" ]; then
                        log "device $mapped_device still missing, mapping failed"
                        exit 1
                    fi

                    # record the mapping
                    echo "$mapped_device $mount_point" >> "$mapping_file"
                    # format the device only if it has no filesystem yet
                    fs_type=$(blkid -o value -s TYPE "$mapped_device")
                    if [ -z "$fs_type" ]; then
                        log "device has no filesystem, formatting: $mapped_device"
                        mkfs.ext4 "$mapped_device"
                    else
                        log "device already formatted, filesystem type: $fs_type"
                    fi

                    # mount the device
                    mkdir -p "$mount_point"
                    mount "$mapped_device" "$mount_point"
                    if [ $? -eq 0 ]; then
                        log "mounted $mapped_device at $mount_point"
                    else
                        log "failed to mount $mapped_device at $mount_point"
                        exit 1
                    fi
                else
                    log "no mapped device found"
                    exit 1
                fi
            else
                log "invalid config line: $line"
            fi
        done < /etc/rbd_mount.conf
        ;;
  stop)
        log "unmounting everything and unmapping devices"
        # walk the mapping file written by start
        if [ -s "$mapping_file" ]; then
            while read -r line; do
                device=$(echo $line | awk '{print $1}')
                mount_point=$(echo $line | awk '{print $2}')
                # unmount the device
                umount "$mount_point"
                if [ $? -eq 0 ]; then
                    log "unmounted $device from $mount_point"
                else
                    log "failed to unmount $device from $mount_point"
                fi
                # release the RBD mapping
                rbd unmap "$device"
                if [ $? -eq 0 ]; then
                    log "unmapped $device"
                else
                    log "failed to unmap $device"
                fi
            done < "$mapping_file"
        fi
        ;;
  *)
        echo "Usage: $0 {start|stop}"
        exit 1
esac

exit 0
#-------------------------------------------------------------------#
chmod +x /etc/init.d/ceph-mount-rbd.sh
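Before wiring the script into systemd, it can be exercised by hand and checked against its log (paths as defined in the script above):
/etc/init.d/ceph-mount-rbd.sh start
tail -n 20 /var/log/ceph-mount.log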



Create the systemd unit file:
vim /etc/systemd/system/ceph-rbd-mount.service
#-------------------------------------------------------------------#
[Unit]
Description=Ceph RBD auto-mount service
After=network.target

[Service]
Type=oneshot
ExecStart=/etc/init.d/ceph-mount-rbd.sh start
ExecStop=/etc/init.d/ceph-mount-rbd.sh stop
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
#-------------------------------------------------------------------#

systemctl daemon-reload
systemctl enable ceph-rbd-mount.service
systemctl start ceph-rbd-mount.service
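Check that the unit ran cleanly, then verify the mounts:
systemctl status ceph-rbd-mount.service
journalctl -u ceph-rbd-mount.service --no-pager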



[root@ceph-client-centos ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs        980M     0  980M   0% /dev
tmpfs           991M     0  991M   0% /dev/shm
tmpfs           991M  9.6M  981M   1% /run
tmpfs           991M     0  991M   0% /sys/fs/cgroup
/dev/sda2       100G  3.1G   97G   4% /
/dev/sda1       187M  109M   78M  59% /boot
tmpfs           199M     0  199M   0% /run/user/0
/dev/rbd0       4.8G   20M  4.6G   1% /mysql-1
/dev/rbd1       2.9G  9.0M  2.8G   1% /mysql-2
[root@ceph-client-centos ~]# cat /etc/rbd_mount.conf 
myrbd1
  myimg1 /mysql-1
  myimg2 /mysql-2