1: The CephX Authentication Mechanism
# On a node host
test@ceph-node1:~$ cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
        key = AQD55h9h5ICUJBAAfk/2gBzkwU+G8bfqY023Yg==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
1.4 Ceph authorization and capabilities

Capability reference:
r: grants read access. Required on the monitor (mon) in order to retrieve the CRUSH map.
w: grants write access to objects.
x: grants the ability to call class methods (both read and write) and to perform auth operations on the monitor.
*: grants read, write and execute access to the given daemon/pool, plus the ability to run admin commands.
class-read: grants the ability to call class read methods; a subset of x.
class-write: grants the ability to call class write methods; a subset of x.

MON capabilities:
Include r, w, x and profile <name>, for example:
    mon 'allow rwx'
    mon 'allow profile osd'
OSD capabilities:
Include r, w, x, class-read, class-write and profile osd. OSD capabilities can additionally be restricted to specific pools and namespaces.
MDS capabilities:
Only allow is needed (an empty capability also means allow), for example:
    mds 'allow'
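As an illustration, the capability types above can be combined in a single authorization when creating a user. This is a hedged sketch: client.rbd-demo and the pool name mypool are placeholders, not accounts that exist in this cluster.

# Hypothetical example: read-only on the monitors, full object access limited to one pool,
# plus a plain MDS allow; adjust the entity name and pool to your environment.
ceph auth get-or-create client.rbd-demo \
    mon 'allow r' \
    osd 'allow rwx pool=mypool' \
    mds 'allow'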
1.5 Listing users

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph auth ls    # list all users
test@ceph-deploy:~/ceph-cluster$ ceph auth get osd.10
[osd.10]
        key = AQDNBilhkPDRKRAABW8mMaGrYMwYHVVVjtOU0g==
        caps mgr = "allow profile osd"
        caps mon = "allow profile osd"
        caps osd = "allow *"
exported keyring for osd.10
test@ceph-deploy:~/ceph-cluster$ ceph auth get osd    # write keyring file with requested key
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.admin
[client.admin]
        key = AQD55h9h5ICUJBAAfk/2gBzkwU+G8bfqY023Yg==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
exported keyring for client.admin
1.6 Ceph user management

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph auth ls
mds.ceph-mgr1
        key: AQA5UyhhXsY/MBAAgv/L+/cKMPx4fy+V2Cm+vg==
        caps: [mds] allow
        caps: [mon] allow profile mds
        caps: [osd] allow rwx
osd.0
        key: AQAswSBh2jDUERAA+jfMZKocn+OjdFYZf7lrbg==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.1
        key: AQBjwSBhhYroNRAAO5+aqRxoaYGiMnI8FZegZw==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.10
        key: AQDNBilhkPDRKRAABW8mMaGrYMwYHVVVjtOU0g==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *
osd.11
        key: AQDfBilhKPzvGBAAVx7+GDBZlXkdRdLQM/qypw==
        caps: [mgr] allow profile osd
        caps: [mon] allow profile osd
        caps: [osd] allow *

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph auth -h
auth add <entity> [<caps>...]
# Add an authentication key
test@ceph-deploy:~/ceph-cluster$ ceph auth add client.tom mon 'allow r' osd 'allow rwx pool=mypool'
added key for client.tom
# Verify the key
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.tom
[client.tom]
        key = AQBvVipheB/5DhAAaABVJGZbBlneBJUNoWfowg==
        caps mon = "allow r"
        caps osd = "allow rwx pool=mypool"
exported keyring for client.tom
1.62.2 ceph auth get-or-create
# Create a user
test@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.test mon 'allow r' osd 'allow rwx pool=mypool'
[client.test]
        key = AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
# Verify the user
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.test
[client.test]
        key = AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
        caps mon = "allow r"
        caps osd = "allow rwx pool=mypool"
exported keyring for client.test
# Run get-or-create again: the existing key is returned instead of a new one being created
test@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.test mon 'allow r' osd 'allow rwx pool=mypool'
[client.test]
        key = AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
1.62.3 ceph auth get-or-create-key
# If the user already has a key it is returned; otherwise the user is created
test@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create-key client.test mon 'allow r' osd 'allow rwx pool=mypool'
AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
1.62.4 ceph auth print-key
# Get the key of a single specified user
# (print-key emits no trailing newline, so the next shell prompt appears right after the key)
test@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.test
AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
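Because print-key writes only the bare key, it is convenient for producing the key files used later for mounting. A minimal sketch; the output path is illustrative:

# Save the bare key to a file that can later be passed to mount -o secretfile=...
ceph auth print-key client.test > /etc/ceph/client.test.key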
1.62.5 Modifying user capabilities

# Check the user's current capabilities
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.test
[client.test]
        key = AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
        caps mon = "allow r"
        caps osd = "allow rwx pool=mypool"
exported keyring for client.test
# Modify the capabilities
test@ceph-deploy:~/ceph-cluster$ ceph auth caps client.test mon 'allow r' osd 'allow rw pool=mypool'
updated caps for client.test
# Verify the change
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.test
[client.test]
        key = AQAYVyphyzZdGxAAYZlScsmbAf3mK9zyuaod6g==
        caps mon = "allow r"
        caps osd = "allow rw pool=mypool"
exported keyring for client.test

# Delete a user
test@ceph-deploy:~/ceph-cluster$ ceph auth del client.tom
updated
1.7 Keyring management

Entity names take the form {client, mon, mds, osd}.<name>, for example client.admin or osd.10.

1.71 Backing up and restoring users with keyring files

Command format for creating a keyring file:
ceph-authtool --create-keyring FILE
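A keyring can also be created and populated in one step with ceph-authtool; note that this is purely a local operation, so the generated key still has to be registered with the cluster afterwards. A sketch, with client.demo as a placeholder entity:

# Create a keyring, generate a key for client.demo with the desired caps locally,
# then import it into the cluster's auth database.
ceph-authtool --create-keyring ceph.client.demo.keyring \
    --gen-key -n client.demo \
    --cap mon 'allow r' --cap osd 'allow rwx pool=mypool'
ceph auth import -i ceph.client.demo.keyring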
1.71.1 导出用户认证信息至 keyring 文件
#deploy节点 #创建用户 test@ceph-deploy:~/ceph-cluster$ ceph auth get-or-create client.user1 mon ‘allow r‘ osd ‘allow * pool=mypool‘ [client.user1] key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA== #验证用户 test@ceph-deploy:~/ceph-cluster$ ceph auth get client.user1 [client.user1] key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA== caps mon = "allow r" caps osd = "allow * pool=mypool" exported keyring for client.user1 #创建keyring 文件 test@ceph-deploy:~/ceph-cluster$ ceph-authtool --create-keyring ceph.client.user1.keyring creating ceph.client.user1.keyring #验证 keyring 文件 test@ceph-deploy:~/ceph-cluster$ cat ceph.client.user1.keyring test@ceph-deploy:~/ceph-cluster$ file ceph.client.user1.keyring ceph.client.user1.keyring: empty #空文件 #导出 keyring 至指定文件 test@ceph-deploy:~/ceph-cluster$ ceph auth get client.user1 -o ceph.client.user1.keyring exported keyring for client.user1 #验证指定用户的 keyring 文件 test@ceph-deploy:~/ceph-cluster$ cat ceph.client.user1.keyring [client.user1] key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA== caps mon = "allow r" caps osd = "allow * pool=mypool"
1.71.2 Restoring a user's authentication info from a keyring file

# Verify the user's keyring file
test@ceph-deploy:~/ceph-cluster$ cat ceph.client.user1.keyring
[client.user1]
        key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA==
        caps mon = "allow r"
        caps osd = "allow * pool=mypool"
# Simulate accidental deletion of the user
test@ceph-deploy:~/ceph-cluster$ ceph auth del client.user1
updated
# Verify the user is gone
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.user1
Error ENOENT: failed to find client.user1 in keyring
# Import the user's keyring
test@ceph-deploy:~/ceph-cluster$ ceph auth import -i ceph.client.user1.keyring
imported keyring
# Verify the user is back
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.user1
[client.user1]
        key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA==
        caps mon = "allow r"
        caps osd = "allow * pool=mypool"
exported keyring for client.user1
1.72 Multiple users in one keyring file

# Create an empty keyring file
test@ceph-deploy:~/ceph-cluster$ ceph-authtool --create-keyring ceph.client.user.keyring
creating ceph.client.user.keyring
# Import the admin user's keyring into the user keyring file
test@ceph-deploy:~/ceph-cluster$ ceph-authtool ./ceph.client.user.keyring --import-keyring ./ceph.client.admin.keyring
importing contents of ./ceph.client.admin.keyring into ./ceph.client.user.keyring
# Verify the keyring file
test@ceph-deploy:~/ceph-cluster$ ceph-authtool -l ./ceph.client.user.keyring
[client.admin]
        key = AQD55h9h5ICUJBAAfk/2gBzkwU+G8bfqY023Yg==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
# Import another user's keyring as well
test@ceph-deploy:~/ceph-cluster$ ceph-authtool ./ceph.client.user.keyring --import-keyring ./ceph.client.user1.keyring
importing contents of ./ceph.client.user1.keyring into ./ceph.client.user.keyring
# Verify that the keyring file now contains both users' credentials
test@ceph-deploy:~/ceph-cluster$ ceph-authtool -l ./ceph.client.user.keyring
[client.admin]
        key = AQD55h9h5ICUJBAAfk/2gBzkwU+G8bfqY023Yg==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[client.user1]
        key = AQB6WiphsylPERAALnVZ0wMPapQ0lb3ehDdrVA==
        caps mon = "allow r"
        caps osd = "allow * pool=mypool"
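When a keyring file holds several users, ceph-authtool can also extract just one entry, which is handy when handing a single credential to a client. A small sketch based on the combined file built above:

# Print only client.user1's key from the multi-user keyring
ceph-authtool ./ceph.client.user.keyring -n client.user1 --print-key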
# On the deploy node
# Create a storage pool
test@ceph-deploy:~/ceph-cluster$ ceph osd pool create rbd-data1 32 32
pool 'rbd-data1' created
# Verify the pool
test@ceph-deploy:~/ceph-cluster$ ceph osd pool ls
device_health_metrics
mypool
myrbd1
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
cephfs-metadata
cephfs-data
rbd-data1
# Enable the rbd application on the pool
test@ceph-deploy:~/ceph-cluster$ ceph osd pool application enable -h
osd pool application enable <pool> <app> [--yes-i-really-mean-it]
test@ceph-deploy:~/ceph-cluster$ ceph osd pool application enable rbd-data1 rbd
enabled application 'rbd' on pool 'rbd-data1'
# Initialize the pool for RBD
test@ceph-deploy:~/ceph-cluster$ rbd pool init -p rbd-data1
2.3 Creating RBD images

An rbd pool cannot be used as a block device directly. Images must first be created in the pool as needed, and it is the image that is used as the block device. The rbd command creates, lists and deletes block-device images, and also handles management operations such as cloning images, creating snapshots, rolling an image back to a snapshot, and listing snapshots.

# On the deploy node
# Create two images
test@ceph-deploy:~/ceph-cluster$ rbd create data-img1 --size 3G --pool rbd-data1 --image-format 2 --image-feature layering
test@ceph-deploy:~/ceph-cluster$ rbd create data-img2 --size 5G --pool rbd-data1 --image-format 2 --image-feature layering
# Verify the images
test@ceph-deploy:~/ceph-cluster$ rbd ls --pool rbd-data1
data-img1
data-img2
# List the images with details
test@ceph-deploy:~/ceph-cluster$ rbd ls --pool rbd-data1 -l
NAME       SIZE   PARENT  FMT  PROT  LOCK
data-img1  3 GiB            2
data-img2  5 GiB            2
2.32 Viewing detailed image information

# On the deploy node
# View details of data-img2
test@ceph-deploy:~/ceph-cluster$ rbd --image data-img2 --pool rbd-data1 info
rbd image 'data-img2':
        size 5 GiB in 1280 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 12468e5b9a04b
        block_name_prefix: rbd_data.12468e5b9a04b
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Sun Aug 29 00:08:51 2021
        access_timestamp: Sun Aug 29 00:08:51 2021
        modify_timestamp: Sun Aug 29 00:08:51 2021
# View details of data-img1
test@ceph-deploy:~/ceph-cluster$ rbd --image data-img1 --pool rbd-data1 info
rbd image 'data-img1':
        size 3 GiB in 768 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1245f7ae95595
        block_name_prefix: rbd_data.1245f7ae95595
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Sun Aug 29 00:08:41 2021
        access_timestamp: Sun Aug 29 00:08:41 2021
        modify_timestamp: Sun Aug 29 00:08:41 2021
2.33 Displaying image information in JSON format

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ rbd ls --pool rbd-data1 -l --format json --pretty-format
[
    {
        "image": "data-img1",
        "id": "1245f7ae95595",
        "size": 3221225472,
        "format": 2
    },
    {
        "image": "data-img2",
        "id": "12468e5b9a04b",
        "size": 5368709120,
        "format": 2
    }
]
# On the deploy node
# Enable additional image features
test@ceph-deploy:~/ceph-cluster$ rbd feature enable exclusive-lock --pool rbd-data1 --image data-img1
test@ceph-deploy:~/ceph-cluster$ rbd feature enable object-map --pool rbd-data1 --image data-img1
test@ceph-deploy:~/ceph-cluster$ rbd feature enable fast-diff --pool rbd-data1 --image data-img1
# Verify the image features
test@ceph-deploy:~/ceph-cluster$ rbd --image data-img1 --pool rbd-data1 info
rbd image 'data-img1':
        size 3 GiB in 768 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1245f7ae95595
        block_name_prefix: rbd_data.1245f7ae95595
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff
        op_features:
        flags: object map invalid, fast diff invalid
        create_timestamp: Sun Aug 29 00:08:41 2021
        access_timestamp: Sun Aug 29 00:08:41 2021
        modify_timestamp: Sun Aug 29 00:08:41 2021
# On the deploy node
# Disable a feature
test@ceph-deploy:~/ceph-cluster$ rbd feature disable fast-diff --pool rbd-data1 --image data-img1
test@ceph-deploy:~/ceph-cluster$ rbd --image data-img1 --pool rbd-data1 info
rbd image 'data-img1':
        size 3 GiB in 768 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 1245f7ae95595
        block_name_prefix: rbd_data.1245f7ae95595
        format: 2
        features: layering, exclusive-lock    # the fast-diff feature is no longer listed
        op_features:
        flags:
        create_timestamp: Sun Aug 29 00:08:41 2021
        access_timestamp: Sun Aug 29 00:08:41 2021
        modify_timestamp: Sun Aug 29 00:08:41 2021
2.4 Configuring a client to use RBD

# On the client: add the repository key and APT sources
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
cat > /etc/apt/sources.list <<EOF
# Source-code mirrors are commented out by default to speed up apt update; uncomment if needed
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
EOF
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" | sudo tee -a /etc/apt/sources.list
apt update
2.42 Install ceph-common on the client

# On the client node
root@ubuntu:~# apt install ceph-common
2.43 Mounting and using RBD on the client with the admin account

# On the deploy node: copy the cluster config and admin keyring to the client
test@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.admin.keyring root@10.0.0.200:/etc/ceph/
2.43.1 Map the images on the client

# On the client node
root@ubuntu:~# rbd -p rbd-data1 map data-img1
/dev/rbd0
root@ubuntu:~# rbd -p rbd-data1 map data-img2
/dev/rbd1
2.43.2 Verify the mapped images on the client

# On the client node
root@ubuntu:~# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
└─sda1   8:1    0   20G  0 part /
sr0     11:0    1 1024M  0 rom
rbd0   252:0    0    3G  0 disk
rbd1   252:16   0    5G  0 disk
2.43.3 Format the disks and mount them on the client

# On the client node
# Format the rbd devices
root@ubuntu:~# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=9, agsize=97280 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=786432, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
root@ubuntu:~# mkfs.xfs /dev/rbd1
meta-data=/dev/rbd1              isize=512    agcount=9, agsize=162816 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=1310720, imaxpct=25
         =                       sunit=1024   swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
# Mount them
root@ubuntu:~# mkdir -p /data /data1
root@ubuntu:~# mount /dev/rbd0 /data
root@ubuntu:~# mount /dev/rbd1 /data1
root@ubuntu:~# df -TH
Filesystem     Type      Size  Used Avail Use% Mounted on
udev           devtmpfs  1.1G     0  1.1G   0% /dev
tmpfs          tmpfs     207M  7.0M  200M   4% /run
/dev/sda1      ext4       22G  3.0G   17G  15% /
tmpfs          tmpfs     1.1G     0  1.1G   0% /dev/shm
tmpfs          tmpfs     5.3M     0  5.3M   0% /run/lock
tmpfs          tmpfs     1.1G     0  1.1G   0% /sys/fs/cgroup
tmpfs          tmpfs     207M     0  207M   0% /run/user/1000
/dev/rbd0      xfs       3.3G   38M  3.2G   2% /data
/dev/rbd1      xfs       5.4G   41M  5.4G   1% /data1
2.43.4 Write test data from the client

# On the client node
root@ubuntu:~# sudo cp /var/log/syslog /data
root@ubuntu:~# sudo cp /var/log/syslog /data1
root@ubuntu:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
udev            964M     0  964M   0% /dev
tmpfs           198M  6.7M  191M   4% /run
/dev/sda1        20G  2.8G   16G  15% /
tmpfs           986M     0  986M   0% /dev/shm
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs           986M     0  986M   0% /sys/fs/cgroup
tmpfs           198M     0  198M   0% /run/user/1000
/dev/rbd0       3.0G   38M  3.0G   2% /data
/dev/rbd1       5.0G   40M  5.0G   1% /data1
2.43.5 Verify the data on the rbd devices

# On the client node
root@ubuntu:~# ll /data
total 1160
drwxr-xr-x  2 root root      20 Aug 28 09:42 ./
drwxr-xr-x 24 root root    4096 Aug 28 09:38 ../
-rw-r-----  1 root root 1181490 Aug 28 09:42 syslog
root@ubuntu:~# ll /data1
total 1160
drwxr-xr-x  2 root root      20 Aug 28 09:43 ./
drwxr-xr-x 24 root root    4096 Aug 28 09:38 ../
-rw-r-----  1 root root 1181490 Aug 28 09:43 syslog
2.43.6 Check pool space usage

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS    SIZE     AVAIL    USED     RAW USED  %RAW USED
hdd      240 GiB  239 GiB  861 MiB   861 MiB       0.35
TOTAL    240 GiB  239 GiB  861 MiB   861 MiB       0.35

--- POOLS ---
POOL                   ID  PGS  STORED   OBJECTS  USED     %USED  MAX AVAIL
device_health_metrics   1    1      0 B        0      0 B      0     76 GiB
mypool                  2   32      0 B        0      0 B      0     76 GiB
myrbd1                  3   64   12 MiB       18   35 MiB   0.02     76 GiB
.rgw.root               4   32  1.3 KiB        4   48 KiB      0     76 GiB
default.rgw.log         5   32  3.6 KiB      209  408 KiB      0     76 GiB
default.rgw.control     6   32      0 B        8      0 B      0     76 GiB
default.rgw.meta        7    8      0 B        0      0 B      0     76 GiB
cephfs-metadata         8   32   56 KiB       22  254 KiB      0     76 GiB
cephfs-data             9   64  121 MiB       31  363 MiB   0.16     76 GiB
rbd-data1              10   32   23 MiB       32   69 MiB   0.03     76 GiB
2.44 Mounting and using RBD on the client with a regular (non-admin) account

# On the deploy node
# Create a regular user
test@ceph-deploy:~/ceph-cluster$ ceph auth add client.shijie mon 'allow r' osd 'allow rwx pool=rbd-data1'
added key for client.shijie
# Verify the user
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.shijie
[client.shijie]
        key = AQCAaCphzIAHMxAAddWTSYWGP6+lQuJV2OW/mQ==
        caps mon = "allow r"
        caps osd = "allow rwx pool=rbd-data1"
exported keyring for client.shijie
# Create a keyring file
test@ceph-deploy:~/ceph-cluster$ ceph-authtool --create-keyring ceph.client.shijie.keyring
creating ceph.client.shijie.keyring
# Export the user's keyring
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.shijie -o ceph.client.shijie.keyring
exported keyring for client.shijie
# On the ceph-client node: add the repositories and install ceph-common
root@ceph-client:~# wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
root@ceph-client:~# vim /etc/apt/sources.list
root@ceph-client:~# apt install ceph-common
2.44.3 Copy the regular user's authentication files to the client

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.shijie.keyring root@10.0.0.200:/etc/ceph/
2.44.4 Verify permissions on the client

# On the ceph-client node
root@ceph-client:~# ll /etc/ceph/
total 20
drwxr-xr-x  2 root root 4096 Aug 28 09:56 ./
drwxr-xr-x 81 root root 4096 Aug 28 09:51 ../
-rw-r--r--  1 root root  125 Aug 28 09:47 ceph.client.shijie.keyring
-rw-r--r--  1 root root  261 Aug 20 10:11 ceph.conf
-rw-r--r--  1 root root   92 Jun  7 07:39 rbdmap
# ceph uses the admin account by default, so specify the user explicitly
root@ceph-client:~# ceph --user shijie -s
2.44.5 Map the rbd image

# On the ceph-client node
# Map the rbd image
root@ceph-client:~# rbd --user shijie -p rbd-data1 map data-img2
/dev/rbd2
# Verify the rbd device
root@ceph-client:~# fdisk -l /dev/rbd0
Disk /dev/rbd0: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
2.44.6 Format and use the rbd image

# On the ceph-client node
root@ceph-client:~# mkfs.ext4 /dev/rbd2
mke2fs 1.44.1 (24-Mar-2018)
/dev/rbd2 contains a xfs file system
Proceed anyway? (y,N) y
Discarding device blocks: done
Creating filesystem with 1310720 4k blocks and 327680 inodes
Filesystem UUID: fb498e3f-e8cb-40dd-b10d-1e91e0bfbbed
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736
Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information: done
root@ceph-client:~# mkdir /data2
root@ceph-client:~# mount /dev/rbd2 /data2/
root@ceph-client:~# # cp /var/log/messages /data2/
root@ceph-client:~# ll /data2
total 24
drwxr-xr-x  3 root root  4096 Aug 28 10:00 ./
drwxr-xr-x 25 root root  4096 Aug 28 10:01 ../
drwx------  2 root root 16384 Aug 28 10:00 lost+found/
root@ceph-client:~# df -TH
Filesystem     Type      Size  Used Avail Use% Mounted on
udev           devtmpfs  1.1G     0  1.1G   0% /dev
tmpfs          tmpfs     207M  7.0M  200M   4% /run
/dev/sda1      ext4       22G  3.0G   17G  15% /
tmpfs          tmpfs     1.1G     0  1.1G   0% /dev/shm
tmpfs          tmpfs     5.3M     0  5.3M   0% /run/lock
tmpfs          tmpfs     1.1G     0  1.1G   0% /sys/fs/cgroup
/dev/rbd0      xfs       3.3G   39M  3.2G   2% /data
/dev/rbd1      xfs       5.4G   42M  5.4G   1% /data1
tmpfs          tmpfs     207M     0  207M   0% /run/user/1000
/dev/rbd2      ext4      5.3G   21M  5.0G   1% /data2

# On the deploy node: verify the image status from the management side
test@ceph-deploy:~/ceph-cluster$ rbd ls -p rbd-data1 -l
NAME       SIZE   PARENT  FMT  PROT  LOCK
data-img1  3 GiB            2        excl
data-img2  5 GiB            2
2.44.7 Verify the ceph kernel modules

# On the client node
root@ceph-client:~# lsmod | grep ceph
libceph               315392  1 rbd
libcrc32c              16384  2 xfs,libceph
root@ceph-client:~# modinfo libceph
filename:       /lib/modules/4.15.0-112-generic/kernel/net/ceph/libceph.ko
license:        GPL
description:    Ceph core library
author:         Patience Warnick <patience@newdream.net>
author:         Yehuda Sadeh <yehuda@hq.newdream.net>
author:         Sage Weil <sage@newdream.net>
srcversion:     899059C79545E4ADF47A464
depends:        libcrc32c
retpoline:      Y
intree:         Y
name:           libceph
vermagic:       4.15.0-112-generic SMP mod_unload
signat:         PKCS#7
signer:
sig_key:
sig_hashalgo:   md4
2.44.8 Growing an rbd image

# On the deploy node
# Current rbd image sizes
test@ceph-deploy:~/ceph-cluster$ rbd ls -p rbd-data1 -l
NAME       SIZE   PARENT  FMT  PROT  LOCK
data-img1  3 GiB            2        excl
data-img2  5 GiB            2
# Grow the rbd images
test@ceph-deploy:~/ceph-cluster$ rbd resize --pool rbd-data1 --image data-img2 --size 8G
Resizing image: 100% complete...done.
test@ceph-deploy:~/ceph-cluster$ rbd resize --pool rbd-data1 --image data-img1 --size 6G
Resizing image: 100% complete...done.
# Verify the rbd image info
test@ceph-deploy:~/ceph-cluster$ rbd ls -p rbd-data1 -l
NAME       SIZE   PARENT  FMT  PROT  LOCK
data-img1  6 GiB            2
data-img2  8 GiB            2
2.44.9 Verify the new image size on the client

# On the client node
root@ceph-client:~# fdisk -l /dev/rbd2
Disk /dev/rbd2: 8 GiB, 8589934592 bytes, 16777216 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
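Resizing the image only grows the block device; the filesystem on top still has to be grown separately. A hedged sketch for the two filesystems used in this section, run on the client with the devices and mount points from above:

# XFS is grown online via its mount point
xfs_growfs /data        # /dev/rbd0, xfs
# ext4 is grown via the block device (online resize is supported while mounted)
resize2fs /dev/rbd2     # /dev/rbd2, ext4, mounted on /data2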
2.44.10 Mount automatically at boot

# On the client node
root@ceph-client:~# cat /etc/fstab
rbd --user shijie -p rbd-data1 map data-img2
mount /dev/rbd2 /data2/
root@ceph-client:~# chmod a+x /etc/fstab
root@ceph-client:~# reboot
# Check the mapping after reboot
root@ceph-client:~# rbd showmapped
id  pool       image      snap  device
0   rbd-data1  data-img2  -     /dev/rbd2
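Note that shell commands placed in /etc/fstab are not executed at boot, so whether the setup above works depends on how the system is otherwise configured. A more conventional approach uses the rbdmap service shipped with ceph-common plus a normal fstab entry; a sketch, assuming the shijie keyring lives in /etc/ceph:

# /etc/ceph/rbdmap : image to map at boot, with the credentials to use
# rbd-data1/data-img2  id=shijie,keyring=/etc/ceph/ceph.client.shijie.keyring

# /etc/fstab : mount the persistent device name created by rbdmap
# /dev/rbd/rbd-data1/data-img2  /data2  ext4  defaults,noatime,_netdev  0 0

systemctl enable rbdmap    # map the listed images automatically at boot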
2.44.11 Unmount and unmap the rbd image

# On the client node
root@ceph-client:~# umount /data2
root@ceph-client:~# rbd --user shijie -p rbd-data1 unmap data-img2
2.44.12 Delete an rbd image

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ rbd rm --pool rbd-data1 --image data-img1
Removing image: 100% complete...done.
2.44.13 The rbd image trash mechanism

# On the deploy node
# Check the image status
test@ceph-deploy:~/ceph-cluster$ rbd status --pool rbd-data1 --image data-img2
# Move the image to the trash
test@ceph-deploy:~/ceph-cluster$ rbd trash move --pool rbd-data1 --image data-img2
# List images in the trash
test@ceph-deploy:~/ceph-cluster$ rbd trash list --pool rbd-data1
12468e5b9a04b data-img2
# If the image is no longer needed, it can be removed permanently with rbd trash remove
# Restore the image from the trash
test@ceph-deploy:~/ceph-cluster$ rbd trash restore --pool rbd-data1 --image data-img2 --image-id 12468e5b9a04b
# Verify the image
test@ceph-deploy:~/ceph-cluster$ rbd ls --pool rbd-data1 -l
NAME       SIZE   PARENT  FMT  PROT  LOCK
data-img2  8 GiB            2
2.5 Image snapshots

# On the client node: data currently on the mounted image
root@ceph-client:~# ll /data2
total 24
drwxr-xr-x  3 root root  4096 Aug 28 10:00 ./
drwxr-xr-x 25 root root  4096 Aug 28 10:01 ../
drwx------  2 root root 16384 Aug 28 10:00 lost+found/

# On the deploy node
# Create a snapshot
test@ceph-deploy:~/ceph-cluster$ rbd snap create --pool rbd-data1 --image data-img2 --snap img2-snap-12468e5b9a04b
Creating snap: 100% complete...done.
# Verify the snapshot
test@ceph-deploy:~/ceph-cluster$ rbd snap list --pool rbd-data1 --image data-img2
SNAPID  NAME                     SIZE   PROTECTED  TIMESTAMP
     4  img2-snap-12468e5b9a04b  8 GiB             Sun Aug 29 01:41:32 2021
2.53 Delete data and roll back to the snapshot

# On the client node: delete data
root@ceph-client:~# rm -rf /data2/lost+found
# Verify
root@ceph-client:~# ll /data2
total 8
drwxr-xr-x  2 root root 4096 Aug 28 10:01 ./
drwxr-xr-x 25 root root 4096 Aug 28 10:01 ../
# Unmount and unmap the rbd device
root@ceph-client:~# umount /data2
root@ceph-client:~# rbd unmap /dev/rbd2
# On the deploy node: roll back the snapshot
test@ceph-deploy:~/ceph-cluster$ rbd snap rollback --pool rbd-data1 --image data-img2 --snap img2-snap-12468e5b9a04b
2.54 Verify the data on the client

# On the client node
# Map the rbd image again
root@ceph-client:~# rbd --user shijie -p rbd-data1 map data-img2
# Mount it
root@ceph-client:~# mount /dev/rbd0 /data/
# Verify the data
root@ceph-client:~# ll /data/
2.55 Delete the snapshot

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ rbd snap remove --pool rbd-data1 --image data-img2 --snap img2-snap-12468e5b9a04b
Removing snap: 100% complete...done.
# Verify the snapshot is gone
test@ceph-deploy:~/ceph-cluster$ rbd snap list --pool rbd-data1 --image data-img2
2.56 Snapshot count limits

# On the deploy node
# Set or change the snapshot count limit
test@ceph-deploy:~/ceph-cluster$ rbd snap limit set --pool rbd-data1 --image data-img2 --limit 30
# Clear the snapshot count limit
test@ceph-deploy:~/ceph-cluster$ rbd snap limit clear --pool rbd-data1 --image data-img2
3 Using CephFS

# On the mgr node: install ceph-mds
test@ceph-mgr1:~$ apt-cache madison ceph-mds
ceph-mds | 16.2.5-1bionic | https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic/main amd64 Packages
ceph-mds | 12.2.13-0ubuntu0.18.04.8 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic-updates/universe amd64 Packages
ceph-mds | 12.2.13-0ubuntu0.18.04.4 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic-security/universe amd64 Packages
ceph-mds | 12.2.4-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu bionic/universe amd64 Packages
test@ceph-mgr1:~$ sudo apt install ceph-mds

# On the deploy node: create the metadata and data pools
test@ceph-deploy:~$ ceph osd pool create cephfs-metadata 32 32
test@ceph-deploy:~$ ceph osd pool create cephfs-data 64 64
test@ceph-deploy:~$ ceph -s
  cluster:
    id:     635d9577-7341-4085-90ff-cb584029a1ea
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 7m)
    mgr: ceph-mgr2(active, since 6m), standbys: ceph-mgr1
    mds: 1/1 daemons up
    osd: 12 osds: 12 up (since 6m), 12 in (since 39h)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   10 pools, 329 pgs
    objects: 328 objects, 213 MiB
    usage:   894 MiB used, 239 GiB / 240 GiB avail
    pgs:     329 active+clean
3.3 Create the CephFS file system and verify it

# On the deploy node
test@ceph-deploy:~$ ceph fs new mycephfs cephfs-metadata cephfs-data
test@ceph-deploy:~$ ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
test@ceph-deploy:~$ ceph fs status mycephfs
mycephfs - 0 clients
========
RANK  STATE      MDS       ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  ceph-mgr1  Reqs:    0 /s    12     15     12      0
      POOL         TYPE     USED  AVAIL
cephfs-metadata  metadata   247k  75.5G
  cephfs-data      data     362M  75.5G
MDS version: ceph version 16.2.5 (0883bdea7337b95e4b611c768c0279868462204a) pacific (stable)
3.4 Verify the CephFS service status

# On the deploy node
test@ceph-deploy:~$ ceph mds stat
mycephfs:1 {0=ceph-mgr1=up:active}
3.5 Create a client account

# On the deploy node
# Create the user
test@ceph-deploy:~/ceph-cluster$ ceph auth add client.yanyan mon 'allow r' mds 'allow rw' osd 'allow rwx pool=cephfs-data'
added key for client.yanyan
# Verify the account
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.yanyan
[client.yanyan]
        key = AQAhMCth/3d/HxAA7sMakmCr5tOFj8l2vmmaRA==
        caps mds = "allow rw"
        caps mon = "allow r"
        caps osd = "allow rwx pool=cephfs-data"
exported keyring for client.yanyan
# Create the keyring file
test@ceph-deploy:~/ceph-cluster$ ceph auth get client.yanyan -o ceph.client.yanyan.keyring
exported keyring for client.yanyan
# Create a key file
test@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.yanyan > yanyan.key
# Verify the user's keyring file
test@ceph-deploy:~/ceph-cluster$ cat ceph.client.yanyan.keyring
[client.yanyan]
        key = AQAhMCth/3d/HxAA7sMakmCr5tOFj8l2vmmaRA==
        caps mds = "allow rw"
        caps mon = "allow r"
        caps osd = "allow rwx pool=cephfs-data"
3.6 Install the Ceph client

# On the client node
root@ceph-client:/etc/ceph# apt install ceph-common -y
3.7 Copy the client authentication files

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ scp ceph.conf ceph.client.yanyan.keyring yanyan.key root@10.0.0.200:/etc/ceph/
3.8 Verify permissions on the client

# On the client node
root@ceph-client2:/etc/ceph# ceph --user yanyan -s
  cluster:
    id:     635d9577-7341-4085-90ff-cb584029a1ea
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 55m)
    mgr: ceph-mgr2(active, since 54m), standbys: ceph-mgr1
    mds: 1/1 daemons up
    osd: 12 osds: 12 up (since 54m), 12 in (since 39h)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   10 pools, 329 pgs
    objects: 328 objects, 213 MiB
    usage:   895 MiB used, 239 GiB / 240 GiB avail
    pgs:     329 active+clean
3.9 Mount CephFS with the kernel client

# On the client node
root@ceph-client2:~# mount -t ceph 10.0.0.101:6789,10.0.0.102:6789,10.0.0.103:6789:/ /data -o name=yanyan,secretfile=/etc/ceph/yanyan.key
root@ceph-client2:~# df -h
Filesystem                                          Size  Used Avail Use% Mounted on
udev                                                964M     0  964M   0% /dev
tmpfs                                               198M  6.6M  191M   4% /run
/dev/sda1                                            20G  2.8G   16G  16% /
tmpfs                                               986M     0  986M   0% /dev/shm
tmpfs                                               5.0M     0  5.0M   0% /run/lock
tmpfs                                               986M     0  986M   0% /sys/fs/cgroup
tmpfs                                               198M     0  198M   0% /run/user/1000
10.0.0.101:6789,10.0.0.102:6789,10.0.0.103:6789:/    76G  120M   76G   1% /data
# Verify by writing data
root@ceph-client2:~# cp /var/log/syslog /data/
root@ceph-client2:~# dd if=/dev/zero of=/data/testfile bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.0415206 s, 2.5 GB/s
3.92 Mount on the client using the key directly

# On the client node
root@ceph-client2:~# tail /etc/ceph/yanyan.key
AQAhMCth/3d/HxAA7sMakmCr5tOFj8l2vmmaRA==
root@ceph-client2:~# umount /data/
root@ceph-client2:~# mount -t ceph 10.0.0.101:6789,10.0.0.102:6789,10.0.0.103:6789:/ /data -o name=yanyan,secret=AQAhMCth/3d/HxAA7sMakmCr5tOFj8l2vmmaRA==
root@ceph-client2:~# df -h
Filesystem                                          Size  Used Avail Use% Mounted on
udev                                                964M     0  964M   0% /dev
tmpfs                                               198M  6.6M  191M   4% /run
/dev/sda1                                            20G  2.8G   16G  16% /
tmpfs                                               986M     0  986M   0% /dev/shm
tmpfs                                               5.0M     0  5.0M   0% /run/lock
tmpfs                                               986M     0  986M   0% /sys/fs/cgroup
tmpfs                                               198M     0  198M   0% /run/user/1000
10.0.0.101:6789,10.0.0.102:6789,10.0.0.103:6789:/    76G  220M   76G   1% /data
# Write test data
root@ceph-client2:~# cp /var/log/syslog /data/
# Check the mount point status
root@ceph-client2:~# stat -f /data/
  File: "/data/"
    ID: 2f5ea2f36fe16833 Namelen: 255     Type: ceph
Block size: 4194304    Fundamental block size: 4194304
Blocks: Total: 19319      Free: 19264      Available: 19264
Inodes: Total: 56         Free: -1
3.93 Mount at boot

# On the client node
root@ceph-client2:~# cat /etc/fstab
10.0.0.101:6789,10.0.0.102:6789,10.0.0.103:6789:/ /data ceph defaults,name=yanyan,secretfile=/etc/ceph/yanyan.key,_netdev 0 0
root@ceph-client2:~# mount -a

# On the client node: verify the cephfs kernel modules
root@ceph-client2:~# lsmod | grep ceph
ceph                  376832  1
libceph               315392  1 ceph
libcrc32c              16384  1 libceph
fscache                65536  1 ceph
root@ceph-client2:~# modinfo ceph
3.10 Ceph MDS high availability

# On the deploy node: current MDS status
test@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mgr1=up:active}
3.10.2 Add MDS servers

# Install the ceph-mds service on the additional mds servers
test@ceph-mgr2:~$ sudo apt install ceph-mds -y
test@ceph-mon2:~$ sudo apt install ceph-mds -y
test@ceph-mon3:~$ sudo apt install ceph-mds -y
# Add the mds servers
test@ceph-deploy:~/ceph-cluster$ ceph-deploy mds create ceph-mgr2
test@ceph-deploy:~/ceph-cluster$ ceph-deploy mds create ceph-mon2
test@ceph-deploy:~/ceph-cluster$ ceph-deploy mds create ceph-mon3
# Verify the current mds status
test@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mgr1=up:active} 3 up:standby
3.10.3 Verify the current cluster state

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 1 clients
========
RANK  STATE      MDS       ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  ceph-mgr1  Reqs:    0 /s    13     16     12      2
      POOL         TYPE     USED  AVAIL
cephfs-metadata  metadata   379k  75.2G
  cephfs-data      data     663M  75.2G
STANDBY MDS
  ceph-mon2
  ceph-mgr2
  ceph-mon3
MDS version: ceph version 16.2.5 (0883bdea7337b95e4b611c768c0279868462204a) pacific (stable)
3.10.4 Current file system state

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph fs get mycephfs
Filesystem 'mycephfs' (1)
fs_name mycephfs
epoch   37
flags   12
created 2021-08-27T11:06:31.193582+0800
modified        2021-08-29T14:48:37.814878+0800
tableserver     0
root    0
session_timeout 60
session_autoclose       300
max_file_size   1099511627776
required_client_features        {}
last_failure    0
last_failure_osd_epoch  551
compat  compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
max_mds 1
in      0
up      {0=84172}
failed
damaged
stopped
data_pools      [9]
metadata_pool   8
inline_data     disabled
balancer
standby_count_wanted    1
[mds.ceph-mgr1{0:84172} state up:active seq 7 addr [v2:10.0.0.104:6800/3031657167,v1:10.0.0.104:6801/3031657167]]
3.10.5 Set the number of active MDS daemons

# On the deploy node
# Set the maximum number of simultaneously active MDS daemons to 2
test@ceph-deploy:~/ceph-cluster$ ceph fs set mycephfs max_mds 2
test@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 1 clients
========
RANK  STATE      MDS       ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  ceph-mgr1  Reqs:    0 /s    13     16     12      2
 1    active  ceph-mon3  Reqs:    0 /s    10     13     11      0
      POOL         TYPE     USED  AVAIL
cephfs-metadata  metadata   451k  75.2G
  cephfs-data      data     663M  75.2G
STANDBY MDS
  ceph-mon2
  ceph-mgr2
MDS version: ceph version 16.2.5 (0883bdea7337b95e4b611c768c0279868462204a) pacific (stable)
3.10.6 MDS high-availability tuning

# On the deploy node: pin each standby to a specific MDS in ceph.conf
test@ceph-deploy:~/ceph-cluster$ cat ceph.conf
[global]
fsid = 635d9577-7341-4085-90ff-cb584029a1ea
public_network = 10.0.0.0/24
cluster_network = 192.168.133.0/24
mon_initial_members = ceph-mon1
mon_host = 10.0.0.101
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
mon clock drift allowed = 2
mon clock drift warn backoff = 30

[mds.ceph-mgr2]
#mds_standby_for_fscid = mycephfs
mds_standby_for_name = ceph-mgr1
mds_standby_replay = true

[mds.ceph-mon3]
mds_standby_for_name = ceph-mon2
mds_standby_replay = true
3.10.7 Distribute the config file and restart the mds services

# On the deploy node
# Push the config file so the change takes effect when each mds restarts
test@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mon3
test@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mon2
test@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mgr1
test@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mgr2
# Restart the mds services
test@ceph-mon2:~$ sudo systemctl restart ceph-mds@ceph-mon2.service
test@ceph-mon3:~$ sudo systemctl restart ceph-mds@ceph-mon3.service
test@ceph-mgr1:~$ sudo systemctl restart ceph-mds@ceph-mgr1.service
test@ceph-mgr2:~$ sudo systemctl restart ceph-mds@ceph-mgr2.service
3.10.8 MDS high-availability status of the cluster

# On the deploy node
test@ceph-deploy:~/ceph-cluster$ ceph fs status
mycephfs - 1 clients
========
RANK  STATE      MDS       ACTIVITY     DNS    INOS   DIRS   CAPS
 0    active  ceph-mgr2  Reqs:    0 /s    13     16     12      1
 1    active  ceph-mon2  Reqs:    0 /s    10     13     11      0
      POOL         TYPE     USED  AVAIL
cephfs-metadata  metadata   451k  75.2G
  cephfs-data      data     663M  75.2G
STANDBY MDS
  ceph-mon3
  ceph-mgr1
MDS version: ceph version 16.2.5 (0883bdea7337b95e4b611c768c0279868462204a) pacific (stable)
# Check which MDS is active and which is standby
test@ceph-deploy:~/ceph-cluster$ ceph fs get mycephfs
Filesystem 'mycephfs' (1)
fs_name mycephfs
epoch   67
flags   12
created 2021-08-27T11:06:31.193582+0800
modified        2021-08-29T16:34:16.305266+0800
tableserver     0
root    0
session_timeout 60
session_autoclose       300
max_file_size   1099511627776
required_client_features        {}
last_failure    0
last_failure_osd_epoch  557
compat  compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2}
max_mds 2
in      0,1
up      {0=84753,1=84331}
failed
damaged
stopped
data_pools      [9]
metadata_pool   8
inline_data     disabled
balancer
standby_count_wanted    1
[mds.ceph-mgr2{0:84753} state up:active seq 7 addr [v2:10.0.0.105:6802/2338760756,v1:10.0.0.105:6803/2338760756]]
[mds.ceph-mon2{1:84331} state up:active seq 14 addr [v2:10.0.0.102:6800/3841027813,v1:10.0.0.102:6801/3841027813]]
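One way to confirm that the standby pairing actually works (not captured in the session above) is to stop an active MDS and watch its standby take over. A sketch:

# On the node running an active MDS (here ceph-mgr2), stop the daemon
sudo systemctl stop ceph-mds@ceph-mgr2.service
# On the deploy node, watch the standby get promoted
ceph fs status
ceph -s
# Restart the stopped daemon afterwards so it rejoins as a standby
sudo systemctl start ceph-mds@ceph-mgr2.service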
3.11 Export CephFS as NFS with nfs-ganesha

# On the mgr1 node
test@ceph-mgr1:~$ sudo apt install nfs-ganesha-ceph
test@ceph-mgr1:~$ cd /etc/ganesha/
test@ceph-mgr1:/etc/ganesha$ cat /etc/ganesha/ganesha.conf
NFS_CORE_PARAM {
    # disable NLM
    Enable_NLM = false;
    # disable RQUOTA (not supported on CephFS)
    Enable_RQUOTA = false;
    # NFS protocol
    Protocols = 4;
}
EXPORT_DEFAULTS {
    # default access mode
    Access_Type = RW;
}
EXPORT {
    # unique ID
    Export_Id = 1;
    # mount path within CephFS
    Path = "/";
    FSAL {
        name = CEPH;
        # hostname or IP address of this node
        hostname = "10.0.0.104";
    }
    # root squash setting
    Squash = "No_root_squash";
    # NFSv4 pseudo path
    Pseudo = "/test";
    # allowed security options
    SecType = "sys";
}
LOG {
    # default log level
    Default_Log_Level = WARN;
}
test@ceph-mgr1:/etc/ganesha$ sudo systemctl restart nfs-ganesha
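From any NFS client that can reach ceph-mgr1 (10.0.0.104), the export can then be mounted via the pseudo path /test defined above. A sketch; /mnt/cephfs-nfs is an arbitrary mount point chosen for illustration:

# On an NFS client
apt install nfs-common
mkdir -p /mnt/cephfs-nfs
mount -t nfs -o nfsvers=4,proto=tcp 10.0.0.104:/test /mnt/cephfs-nfs
df -h /mnt/cephfs-nfs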