Dynamically provisioning PVs in Kubernetes with GlusterFS

I. Environment overview

[root@k8s-m ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

172.31.250.152 k8s-m
172.31.250.153 node1
172.31.250.154 node2

[root@k8s-m ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-m Ready master 41m v1.13.1
node1 Ready <none> 27m v1.13.1
node2 Ready <none> 28m v1.13.1

# Remove the taint from the k8s master (I only have three servers here, so the master has to run workloads too; never do this in production)
[root@k8s-m ~]# kubectl taint node k8s-m node-role.kubernetes.io/master-
node/k8s-m untainted

Install GlusterFS on all storage servers:

yum install centos-release-gluster -y

yum install glusterfs-server -y
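
Heketi drives GlusterFS through the glusterd daemon, so it has to be running on every storage node. A minimal check, assuming the service name glusterd that the CentOS glusterfs-server package installs:

# On every storage node: start glusterd and have it come up on boot
systemctl enable glusterd
systemctl start glusterd
systemctl status glusterd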

II. Installing Heketi

All releases: https://github.com/heketi/heketi/releases

1. Download and install

wget https://github.com/heketi/heketi/releases/download/v8.0.0/heketi-v8.0.0.linux.amd64.tar.gz
tar xf heketi-v8.0.0.linux.amd64.tar.gz
mkdir -p /data/heketi/{bin,conf,data}
cp heketi/heketi.json /data/heketi/conf/
cp heketi/{heketi,heketi-cli} /data/heketi/bin/

# Our GlusterFS nodes sit outside the k8s cluster, so Heketi manages them over SSH.
# Passwordless (key-based) SSH login to every GlusterFS node is required.
ssh-keygen -f /data/heketi/conf/heketi_key -t rsa -N ''
# Copy the public key to all GlusterFS nodes
ssh-copy-id -i /data/heketi/conf/heketi_key.pub root@k8s-m
ssh-copy-id -i /data/heketi/conf/heketi_key.pub root@node1
ssh-copy-id -i /data/heketi/conf/heketi_key.pub root@node2
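
Before going further, it is worth verifying that the Heketi key really gives passwordless access to every GlusterFS node. A quick sanity check from the Heketi host (hostnames taken from /etc/hosts above):

# Each command should print the GlusterFS version without asking for a password
ssh -i /data/heketi/conf/heketi_key root@k8s-m 'gluster --version'
ssh -i /data/heketi/conf/heketi_key root@node1 'gluster --version'
ssh -i /data/heketi/conf/heketi_key root@node2 'gluster --version'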

2. The heketi.json configuration file

[root@k8s-m ~]# cat /data/heketi/conf/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "18080",

  "_enable_tls_comment": "Enable TLS in Heketi Server",
  "enable_tls": false,

  "_cert_file_comment": "Path to a valid certificate file",
  "cert_file": "",

  "_key_file_comment": "Path to a valid private key file",
  "key_file": "",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "adminkey"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "userkey"
    }
  },

  "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
  "backup_db_to_kube_secret": false,

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/data/heketi/conf/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab",
      "backup_lvm_metadata": false
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab",
      "backup_lvm_metadata": false
    },

    "_db_comment": "Database file name",
    "db": "/data/heketi/data/heketi.db",

    "_refresh_time_monitor_gluster_nodes": "Refresh time in seconds to monitor Gluster nodes",
    "refresh_time_monitor_gluster_nodes": 120,

    "_start_time_monitor_gluster_nodes": "Start time in seconds to monitor Gluster nodes when the heketi comes up",
    "start_time_monitor_gluster_nodes": 10,

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug",

    "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
    "auto_create_block_hosting_volume": true,

    "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
    "block_hosting_volume_size": 500,

    "_block_hosting_volume_options": "New block hosting volume will be created with the following set of options. Removing the group gluster-block option is NOT recommended. Additional options can be added next to it separated by a comma.",
    "block_hosting_volume_options": "group gluster-block"
  }
}

  

3. Create a systemd unit to start Heketi

[root@k8s-m ~]# cat /usr/lib/systemd/system/heketi.service
[Unit]
Description=RESTful based volume management framework for GlusterFS
Wants=network-online.target
After=network-online.target
Documentation=https://github.com/heketi/heketi
[Service]
Type=simple
LimitNOFILE=
ExecStart=/data/heketi/bin/heketi --config=/data/heketi/conf/heketi.json
KillMode=process
Restart=on-failure
RestartSec=
SuccessExitStatus=
StandardOutput=syslog
StandardError=syslog
[Install]
WantedBy=multi-user.target

# Start heketi
systemctl start heketi
systemctl enable heketi
systemctl status heketi
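
To confirm the REST API is actually reachable on port 18080, Heketi's hello endpoint can be queried; it should answer with a short greeting along the lines of "Hello from Heketi":

curl http://localhost:18080/hello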

4. Create a Heketi cluster

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --user admin --server http://k8s-m:18080 --secret adminkey --json  cluster create
{"id":"aa378f94299c29bbd0224fc902c0cbd8","nodes":[],"volumes":[],"block":true,"file":true,"blockvolumes":[]}

5. Add the three GlusterFS servers to the cluster as nodes

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080   --user "admin" --secret "adminkey"   node add --cluster "aa378f94299c29bbd0224fc902c0cbd8"  --management-host-name  k8s-m --storage-host-name 172.31.250.152 --zone 1
Node information:
Id: 9280cde1c2640bf3fef483509a04bff2
State: online
Cluster Id: aa378f94299c29bbd0224fc902c0cbd8
Zone: 1
Management Hostname k8s-m
Storage Hostname 172.31.250.152

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" node add --cluster "aa378f94299c29bbd0224fc902c0cbd8" --management-host-name node1 --storage-host-name 172.31.250.153 --zone 1
Node information:
Id: 19838aea8104aee077b0ea67cd6ea012
State: online
Cluster Id: aa378f94299c29bbd0224fc902c0cbd8
Zone: 1
Management Hostname node1
Storage Hostname 172.31.250.153

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" node add --cluster "aa378f94299c29bbd0224fc902c0cbd8" --management-host-name node2 --storage-host-name 172.31.250.154 --zone 1
Node information:
Id: 171aeb876968bf9ef1fab28e6a31e919
State: online
Cluster Id: aa378f94299c29bbd0224fc902c0cbd8
Zone: 1
Management Hostname node2
Storage Hostname 172.31.250.154
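
Adding nodes one at a time works, but heketi-cli can also load an entire cluster layout (nodes plus devices) in one shot from a topology file. A sketch, assuming the three nodes are described in a file called topology.json:

/data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" topology load --json=topology.json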

6. Add devices

# The servers are only Gluster's execution units; volumes are built from devices. Note that Heketi currently only accepts raw (unformatted) partitions or disks as devices; devices that already carry a filesystem are not supported.

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080   --user "admin" --secret "adminkey"    --json device add --name="/dev/vdb" --node "9280cde1c2640bf3fef483509a04bff2"
Device added successfully

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" --json device add --name="/dev/vdb" --node "19838aea8104aee077b0ea67cd6ea012"
Device added successfully

[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" --json device add --name="/dev/vdb" --node "171aeb876968bf9ef1fab28e6a31e919"
Device added successfully

# List the nodes
[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" node list
Id:171aeb876968bf9ef1fab28e6a31e919 Cluster:aa378f94299c29bbd0224fc902c0cbd8
Id:19838aea8104aee077b0ea67cd6ea012 Cluster:aa378f94299c29bbd0224fc902c0cbd8
Id:9280cde1c2640bf3fef483509a04bff2 Cluster:aa378f94299c29bbd0224fc902c0cbd8
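
To see how each device was carved into bricks and how much free space is left, the topology view is useful:

/data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" topology info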

7. Create a volume

Create a 3 GB volume with 3 replicas:
[root@k8s-m ~]# /data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" volume create --size 3 --replica 3
Name: vol_2aab01a26b3a2dfc1286eed16c8fcb62
Size: 3
Volume Id: 2aab01a26b3a2dfc1286eed16c8fcb62
Cluster Id: aa378f94299c29bbd0224fc902c0cbd8
Mount: 172.31.250.154:vol_2aab01a26b3a2dfc1286eed16c8fcb62
Mount Options: backup-volfile-servers=172.31.250.153,172.31.250.152
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distributed+Replica: 3
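
The Mount line above can be used to verify the volume from any client with the GlusterFS FUSE client installed. A minimal test mount, assuming /mnt/test is free to use:

yum install glusterfs-fuse -y
mkdir -p /mnt/test
mount -t glusterfs 172.31.250.154:vol_2aab01a26b3a2dfc1286eed16c8fcb62 /mnt/test
df -h /mnt/test
umount /mnt/test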

  

8. Create the StorageClass

[root@k8s-m ~]# cat   storageclass-glusterfs.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  # base64 encoded password. E.g.: echo -n "mypassword" | base64
  key: YWRtaW5rZXk=
type: kubernetes.io/glusterfs
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: glusterfs
provisioner: kubernetes.io/glusterfs
allowVolumeExpansion: true
parameters:
  resturl: "http://172.31.250.152:18080"
  clusterid: "aa378f94299c29bbd0224fc902c0cbd8"
  restauthenabled: "true"
  restuser: "admin"
  #secretNamespace: "default"
  #secretName: "heketi-secret"
  restuserkey: "adminkey"
  gidMin: ""
  gidMax: ""
  volumetype: "replicate:3"

# Apply it
[root@k8s-m ~]# kubectl apply -f storageclass-glusterfs.yaml
secret/heketi-secret created
storageclass.storage.k8s.io/glusterfs created
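
Before wiring the StorageClass into a StatefulSet, a throwaway PVC is an easy way to confirm dynamic provisioning works end to end. A sketch (the name test-claim is arbitrary):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: glusterfs
  resources:
    requests:
      storage: 1Gi
EOF

# The claim should reach the Bound state within a few seconds
kubectl get pvc test-claim
kubectl delete pvc test-claim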

9. Create a StatefulSet to test whether PVs are provisioned automatically

# Check PVs and PVCs (nothing exists yet)
[root@k8s-m ~]# kubectl get pv --all-namespaces
No resources found.
[root@k8s-m ~]# kubectl get pvc --all-namespaces
No resources found.

# The StatefulSet manifest
[root@k8s-m ~]# cat mystatefulset.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx # has to match .spec.template.metadata.labels
  serviceName: "nginx"
  replicas: 1 # by default is 1
  template:
    metadata:
      labels:
        app: nginx # has to match .spec.selector.matchLabels
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "glusterfs"
      resources:
        requests:
          storage: 2Gi

# Create it
kubectl apply -f mystatefulset.yaml

# Check PVs and PVCs again (a PV was created automatically for the PVC)
[root@k8s-m ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-0afdc43a--11e9-b8f6-00163e024454 2Gi RWO Delete Bound default/www-nginx-0 glusterfs 6s
[root@k8s-m ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-nginx-0 Bound pvc-0afdc43a--11e9-b8f6-00163e024454 2Gi RWO glusterfs 16s
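
As a final check, every dynamically provisioned PV should have a matching GlusterFS volume managed by Heketi; the two views can be compared directly (volume names and IDs will differ in your environment):

# On the Heketi host
/data/heketi/bin/heketi-cli --server http://k8s-m:18080 --user "admin" --secret "adminkey" volume list

# On any GlusterFS node
gluster volume list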