http://slack.minio.org.cn/people/1
https://github.com/minio/operator
hostpath形式
1、node加标签
# Label one node per MinIO replica so each PV's nodeAffinity can pin
# pod N's storage to the node carrying the minio-N=true label.
# Originally three commands were jammed onto a single line; as shell that
# would pass the later "kubectl ..." words as arguments to the first one.
kubectl label nodes test-01 minio-0=true
kubectl label nodes test-02 minio-1=true
kubectl label nodes test-03 minio-2=true
kubectl label nodes test-04 minio-3=true
2、建立storageclass
# Local (no-provisioner) StorageClass: PVs are created by hand and bound
# only once a consuming pod is scheduled (WaitForFirstConsumer), so the
# scheduler can honor each PV's nodeAffinity.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: minio-sc
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
3、minio.yaml
# Headless Service (clusterIP: None) gives each StatefulSet pod a stable
# DNS name minio-N.minio.velero.svc.cluster.local, which the MinIO server
# args below rely on.
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: velero
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
    - port: 9000
      name: minio
  selector:
    app: minio
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
  namespace: velero
spec:
  selector:
    matchLabels:
      app: minio
  serviceName: minio
  replicas: 4
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
        - name: minio
          env:
            - name: MINIO_ACCESS_KEY
              value: "minio"
            - name: MINIO_SECRET_KEY
              value: "minio123"
          # NOTE(review): image appears to come from a private registry
          # ("k8s-deploy/") — confirm it is reachable from the cluster.
          image: k8s-deploy/minio:2020-06-01
          args:
            - server
            # {0...3} is MinIO's own expansion syntax for the 4 endpoints.
            - http://minio-{0...3}.minio.velero.svc.cluster.local/data
          ports:
            - containerPort: 9000
          # Each pod mounts its own persistent volume at /data.
          volumeMounts:
            - name: data
              mountPath: /data
  # Converted to one PVC per pod by the controller and bound to the
  # hand-made PVs below via storageClassName minio-sc.
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: minio-sc
        volumeMode: Filesystem
4、建立pv
# hostPath PV for replica 0, pinned via nodeAffinity to the node labeled
# minio-0=true (see step 1).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-0
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-0
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: minio-0
              operator: In
              values:
                - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: minio-sc
  volumeMode: Filesystem
# hostPath PV for replica 1, pinned to the node labeled minio-1=true.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-1
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-1
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: minio-1
              operator: In
              values:
                - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: minio-sc
  volumeMode: Filesystem
# hostPath PV for replica 2, pinned to the node labeled minio-2=true.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-2
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-2
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: minio-2
              operator: In
              values:
                - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: minio-sc
  volumeMode: Filesystem
# hostPath PV for replica 3, pinned to the node labeled minio-3=true.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-3
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-3
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: minio-3
              operator: In
              values:
                - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: minio-sc
  volumeMode: Filesystem
5、建立service
# External access: NodePort Service in front of the MinIO pods
# (port auto-assigned by Kubernetes since no nodePort is given).
apiVersion: v1
kind: Service
metadata:
  name: minio-nodeport
  namespace: velero
  labels:
    app: minio
spec:
  type: NodePort
  ports:
    - port: 9000
      name: minio
      targetPort: 9000
  selector:
    app: minio
nfs形式
前提:新建好nfs相关内容
nfs-client.yaml
# StorageClass backed by an external nfs-client provisioner.
# NOTE(review): this class is named "nfs1", but the StatefulSet and PVs
# below reference storageClassName "nfs-client" — confirm which name is
# actually deployed in the cluster.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    NFSADDR: 192.168.92.147
    NFSPATH: /nfs/
    type: NFS
  name: nfs1
parameters:
  archiveOnDelete: "false"
provisioner: nfs-client-provisioner-nfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
1、sts
# NFS variant: headless Service + 4-replica StatefulSet in the default
# namespace (DNS names minio-N.minio.default.svc.cluster.local).
apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
    - port: 9000
      name: minio
  selector:
    app: minio
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
spec:
  selector:
    matchLabels:
      app: minio
  serviceName: minio
  replicas: 4
  template:
    metadata:
      labels:
        app: minio
    spec:
      # Allow scheduling onto master nodes as well.
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
          effect: "NoSchedule"
      containers:
        - name: minio
          env:
            - name: MINIO_ACCESS_KEY
              value: "minio"
            - name: MINIO_SECRET_KEY
              value: "minio123"
          image: minio/minio:RELEASE.2020-06-01T17-28-03Z
          args:
            - server
            # {0...3} is MinIO's expansion syntax for the 4 endpoints.
            - http://minio-{0...3}.minio.default.svc.cluster.local/data
          ports:
            - containerPort: 9000
          # Each pod mounts its own persistent volume at /data.
          volumeMounts:
            - name: data
              mountPath: /data
  # Converted to one PVC per pod; bound to the NFS PVs defined below.
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: nfs-client
2、建立pv
# NFS-backed PV for replica 0 (directory created in step 3).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-0
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/0
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
# NFS-backed PV for replica 1.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-1
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/1
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
# NFS-backed PV for replica 2.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-2
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/2
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
# NFS-backed PV for replica 3.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-3
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/3
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
3、登录nfs服务器新建目录
# On the NFS server: create one export directory per MinIO replica,
# matching the nfs.path of minio-pv-0..3 above.
mkdir -p /nfs/top/minio/0 /nfs/top/minio/1 /nfs/top/minio/2 /nfs/top/minio/3
4、对外服务
# External access: NodePort Service with a fixed node port (33007).
apiVersion: v1
kind: Service
metadata:
  labels:
    app: minio
  name: minio-svc-nodeport
  namespace: default
spec:
  ports:
    - name: minio
      port: 9000
      protocol: TCP
      targetPort: 9000
      nodePort: 33007
  selector:
    app: minio
  sessionAffinity: None
  type: NodePort
5、访问
[root@host-239 minio]# kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE minio-svc-nodeport NodePort 10.*.*.* <none> 9000:33007/TCP 59s
使用nodeip加33007端口访问minio
压测
参考 https://www.cnblogs.com/yuhaohao/p/13099507.html
https://blog.csdn.net/ff_gogo/article/details/85252189
1、安装jdk环境和其他依赖
# COSBench is a Java application; the heading says "install JDK and other
# dependencies" but the original command installed no Java at all.
# nmap-ncat provides the `nc` binary COSBench's scripts use.
yum install -y wget nmap-ncat java-1.8.0-openjdk
2、下载
# Download the COSBench v0.4.2.c4 release archive (saved as 0.4.2.c4.zip).
wget https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c4/0.4.2.c4.zip
3、解压,修改配置
主要修改conf/s3-config-sample.xml的storage内容,添加minio 账号密码和地址
每个workflow可以有多个workstage,每个workstage下可以有多个work;
每种workstage对应一个工作类别,由work下的type字段标识,常用的有init(创建bucket)、prepare(预写object供读取)、write(创建object写数据)、read(读取object)、cleanup(删除object)、dispose(删除bucket)。
修改后的内容:
<?xml version="1.0" encoding="UTF-8" ?>
<!-- COSBench workload: point the storage element at the MinIO NodePort
     with the MinIO access/secret keys, then run the staged benchmark. -->
<workload name="s3-sample" description="sample benchmark for s3">
  <storage type="s3" config="accesskey=minio;secretkey=minio123;endpoint=http://10.1.11.*:30780" />
  <workflow>
    <!-- Create buckets s3testqwer1..s3testqwer2. -->
    <workstage name="init">
      <work type="init" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
    </workstage>
    <!-- Pre-write objects 1..10 (128 KB each) for the read phase. -->
    <workstage name="prepare">
      <work type="prepare" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,10);sizes=c(128)KB" />
    </workstage>
    <!-- Mixed load: 80% reads of objects 1..10, 20% writes of 11..20,
         8 workers for 30 seconds. -->
    <workstage name="main">
      <work name="main" workers="8" runtime="30">
        <operation type="read" ratio="80" config="cprefix=s3testqwer;containers=u(1,2);objects=u(1,10)" />
        <operation type="write" ratio="20" config="cprefix=s3testqwer;containers=u(1,2);objects=u(11,20);sizes=c(128)KB" />
      </work>
    </workstage>
    <!-- Delete all objects, then the buckets themselves. -->
    <workstage name="cleanup">
      <work type="cleanup" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,20)" />
    </workstage>
    <workstage name="dispose">
      <work type="dispose" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
    </workstage>
  </workflow>
</workload>