Manually installing a Kubernetes 1.8.3 cluster
[TOC]
Overview
This article covers a highly available deployment of the Kubernetes masters. The smallest master cluster is two nodes, while an etcd cluster needs at least three members to stay highly available. In our actual deployment, to improve resource utilization on Alibaba Cloud, we dropped one master and ran the third etcd member on a slave node. The main deployment steps follow.
Installing the etcd cluster
The etcd installation steps below are taken from @初杨's article 玩转阿里云上Kubernetes 1.7.2 高可用部署 (Kubernetes 1.7.2 high-availability deployment on Alibaba Cloud).
Assume 192.168.0.1 has been chosen as the control machine from which all commands are run. First log in to it with ssh root@192.168.0.1, then deploy an etcd cluster across all master nodes.
Download the deployment script
root@192.168.0.1 # curl https://aliacs-k8s-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/public/pkg/run/1.0/kuberun.sh > kuberun.sh
root@192.168.0.1 # chmod +x kuberun.sh
Deploy the etcd cluster with a single command
root@192.168.0.1 # ./kuberun.sh --role deploy-etcd \
--hosts 192.168.0.1,192.168.0.2,192.168.0.3 \
--etcd-version v3.0.17
The --hosts parameter above specifies the machines on which the etcd components will be deployed, and --etcd-version specifies the etcd version (currently v3.0.17 is supported). When the command finishes, a highly available etcd cluster is up, with the peer and client certificates stored under /var/lib/etcd/cert.
Verification
Check that the etcd processes are running with ps -eaf|grep etcd.
Then verify cluster health with the following command:
root@192.168.0.1 # etcdctl --endpoints=https://192.168.0.1:2379 \
--ca-file=/var/lib/etcd/cert/ca.pem \
--cert-file=/var/lib/etcd/cert/etcd-client.pem \
--key-file=/var/lib/etcd/cert/etcd-client-key.pem \
cluster-health
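If the cluster is healthy, each member should be reported as healthy. As an optional extra check, the member list can be queried through all three endpoints at once; this is a sketch that reuses the same certificate paths shown above:
# list all etcd members over TLS; all three members should appear
etcdctl --endpoints=https://192.168.0.1:2379,https://192.168.0.2:2379,https://192.168.0.3:2379 \
  --ca-file=/var/lib/etcd/cert/ca.pem \
  --cert-file=/var/lib/etcd/cert/etcd-client.pem \
  --key-file=/var/lib/etcd/cert/etcd-client-key.pem \
  member list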
Note:
If the etcd deployment fails at any point, you can clean up the environment with ./kuberun.sh --role destroy-etcd --hosts 192.168.0.1,192.168.0.2,192.168.0.3 --etcd-version v3.0.17 and try again.
Deploying the first Kubernetes master (192.168.0.1)
This example is performed on CentOS 7.4.
Install Docker
root@192.168.0.1 # curl -O https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-17.03.0.ce-1.el7.centos.x86_64.rpm
root@192.168.0.1 # yum localinstall -y docker-engine-17.03.0.ce-1.el7.centos.x86_64.rpm
Install the Kubernetes master components
root@192.168.0.1 # wget https://raw.githubusercontent.com/liubin-cm/kube-binary/master/kubeadm-1.8.3-0.x86_64.rpm
root@192.168.0.1 # wget https://raw.githubusercontent.com/liubin-cm/kube-binary/master/kubectl-1.8.3-0.x86_64.rpm
root@192.168.0.1 # wget https://raw.githubusercontent.com/liubin-cm/kube-binary/master/kubelet-1.8.3-0.x86_64.rpm
root@192.168.0.1 # wget https://raw.githubusercontent.com/liubin-cm/kube-binary/master/kubernetes-cni-0.5.1-1.x86_64.rpm
root@192.168.0.1 # yum install -y socat
root@192.168.0.1 # rpm -ivh *.rpm
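As a quick, optional sanity check, confirm the packages installed and that the tool versions match 1.8.3:
# verify the kubeadm/kubelet/kubectl/cni packages are installed at the expected versions
rpm -qa | grep -E 'kubeadm|kubelet|kubectl|kubernetes-cni'
kubeadm version
kubelet --version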
Preparation before starting
root@192.168.0.1 # setenforce 0
root@192.168.0.1 # sed -i '/net.bridge.bridge-nf-call-iptables/d' /usr/lib/sysctl.d/00-system.conf
root@192.168.0.1 # sed -i '$a net.bridge.bridge-nf-call-iptables = 1' /usr/lib/sysctl.d/00-system.conf
root@192.168.0.1 # echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
root@192.168.0.1 # iptables -P FORWARD ACCEPT
root@192.168.0.1 # sed -i "/ExecStart=/a\ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /lib/systemd/system/docker.service
root@192.168.0.1 # systemctl daemon-reload ; systemctl enable docker.service; systemctl restart docker.service
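Before going on, it is worth confirming that the bridge netfilter setting took effect, the FORWARD policy is ACCEPT, and Docker restarted cleanly; a small check like the following is enough:
# expect "net.bridge.bridge-nf-call-iptables = 1", "-P FORWARD ACCEPT", and "active"
sysctl net.bridge.bridge-nf-call-iptables
iptables -S FORWARD | head -1
systemctl is-active docker.service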
Pull the required images
root@192.168.0.1 # docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root@192.168.0.1 # docker tag registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/flannel:v0.9.0-amd64
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/flannel:v0.9.0-amd64 quay.io/coreos/flannel:v0.9.0-amd64
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-dnsmasq-nanny-amd64:1.14.5
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-dnsmasq-nanny-amd64:1.14.5 gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-kube-dns-amd64:1.14.5
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-kube-dns-amd64:1.14.5 gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-sidecar-amd64:1.14.5
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/k8s-dns-sidecar-amd64:1.14.5 gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-apiserver-amd64:v1.8.3
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-apiserver-amd64:v1.8.3 gcr.io/google_containers/kube-apiserver-amd64:v1.8.3
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-controller-manager-amd64:v1.8.3
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-controller-manager-amd64:v1.8.3 gcr.io/google_containers/kube-controller-manager-amd64:v1.8.3
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-scheduler-amd64:v1.8.3
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-scheduler-amd64:v1.8.3 gcr.io/google_containers/kube-scheduler-amd64:v1.8.3
root@192.168.0.1 # docker pull registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-proxy-amd64:v1.8.3
root@192.168.0.1 # docker tag registry-vpc.cn-shanghai.aliyuncs.com/youmik8s/kube-proxy-amd64:v1.8.3 gcr.io/google_containers/kube-proxy-amd64:v1.8.3
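The pull-and-tag pairs above are repetitive; for the images that come from the youmik8s mirror they can also be expressed as a loop. This is only an equivalent sketch of the same commands (the pause image from the google-containers mirror is still handled separately above):
# pull each image from the VPC mirror, then retag it under the name kubeadm and flannel expect
MIRROR=registry-vpc.cn-shanghai.aliyuncs.com/youmik8s
for img in \
  "flannel:v0.9.0-amd64 quay.io/coreos" \
  "k8s-dns-dnsmasq-nanny-amd64:1.14.5 gcr.io/google_containers" \
  "k8s-dns-kube-dns-amd64:1.14.5 gcr.io/google_containers" \
  "k8s-dns-sidecar-amd64:1.14.5 gcr.io/google_containers" \
  "kube-apiserver-amd64:v1.8.3 gcr.io/google_containers" \
  "kube-controller-manager-amd64:v1.8.3 gcr.io/google_containers" \
  "kube-scheduler-amd64:v1.8.3 gcr.io/google_containers" \
  "kube-proxy-amd64:v1.8.3 gcr.io/google_containers"; do
  set -- $img                       # $1 = name:tag, $2 = target registry prefix
  docker pull "$MIRROR/$1"
  docker tag "$MIRROR/$1" "$2/$1"
done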
Write the kubeadm initialization configuration file
Save the following configuration as /etc/kubeadm/kubeadm.cfg. Note that podSubnet must match the Network field in flannel's net-conf.json further below (both are 172.16.0.0/16 in this example).
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
networking:
  dnsDomain: cluster.local
  serviceSubnet: 172.19.0.0/16
  podSubnet: 172.16.0.0/16
kubernetesVersion: v1.8.3
etcd:
  endpoints:
  - https://192.168.0.1:2379
  - https://192.168.0.2:2379
  - https://192.168.0.3:2379
  caFile: /etc/kubernetes/pki/etcd/ca.pem
  certFile: /etc/kubernetes/pki/etcd/etcd-client.pem
  keyFile: /etc/kubernetes/pki/etcd/etcd-client-key.pem
apiServerCertSANs:
- 192.168.0.4
- 192.168.0.1
- 192.168.0.2
- 192.168.0.3
nodeName: 192.168.0.1
Run the kubeadm initialization command
root@192.168.0.1 # mkdir -p /etc/kubernetes/pki/etcd
root@192.168.0.1 # cp -rf /var/lib/etcd/cert/{ca.pem,etcd-client.pem,etcd-client-key.pem} /etc/kubernetes/pki/etcd/
root@192.168.0.1 # kubeadm init --config=/etc/kubeadm/kubeadm.cfg
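Once kubeadm init finishes, you can confirm that the extra IPs listed in apiServerCertSANs made it into the generated apiserver certificate. A quick check with openssl (the certificate path is the default one written by kubeadm):
# the Subject Alternative Name entries should include 192.168.0.1 through 192.168.0.4
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A1 "Subject Alternative Name"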
This example uses flannel as the network add-on.
After the initialization completes, apply flannel.yml:
root@192.168.0.1 # export KUBECONFIG=/etc/kubernetes/admin.conf
root@192.168.0.1 # kubectl apply -f flannel.yml
where flannel.yml is:
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "172.16.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conf
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
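After the manifest is applied, you can watch the flannel DaemonSet and the rest of the kube-system pods come up, and check that the node turns Ready:
# kube-flannel-ds, kube-dns and the control-plane pods should all reach Running
kubectl get pods -n kube-system -o wide
kubectl get nodes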
At this point, the first master has been deployed.
Deploying the second Kubernetes master (192.168.0.2)
Copy the certificates
Copy the whole /etc/kubernetes/pki directory from the first master to 192.168.0.2:
root@192.168.0.2 # scp -r root@192.168.0.1:/etc/kubernetes/pki /etc/kubernetes/
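Optionally, confirm that both masters now hold identical key material, for example by comparing checksums of a couple of the copied files (a sketch; any digest tool will do):
# the checksums printed locally and on the first master should match
md5sum /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/sa.key
ssh root@192.168.0.1 "md5sum /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/sa.key"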
Repeat the deployment steps from the first master:
Install Docker
Install the Kubernetes master components
Preparation before starting
Pull the required images
Write the kubeadm initialization configuration file
In this step, change nodeName: 192.168.0.1 to nodeName: 192.168.0.2 (a one-line sed for this is sketched after these steps).
Run the kubeadm initialization command
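For the nodeName change mentioned above, a one-line edit works if the configuration was saved to the same path as on the first master (an illustrative sed; adjust the path if yours differs):
# point nodeName at the second master before running kubeadm init
sed -i 's/^nodeName: 192.168.0.1$/nodeName: 192.168.0.2/' /etc/kubeadm/kubeadm.cfg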
At this point, a two-node Kubernetes master setup is complete.
The key point in a multi-master deployment is that all master nodes must share the same certificates.
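To confirm that both control planes are actually serving, you can hit each apiserver's healthz endpoint directly and list the nodes (a sketch; 6443 is the default apiserver port used by kubeadm, and -k skips TLS verification only for this quick check):
# both masters should answer "ok", and both should appear as Ready nodes
curl -k https://192.168.0.1:6443/healthz
curl -k https://192.168.0.2:6443/healthz
kubectl get nodes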