I. Host environment setup (CentOS 7.6)
1. Hostname setup
# Set on each host respectively
# hostnamectl set-hostname master
# hostnamectl set-hostname node1
# hostnamectl set-hostname node2
2. Bind hostnames in /etc/hosts
# Identical on all hosts
# cat /etc/hosts
::1            localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1      localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.11.111 master
192.168.11.112 node1
192.168.11.113 node2
3. Static IP setup
# Identical on all hosts except IPADDR: master 192.168.11.111, node1 192.168.11.112, node2 192.168.11.113
# cat /etc/sysconfig/network-scripts/ifcfg-eth0
TYPE="Ethernet"
BOOTPROTO="static"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="192.168.11.111"
PREFIX="24"
GATEWAY="192.168.11.253"
# Note: this step can be skipped on Alibaba Cloud ECS
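The new address only takes effect after the network service is restarted. A minimal check on CentOS 7 (run this from the console rather than over SSH, since the connection will drop when the IP changes):
# systemctl restart network
# ip addr show eth0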
4. Disable SELinux
Set as follows on all hosts:
# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
A reboot is required for the change to take effect.
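If an immediate reboot is inconvenient, SELinux can also be switched to permissive mode for the current boot (a stopgap only; the config change above still handles subsequent boots):
# setenforce 0
# getenforce
Permissive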
Check the SELinux status:
[root@node2 ~]# sestatus
SELinux status:                 disabled
5. NTP time synchronization
Synchronize the time on all hosts:
# ntpdate time1.aliyun.com
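ntpdate is a one-shot sync, so clocks will drift again over time. One common workaround (an assumption, not part of the original steps; chronyd would be an alternative) is a periodic cron entry:
# crontab -e
# re-sync against the same NTP server every 30 minutes (illustrative interval)
*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com &>/dev/null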
6. Disable swap
Disable swap on all hosts.
Temporarily disable swap with:
# swapoff -a
Permanently disable it:
# cat /etc/fstab
...
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
# Commenting out the swap line above is all that is needed
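If you prefer not to edit the file by hand, a one-liner can comment the entry out (a sketch: it comments every non-commented line containing "swap", so verify the result with cat afterwards):
# sed -ri '/^[^#].*swap/s/^/#/' /etc/fstab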
7. Firewall
Configure the firewall on all hosts.
Disable firewalld:
# systemctl disable firewalld
# systemctl stop firewalld
[root@master ~]# firewall-cmd --state
not running
Install and configure iptables:
# yum -y install iptables-services
# systemctl enable iptables
# systemctl stop iptables
# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# iptables -P FORWARD ACCEPT
# service iptables save
-F: flush all rules from every chain of the given table
-X: delete user-defined chains
-P: set the default policy of a chain
When no table is specified, the filter table is assumed.
8. Bridge traffic filtering
Configure bridge filtering on all hosts. Note: the net.bridge.* keys only exist once the br_netfilter module is loaded, so run modprobe before sysctl -p.
# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
# modprobe br_netfilter
# lsmod | grep br_netfilter
# sysctl -p /etc/sysctl.d/k8s.conf
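modprobe does not survive a reboot. One way to load br_netfilter automatically at boot (an assumption, not in the original steps) is a systemd modules-load.d entry:
# cat /etc/modules-load.d/br_netfilter.conf
br_netfilter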
II. Install Docker
Download docker-ce.repo:
# wget https://download.docker.com/linux/centos/docker-ce.repo
# mv docker-ce.repo /etc/yum.repos.d/
Install Docker:
# yum -y install docker-ce
# systemctl enable docker
# systemctl start docker
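A quick sanity check that the daemon is up, and a look at which cgroup driver it uses (this matters for the kubelet configuration in section V):
# docker info | grep -i cgroup
Cgroup Driver: cgroupfs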
III. Install the tools
Configure the k8s repository:
# cat /etc/yum.repos.d/k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
# List every available version of a package
# yum list kubelet --showduplicates | sort
Install the kubectl, kubeadm and kubelet RPM packages:
# Search for every available version of each package
# yum search --showduplicates kubectl
# yum search --showduplicates kubelet
# yum search --showduplicates kubeadm
# Install a specific version by full package name
# yum -y install kubectl-1.14.0-0.x86_64
# yum -y install kubelet-1.14.0-0.x86_64
# yum -y install kubeadm-1.14.0-0.x86_64
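To confirm the intended versions actually landed, a quick check (output abbreviated):
# kubeadm version -o short
v1.14.0
# kubectl version --client --short
Client Version: v1.14.0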
IV. Prepare the images
Pull the images on the master, then copy them to the other nodes over scp.
The required images are:
[root@master images]# ll
total 817672
-rw-r--r-- 1 root root  40290816 Nov 21 11:13 coredns.tar
-rw-r--r-- 1 root root 258365952 Nov 21 11:16 etcd.tar
-rw-r--r-- 1 root root 211355648 Nov 21 11:17 kube-apiserver.tar
-rw-r--r-- 1 root root 159389184 Nov 21 11:19 kube-controller-manager.tar
-rw-r--r-- 1 root root  83978752 Nov 21 11:20 kube-proxy.tar
-rw-r--r-- 1 root root  83145728 Nov 21 11:21 kube-scheduler.tar
-rw-r--r-- 1 root root    754176 Nov 21 11:21 pause.tar
The images can be obtained as follows; keep the kube-xxx image versions in line with the kubeadm toolchain, i.e. v1.14.0.
# docker search xxx:[tag]
# docker pull xxx:[tag]
# docker tag xxx:[tag] k8s.gcr.io/xxx:[tag]
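Since k8s.gcr.io is often unreachable from mainland China, a common workaround is to pull from a mirror registry and retag (the mirror name below is illustrative; substitute any registry that mirrors these images). For example, for the apiserver image:
# mirror registry name is an assumption, not from the original
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.14.0
# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.14.0 k8s.gcr.io/kube-apiserver:v1.14.0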
Package the images:
# docker save xxx:[tag] -o xxx.tar
# scp xxx.tar node1:/root/
# scp xxx.tar node2:/root/
Load the images:
# docker load -i xxx.tar
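On node1 and node2 all the tarballs can be loaded in one pass (a small convenience sketch, assuming they were copied to /root as above):
# for f in /root/*.tar; do docker load -i "$f"; done
# docker images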
V. Initialize the cluster
First configure the kubelet so that its cgroup driver matches Docker's; do this on all hosts.
# DOCKER_CGROUP=$(docker info | grep Cgroup | awk '{print $3}')
# echo $DOCKER_CGROUP
cgroupfs
# cat >/etc/sysconfig/kubelet<<EOF
> KUBELET_EXTRA_ARGS=--cgroup-driver=$DOCKER_CGROUP
> EOF
# cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=--cgroup-driver=cgroupfs
# systemctl daemon-reload
# systemctl enable kubelet
# systemctl start kubelet
Create the kubeadm-config.yaml configuration file and run the initialization command; this is done on the master node only.
# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "192.168.11.111:6443"
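The preflight output below notes that the images can be fetched ahead of time; with the config file in place that is simply:
# kubeadm config images pull --config kubeadm-config.yaml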
Run the initialization:
# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs
[init] Using Kubernetes version: v1.14.0
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [192.168.11.111 127.0.0.1 ::1]
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [192.168.11.111 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.11.111 192.168.11.111]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 18.002245 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in ConfigMap "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key: e293be9299d208b9969ea45d78214876ec9b415855c18dedf4e631574df140d2
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: cyg336.twn31sa08a07cum2
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.11.111:6443 --token cyg336.twn31sa08a07cum2 \
    --discovery-token-ca-cert-hash sha256:4bb761c0a4854cffce4e347bce5f7caa4bc1d2ab930c88002703c657e5940144 \
    --experimental-control-plane --certificate-key e293be9299d208b9969ea45d78214876ec9b415855c18dedf4e631574df140d2

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --experimental-upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.11.111:6443 --token cyg336.twn31sa08a07cum2 \
    --discovery-token-ca-cert-hash sha256:4bb761c0a4854cffce4e347bce5f7caa4bc1d2ab930c88002703c657e5940144

# Join an additional master:
kubeadm join 192.168.11.111:6443 --token cyg336.twn31sa08a07cum2 \
    --discovery-token-ca-cert-hash sha256:4bb761c0a4854cffce4e347bce5f7caa4bc1d2ab930c88002703c657e5940144 \
    --experimental-control-plane --certificate-key e293be9299d208b9969ea45d78214876ec9b415855c18dedf4e631574df140d2

# Join an additional node:
kubeadm join 192.168.11.111:6443 --token cyg336.twn31sa08a07cum2 \
    --discovery-token-ca-cert-hash sha256:4bb761c0a4854cffce4e347bce5f7caa4bc1d2ab930c88002703c657e5940144
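The bootstrap token above expires after 24 hours. If a node needs to join later, a fresh worker join command can be printed on the master at any time:
# kubeadm token create --print-join-command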
Pod network configuration
# Run on the master node
# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
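The weave-net pods must come up on every node before the nodes report Ready; progress can be watched with:
# kubectl get pods -n kube-system -o wide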
Verify that the cluster is usable:
[root@master images]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
Check the cluster nodes:
[root@master images]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   28d   v1.14.0
node1    Ready    <none>   28d   v1.14.0
node2    Ready    <none>   28d   v1.14.0
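As a final smoke test (a hedged example; the deployment name and image are arbitrary), scheduling a pod onto the workers exercises the network and kubelets end to end:
# kubectl create deployment nginx --image=nginx
# kubectl get pods -o wide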