# Cluster layout
k8s-master 192.168.1.111
k8s-node1  192.168.1.113
k8s-node2  192.168.1.116
------------------------- Run on all nodes
cat >> /etc/hosts << EOF
192.168.1.111 k8s-master
192.168.1.113 k8s-node1
192.168.1.116 k8s-node2
EOF
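A quick connectivity check from each node (a simple sketch, assuming all three hosts are already up):
# Every hostname should answer one ping.
for h in k8s-master k8s-node1 k8s-node2; do ping -c 1 $h; done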
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
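Note that vm.swappiness = 0 only discourages swapping; kubeadm's preflight checks still fail while swap is active, so swap is normally disabled outright. A sketch (the sed pattern assumes a conventional swap line in /etc/fstab):
# Turn swap off now and keep it off across reboots.
swapoff -a
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
# Verify the bridge and forwarding settings took effect.
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward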
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
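On kernels 4.19 and newer, nf_conntrack_ipv4 was merged into nf_conntrack, so the last modprobe in the script above fails there. A version-aware variant (a sketch; CentOS 7's stock 3.10 kernel takes the else branch):
# Load the conntrack module that matches the running kernel.
if [ "$(printf '%s\n' "$(uname -r | cut -d- -f1)" 4.19 | sort -V | head -n1)" = "4.19" ]; then
  modprobe -- nf_conntrack
else
  modprobe -- nf_conntrack_ipv4
fi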
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install containerd.io-1.4.4 -y
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml
systemctl daemon-reload
systemctl enable containerd
systemctl restart containerd
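A quick check that the three sed edits landed and containerd is healthy (assuming the 1.4.x default config layout):
# Should show the mirrored image repository, SystemdCgroup = true, and 'active'.
grep -E 'sandbox_image|SystemdCgroup|registry.cn-hangzhou' /etc/containerd/config.toml
systemctl is-active containerd
ctr version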
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubeadm-1.20.5 kubectl-1.20.5 kubelet-1.20.5
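Confirm that all three components installed at the pinned version:
kubeadm version -o short
kubelet --version
kubectl version --client --short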
crictl config runtime-endpoint unix:///run/containerd/containerd.sock
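crictl stores this endpoint in /etc/crictl.yaml; a quick probe against containerd:
# Should report containerd status; NetworkReady stays false until a CNI plugin is installed.
crictl info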
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet
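At this point kubelet restarts in a loop because it has no configuration yet; that is expected and resolves after kubeadm init (or join). To watch it:
journalctl -u kubelet --no-pager | tail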
------------------------- Run on the master
kubeadm config print init-defaults > kubeadm.yaml
# Edit kubeadm.yaml: change the following fields
advertiseAddress: 192.168.1.111
criSocket: /run/containerd/containerd.sock
imageRepository: registry.aliyuncs.com/google_containers
kubernetesVersion: v1.20.5
# Under networking, add podSubnet at the same level as dnsDomain: cluster.local
podSubnet: 172.16.0.0/16
# Append the following at the end of the file
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
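With the config complete, the control-plane images can be pre-pulled so init itself is faster (optional; assumes the Aliyun mirror above is reachable):
kubeadm config images pull --config kubeadm.yaml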
kubeadm init --config=kubeadm.yaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
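A quick sanity check that kubectl can reach the API server:
kubectl cluster-info
# The master reports NotReady until a CNI plugin (Calico, below) is installed.
kubectl get nodes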
------------------------- Run on the worker nodes
kubeadm join 192.168.1.111:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:489e5c81a2925274bc21e8ab97af2ffac0e8b83e5bcb8f86bd9df19f83d4cd26
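The token and hash come from the kubeadm init output; if that output was lost, print a fresh join command on the master:
kubeadm token create --print-join-command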
------------------------- Run on the master
yum install wget -y
mkdir -p /data/yaml/default/calico
cd /data/yaml/default/calico
wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
vi calico.yaml
# In the calico-node container spec, make the following changes:
        - name: calico-node
          image: calico/node:v3.8.9
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Add this variable; set the interface name to match your system.
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth0"
            # Find CALICO_IPV4POOL_CIDR and change it to 172.16.0.0/16 (must match podSubnet).
            - name: CALICO_IPV4POOL_CIDR
              value: "172.16.0.0/16"
kubectl apply -f calico.yaml
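Watch the rollout; nodes turn Ready once calico-node is Running on each of them (the k8s-app label is set by the manifest):
kubectl get pods -n kube-system -l k8s-app=calico-node -w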
# Verify: all nodes should report Ready
kubectl get nodes
DNS resolution test
kubectl run -it --rm dns-test --image=busybox:1.28.4 -- sh
# Run the following and check that the kubernetes service name resolves
nslookup kubernetes
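With kubeadm's default service CIDR (10.96.0.0/12), kubernetes should resolve to 10.96.0.1; the fully qualified name is also worth testing:
nslookup kubernetes.default.svc.cluster.local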