Set the hostname
Set the hostname on each virtual machine:
hostnamectl set-hostname harbor
hostnamectl set-hostname master1
hostnamectl set-hostname master2
hostnamectl set-hostname node1
hostnamectl set-hostname node2
hostnamectl set-hostname node3
Reboot the virtual machines.
Configure hosts resolution
cat >> /etc/hosts << EOF
192.168.10.20 harbor
192.168.10.20 hub.test.com
192.168.10.21 k8s.test.com
192.168.10.22 master1
192.168.10.23 master2
192.168.10.24 node1
192.168.10.25 node2
192.168.10.26 node3
EOF
Distribute the hosts file to the other virtual machines:
scp /etc/hosts master1:/etc/
scp /etc/hosts master2:/etc/
scp /etc/hosts node1:/etc/
scp /etc/hosts node2:/etc/
scp /etc/hosts node3:/etc/
Set up passwordless SSH login
ssh-keygen
ssh-copy-id -i /root/.ssh/id_rsa.pub root@harbor
ssh-copy-id -i /root/.ssh/id_rsa.pub root@master1
ssh-copy-id -i /root/.ssh/id_rsa.pub root@master2
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node1
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node2
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node3
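A quick verification sketch, assuming all six hosts are up: loop over each host and run a command non-interactively.
for h in harbor master1 master2 node1 node2 node3; do
  ssh -o BatchMode=yes root@$h hostname   # BatchMode makes ssh fail instead of prompting if the key was not installed
done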
Install bind
Log in to harbor and install bind:
yum install bind -y
sed -i 's/{ 127.0.0.1; };/{ 192.168.10.20; };/' /etc/named.conf
sed -i 's/dnssec-enable yes;/dnssec-enable no;/' /etc/named.conf
sed -i 's/dnssec-validation yes;/dnssec-validation no; forwarders { 192.168.10.2; };/' /etc/named.conf
sed -i 's/allow-query { localhost; };/allow-query { any; };/' /etc/named.conf
cat >> /etc/named.rfc1912.zones << EOF
zone "test.com" IN {
type master;
file "test.com.zone";
allow-update { 192.168.10.20; };
};
zone "abc.com" IN {
type master;
file "abc.com.zone";
allow-update { 192.168.10.20; };
};
EOF
vi /var/named/test.com.zone
$ORIGIN test.com.
$TTL 600        ; 10 minutes
@       IN SOA  dns.test.com. dnsadmin.test.com. (
                2020091502 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
        NS      dns.test.com.
$TTL 60 ; 1 minute
dns     A       192.168.10.20
harbor  A       192.168.10.20
hub     A       192.168.10.20
k8s     A       192.168.10.21
master1 A       192.168.10.22
master2 A       192.168.10.23
node1   A       192.168.10.24
node2   A       192.168.10.25
node3   A       192.168.10.26
vi /var/named/abc.com.zone
$ORIGIN abc.com.
$TTL 600        ; 10 minutes
@       IN SOA  dns.abc.com. dnsadmin.abc.com. (
                2020091501 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
        NS      dns.abc.com.
$TTL 60 ; 1 minute
dns     A       192.168.10.20
harbor  A       192.168.10.20
named-checkconf
systemctl start named && systemctl enable named
Point every virtual machine's DNS at harbor:
sed -i 's/nameserver 114.114.114.114/nameserver 192.168.10.20/' /etc/resolv.conf
sed -i 's/DNS1=114.114.114.114/DNS1=192.168.10.20/' /etc/sysconfig/network-scripts/ifcfg-eth0
service network restart
dig -t A master1.test.com @192.168.10.20 +short
Install NTP
Install chrony on every virtual machine:
yum -y install chrony
timedatectl set-timezone Asia/Shanghai
Settings on the harbor virtual machine
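The harbor-side settings were not captured here; a minimal sketch, assuming harbor serves time to the 192.168.10.0/24 subnet:
cat >> /etc/chrony.conf << EOF
allow 192.168.10.0/24    # assumed subnet: let the other VMs sync from this host
local stratum 10         # keep serving time even if upstream NTP is unreachable
EOF
systemctl restart chronyd && systemctl enable chronyd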
Settings on the other virtual machines
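Likewise a sketch for the clients, pointing them at harbor (192.168.10.20) as their only time source:
sed -i 's/^server /#server /' /etc/chrony.conf    # comment out the default upstream servers
echo "server 192.168.10.20 iburst" >> /etc/chrony.conf
systemctl restart chronyd && systemctl enable chronyd
chronyc sources    # harbor should appear as the selected source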
List the Docker versions currently available to install:
yum list docker-ce --showduplicates | sort -r
Install Docker:
yum update -y && yum install -y \
containerd.io-1.2.13 \
docker-ce-19.03.11 \
docker-ce-cli-19.03.11
Start the Docker service:
systemctl enable docker && systemctl start docker
Set Docker's cgroup driver to systemd:
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
systemctl restart docker
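After the restart, the driver change can be verified:
docker info | grep -i cgroup    # should report: Cgroup Driver: systemd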
Install the Kubernetes cluster
Install kubeadm, kubelet, and kubectl
Install on master1, master2, node1, node2, and node3.
Add the Kubernetes yum repository:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install kubelet, kubeadm, and kubectl:
yum install -y kubelet kubeadm kubectl
Enable the kubelet service:
systemctl enable kubelet
Initialize the Kubernetes cluster
Check the installed Kubernetes version:
rpm -qa | grep kube
Download the required images
Cluster setup needs the Kubernetes component images and the Calico network images.
Kubernetes component images
List the Kubernetes component images:
kubeadm config images list
Pull the Kubernetes component images:
docker pull k8s.gcr.io/kube-apiserver:v1.19.1
docker pull k8s.gcr.io/kube-controller-manager:v1.19.1
docker pull k8s.gcr.io/kube-scheduler:v1.19.1
docker pull k8s.gcr.io/kube-proxy:v1.19.1
docker pull k8s.gcr.io/pause:3.2
docker pull k8s.gcr.io/etcd:3.4.13-0
docker pull k8s.gcr.io/coredns:1.7.0
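If k8s.gcr.io is unreachable, the same images can usually be pulled from the Aliyun mirror that kubeadm-config.yaml uses later, then retagged; a sketch, assuming the mirror carries all seven tags:
for img in kube-apiserver:v1.19.1 kube-controller-manager:v1.19.1 kube-scheduler:v1.19.1 \
  kube-proxy:v1.19.1 pause:3.2 etcd:3.4.13-0 coredns:1.7.0; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done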
Calico network image download
Installing a network plugin ensures network connectivity between nodes, between pods, and between nodes and pods. Different CNI plugins support different feature sets; Kubernetes supports a number of open-source CNI plugins, common ones being flannel, calico, canal, and weave.
Calico release history:
https://docs.projectcalico.org/releases
Download the network plugin
Download the Calico 3.16 manifest:
wget https://docs.projectcalico.org/v3.16/manifests/calico.yaml
List the network images it references:
grep image calico.yaml
Pull the network images:
docker pull calico/cni:v3.16.1
docker pull calico/pod2daemon-flexvol:v3.16.1
docker pull calico/node:v3.16.1
docker pull calico/kube-controllers:v3.16.1
Export the images in bulk and copy them to the other virtual machines
docker images
docker save $(docker images | grep -v REPOSITORY | awk 'BEGIN{OFS=":";ORS=" "}{print $1,$2}') -o k8s-master.tar
Copy the image archive to the other virtual machines:
scp k8s-master.tar master2:/root/
Import the required images
Import the images on the other virtual machines:
docker load < k8s-master.tar
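Only master2 is shown above; a sketch that distributes and loads the archive on every remaining host in one pass:
for h in master2 node1 node2 node3; do
  scp k8s-master.tar $h:/root/
  ssh $h "docker load < /root/k8s-master.tar"
done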
Install and configure keepalived
master1 and master2 are the Kubernetes master nodes. keepalived is installed on both so that a single node failure does not make the control plane unavailable.
Install keepalived
Install keepalived on the master1 and master2 nodes:
yum -y install keepalived
Create the port-check script
Create it on master1 and master2:
vi /etc/keepalived/check_port.sh
#!/bin/bash
# keepalived port-monitoring script
# Usage: reference it from keepalived.conf like this:
# vrrp_script check_port {                           # define a vrrp_script check
#     script "/etc/keepalived/check_port.sh 16443"   # port to monitor
#     interval 2                                     # check interval, in seconds
# }
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    PORT_PROCESS=$(ss -lnt | grep $CHK_PORT | wc -l)
    if [ $PORT_PROCESS -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used,End."
        exit 1
    fi
else
    echo "Check Port Cannot Be Empty!"
    exit 1
fi
chmod +x /etc/keepalived/check_port.sh
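A quick usage check; the script prints nothing and exits 0 once something is listening on the port (haproxy, configured below, listens on 16443):
/etc/keepalived/check_port.sh 16443; echo $?    # 1 before haproxy is up, 0 afterwards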
Configure keepalived (master)
master1 is the keepalived master node.
keepalived master: 192.168.10.22
vi /etc/keepalived/keepalived.conf
Delete the existing contents first, then add:
! Configuration File for keepalived
global_defs {
    router_id 192.168.10.22
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 16443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 192.168.10.22
    nopreempt
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        192.168.10.21
    }
}
Configure keepalived (backup)
master2 is the keepalived backup node.
keepalived backup: 192.168.10.23
vi /etc/keepalived/keepalived.conf
Delete the existing contents first, then add:
! Configuration File for keepalived
global_defs {
    router_id 192.168.10.23
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 16443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    mcast_src_ip 192.168.10.23
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        192.168.10.21
    }
}
Install and configure the haproxy proxy
Install haproxy on master1 and master2:
yum install haproxy -y
Configure haproxy
Configure haproxy on master1 and master2:
mkdir -p /etc/haproxy
vi /etc/haproxy/haproxy.cfg
global
    maxconn 2000
    ulimit-n 16384
    log 127.0.0.1 local0 err
    stats timeout 30s
defaults
    log global
    mode http
    option httplog
    timeout connect 5000
    timeout client 50000
    timeout server 50000
    timeout http-request 15s
    timeout http-keep-alive 15s
frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor
listen stats
    bind *:8006
    mode http
    stats enable
    stats hide-version
    stats uri /stats
    stats refresh 30s
    stats realm Haproxy\ Statistics
    stats auth admin:admin
frontend k8s-master
    bind 0.0.0.0:16443
    bind 127.0.0.1:16443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-master
backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server master1 192.168.10.22:6443 check
    server master2 192.168.10.23:6443 check
Start the proxy and check it:
systemctl enable haproxy && systemctl start haproxy
Start keepalived:
systemctl start keepalived.service && systemctl enable keepalived
Check:
ip addr
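On the keepalived master, ip addr should show the VIP 192.168.10.21 on eth0. The haproxy monitor endpoint configured above can also be probed as a quick check:
curl http://192.168.10.21:33305/monitor    # the monitor-uri frontend should answer 200 OK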
Cluster initialization
Create the initialization configuration
master1 is initialized first; master2 joins afterwards.
vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.22
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.10.21
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.10.21:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.19.1
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/16
  serviceSubnet: 192.168.20.0/24
scheduler: {}
To migrate the configuration file to the latest schema (this produces the new.yaml used below):
kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml
Initialize the cluster:
kubeadm init --config /root/new.yaml --upload-certs
Set up the kubectl admin config on master1:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
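kubectl on master1 should now reach the API server through the VIP:
kubectl get nodes    # master1 is listed, NotReady until the Calico plugin is applied below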
Join master2 as a control-plane node:
kubeadm join 192.168.10.21:16443 --token 7t2weq.bjbawausm0jaxury \
--discovery-token-ca-cert-hash sha256:0ea75536cc7361fc9647139ad6498dcbd0acb6fed124c3d9015d15ca5a1a63ed \
--control-plane --certificate-key a5d5d02f3f22ddb83c3c4623e5038e8fcaba6cc7a465266488bbb2e1bc0848bd
Join the worker nodes
node1, node2, and node3 join the cluster:
kubeadm join 192.168.10.21:16443 --token 7t2weq.bjbawausm0jaxury \
--discovery-token-ca-cert-hash sha256:0ea75536cc7361fc9647139ad6498dcbd0acb6fed124c3d9015d15ca5a1a63ed
View the tokens:
kubectl get secret -n kube-system
Check a join token's expiration time
The two bootstrap-token entries in the output are the cluster join tokens; inspect one in detail:
kubectl get secret -n kube-system bootstrap-token-7t2weq -oyaml
In the output, expiration: MjAyMC0wOS0xN1QwODowNDozNiswODowMA== is the expiration time, base64-encoded.
Decoding it shows the token expires at 2020-09-17T08:04:36+08:00:
echo "MjAyMC0wOS0xN1QwODowNDozNiswODowMA==" | base64 --decode
Delete the old tokens and generate new ones
Delete the old tokens:
kubectl delete secret -n kube-system bootstrap-token-7t2weq
kubectl delete secret -n kube-system bootstrap-token-j1e2u8
Generate a new join token for worker nodes:
kubeadm token create --print-join-command
kubeadm join 192.168.10.21:16443 --token 0pc550.eh272x8n21xla62o --discovery-token-ca-cert-hash sha256:0ea75536cc7361fc9647139ad6498dcbd0acb6fed124c3d9015d15ca5a1a63ed
Generate a new join token for a master node
The only difference between the master and node join commands is that the master one also passes --control-plane --certificate-key.
Extract a new certificate key for --control-plane --certificate-key:
kubeadm init phase upload-certs --upload-certs
Add these parameters to the join command generated above:
kubeadm join 192.168.10.21:16443 --token 0pc550.eh272x8n21xla62o \
--discovery-token-ca-cert-hash sha256:0ea75536cc7361fc9647139ad6498dcbd0acb6fed124c3d9015d15ca5a1a63ed \
--control-plane --certificate-key b5eb71bfe7bbf02266dc2249a4a7826592f7bc3f345663ee2a5680eaaffdf2c6
Install the Calico plugin
Check the cluster status:
kubectl get node -o wide
The nodes show NotReady because the Calico network plugin has not yet been applied; the Calico images themselves were loaded earlier.
Modify the calico.yaml configuration file
The calico.yaml downloaded earlier cannot be applied as-is; it needs to be modified first.
vi calico.yaml
Add the following under the calico-node container's env section:
- name: IP_AUTODETECTION_METHOD
  value: "interface=eth0"
Apply the Calico network plugin:
kubectl apply -f calico.yaml
Check the cluster status again:
kubectl get node -o wide
Verify that the Kubernetes system components are running properly
All Kubernetes system component pods live in the kube-system namespace:
kubectl get pods -n kube-system