Installing a Highly Available Kubernetes Cluster with kubeadm

Environment preparation:

Hostname   CentOS     IP              VIP              Docker version   Flannel version   Keepalived   Spec
master01   7.6.1810   192.168.100.3   192.168.100.16   18.09.9          v0.11.0           v1.3.5       2C2G
master02   7.6.1810   192.168.100.4   192.168.100.16   18.09.9          v0.11.0           v1.3.5       2C2G
master03   7.6.1810   192.168.100.5   192.168.100.16   18.09.9          v0.11.0           v1.3.5       2C2G
node1      7.6.1810   192.168.100.6   -                18.09.9          v0.11.0           v1.3.5       2C1G
node2      7.6.1810   192.168.100.7   -                18.09.9          v0.11.0           v1.3.5       2C1G
Architecture diagram:

(architecture diagram image)

Preparation:
1. The firewall and SELinux were already disabled and the Aliyun yum mirrors configured during the CentOS install.
2. Disable NetworkManager.
3. Set the hostname on each host.
4. Edit the hosts file.
Initialize the k8s environment; run the following script on every node:
[root@master1 ~]#vim /etc/shell/initialize-k8s.sh
#!/bin/bash
## initialize K8S
########### Set the hostname #########################
read -p "Please enter the hostname for this node: " HOST
hostnamectl set-hostname $HOST
########### Disable SELinux ##########################
setenforce 0
sed -i '/^\bSELINUX\b/s@enforcing@disabled@' /etc/selinux/config
########### Populate /etc/hosts ######################
MASTER1=192.168.100.3
MASTER2=192.168.100.4
MASTER3=192.168.100.5
NODE1=192.168.100.6
NODE2=192.168.100.7
cat >> /etc/hosts <<-EOF
$MASTER1 master1
$MASTER2 master2
$MASTER3 master3
$NODE1 node1
$NODE2 node2
EOF
########### Configure the Docker and Kubernetes repos ###########
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat >> /etc/yum.repos.d/kubernetes.repo <<-EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
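The script is interactive (it prompts for a hostname), so it is run once per node. A few spot-checks afterwards (assumed verification commands, not part of the original write-up):

[root@master1 ~]# bash /etc/shell/initialize-k8s.sh
[root@master1 ~]# getenforce                              # should not report Enforcing
[root@master1 ~]# tail -5 /etc/hosts                      # the five cluster entries should be present
[root@master1 ~]# yum repolist | grep -iE 'docker|kubernetes'   # both repos should be listed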

1. Disable swap:

Run on every node:
[root@master1 shell]# swapoff  -a
[root@master1 shell]# sed -i '/swap/s@\(.*\)@#\1@' /etc/fstab
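A quick verification (assumed commands, not from the original notes) that swap is off now and will stay off after a reboot:

[root@master1 shell]# free -m | grep -i swap      # the Swap line should show 0 total
[root@master1 shell]# grep swap /etc/fstab        # the swap entry should now be commented out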

 

2. Kernel parameter changes:

Run on every node:
[root@master1 sysctl.d]# cat /etc/sysctl.d/k8s-sysctl.conf 
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

[root@master1 sysctl.d]# modprobe br_netfilter
[root@master1 sysctl.d]# sysctl -p /etc/sysctl.d/k8s-sysctl.conf 
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
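modprobe only loads br_netfilter for the current boot. An assumed extra step (not in the original notes) to have it load automatically after a reboot on CentOS 7, via systemd-modules-load:

[root@master1 sysctl.d]# echo br_netfilter > /etc/modules-load.d/k8s.conf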

 

3. Set up passwordless SSH login:

Run this script on master1 to establish mutual key-based trust with the other nodes:
[root@master1 shell]# cat expect-ssh.sh 
#!/bin/bash
##### Set up mutual SSH trust #####################
yum makecache fast
rpm -q expect  

if [ $? -ne 0 ];then
        yum install expect -y 
        echo "expect is install sucessfully"
fi


expect<<-EOF
spawn ssh-keygen
expect {
        "id_rsa" {send "\n";exp_continue}
         "passphrase" {send "\n";exp_continue}
        "again" {send "\n"}
}
spawn ssh-copy-id 192.168.100.4
expect {
        "yes/no" {send "yes\n";exp_continue}
        "password" {send "1\n"}
}

spawn ssh-copy-id 192.168.100.5
expect {
        "yes/no" {send "yes\n";exp_continue}
        "password" {send "1\n"}
}

spawn ssh-copy-id 192.168.100.6
expect {
        "yes/no" {send "yes\n";exp_continue}
        "password" {send "1\n"}
}
spawn ssh-copy-id 192.168.100.7
expect {
        "yes/no" {send "yes\n";exp_continue}
        "password" {send "1\n"}
}
expect eof
EOF

[root@master1 shell]#bash expect-ssh.sh     

 

4. Install Docker 18.09.9:

Run on every node:
[root@master1 ~]# yum install docker-ce-18.09.9 docker-ce-cli-18.09.9 containerd.io -y    

[root@master1 ~]# systemctl start docker 

[root@master1 ~]# systemctl enable docker   

[root@master1 ~]# docker version 
Client:
 Version:           18.09.9
 API version:       1.39
 Go version:        go1.11.13
 Git commit:        039a7df9ba
 Built:             Wed Sep  4 16:51:21 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          18.09.9
  API version:      1.39 (minimum version 1.12)
  Go version:       go1.11.13
  Git commit:       039a7df
  Built:            Wed Sep  4 16:22:32 2019
  OS/Arch:          linux/amd64
  Experimental:     false

 

5. Configure a Docker registry mirror (image acceleration):

 

Run on every node:
[root@master1 ~]# cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://l6ydvf0r.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
[root@master1 ~]# systemctl daemon-reload 
[root@master1 ~]# systemctl restart docker 
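To confirm the settings were picked up (assumed checks, not part of the original notes):

[root@master1 ~]# docker info | grep -i 'cgroup driver'       # should report: Cgroup Driver: systemd
[root@master1 ~]# docker info | grep -A1 'Registry Mirrors'   # should list the aliyuncs mirror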

 

 

6. Install keepalived on the master nodes:

[root@master1 ~]# yum -y install keepalived 

keepalived configuration on master1:
[root@master1 keepalived]# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
        script "/root/shell/check_haproxy.sh"
        interval 3
        }
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.100.16
    }
     track_script {
        check_haproxy
   }
}

keepalived configuration on master2:
[root@master2 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
        script "/root/shell/check_haproxy.sh"
        interval 3
        }
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.100.16
    }
     track_script {
        check_haproxy
   }
}

keepalived configuration on master3:
[root@master3 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
        script "/root/shell/check_haproxy.sh"
        interval 3
        }
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.100.16
    }
     track_script {
        check_haproxy
   }
}

With keepalived configured on all three masters, start and enable it:
[root@master1 keepalived]# systemctl start keepalived && systemctl enable keepalived 
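With all three instances running, the VIP should sit on master1 (priority 100). A quick check, assuming the ens33 interface name from the configs above:

[root@master1 keepalived]# ip addr show ens33 | grep 192.168.100.16   # the VIP should appear here, and only here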

 

7. Install haproxy on the master nodes:

Install haproxy on master1; its configuration:
[root@master1 haproxy]# grep  -v  -E "#|^$" haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  httpclose
    option                  abortonclose
    option                  redispatch
    retries                 3
    timeout connect         5000ms
    timeout client          2h
    timeout server          2h
    timeout check           10s
    maxconn                 32000
frontend  k8s-apiserver
        bind *:8443
        mode tcp
    default_backend             k8s-apiserver
listen stats
mode    http
bind    :10086
stats   enable
stats   uri  /admin?stats
stats   auth admin:admin
stats   admin if TRUE
backend k8s-apiserver
    balance     roundrobin
    server  master1 192.168.100.3:6443 check
    server  master2 192.168.100.4:6443 check
    server  master3 192.168.100.5:6443 check

Check the service status:
[root@master1 haproxy]# systemctl status haproxy 
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; disabled; vendor preset: disabled)
   Active: active (running) since 五 2020-05-29 10:29:55 CST; 9min ago
 Main PID: 47177 (haproxy-systemd)
    Tasks: 3
   Memory: 2.5M
   CGroup: /system.slice/haproxy.service
           ├─47177 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
           ├─47188 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─47200 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

5月 29 10:29:55 master1 systemd[1]: Started HAProxy Load Balancer.
5月 29 10:29:55 master1 haproxy-systemd-wrapper[47177]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

[root@master1 haproxy]# netstat -tanp|grep haproxy 
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      47200/haproxy       
tcp        0      0 0.0.0.0:10086           0.0.0.0:*               LISTEN      47200/haproxy       
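The stats page configured above (port 10086, uri /admin?stats, auth admin:admin) gives a quick view of the three apiserver backends; a hedged check from master1:

[root@master1 haproxy]# curl -su admin:admin 'http://127.0.0.1:10086/admin?stats' | grep -oE 'master[1-3]' | sort -u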
 
 
Install haproxy on the other two masters, copy the configuration over, and restart it:
[root@master1 shell]# ansible all -i 192.168.100.4,192.168.100.5 -m yum -a 'name=haproxy state=present'
[root@master1 haproxy]# ansible all -i 192.168.100.4,192.168.100.5 -m copy -a 'src=/etc/haproxy/haproxy.cfg dest=/etc/haproxy/haproxy.cfg'
[root@master1 haproxy]# ansible all -i 192.168.100.4,192.168.100.5 -m service -a 'name=haproxy state=restarted'

 

8. Write the haproxy health-check script:

[root@master1 shell]# vim /root/shell/check_haproxy.sh 
#!/bin/bash
## Check whether haproxy is healthy; if the haproxy process
## is gone, stop keepalived as well so the VIP fails over.
CHECK_HA=$(systemctl status haproxy &>/dev/null;echo $?)
if [ $CHECK_HA -ne 0 ];then
        pkill keepalived
        echo "haproxy is down, stopping keepalived"
fi

Distribute the script to the other masters:
[root@master1 keepalived]# ansible all -i 192.168.100.4,192.168.100.5 -m copy -a 'src=/root/shell/check_haproxy.sh dest=/root/shell/check_haproxy.sh'

master2 and master3 then need to make the script executable:
[root@master2 shell]# chmod +x check_haproxy.sh
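A simple way to exercise the whole keepalived + haproxy failover path (a hedged test sequence, not part of the original notes): stop haproxy on master1, confirm the check script kills keepalived and the VIP moves to master2, then bring both services back.

[root@master1 ~]# systemctl stop haproxy
[root@master1 ~]# systemctl status keepalived | grep Active    # should now be inactive (killed by check_haproxy.sh)
[root@master2 ~]# ip addr show ens33 | grep 192.168.100.16     # the VIP should have moved here
[root@master1 ~]# systemctl start haproxy && systemctl start keepalived   # restore master1; the VIP preempts back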

 

 

K8S installation:

1. Install kubelet, kubeadm, and kubectl

Run on the three masters:
[root@master1 ~]# yum install kubelet-1.16.4 kubeadm-1.16.4 kubectl-1.16.4 -y
            #kubelet  runs on every node in the cluster and is responsible for starting pods and containers
            #kubeadm  the tool used to bootstrap and initialize the cluster
            #kubectl  the command-line client for talking to the cluster; used to deploy and manage applications and to create, read, update, and delete resources
            
Run on the two worker nodes:
[root@node2 ~]#  yum install kubelet-1.16.4  kubeadm-1.16.4 -y

 

2. Start kubelet:

Run on all nodes (kubelet will keep restarting until kubeadm init/join provides its configuration; that is expected):
[root@master1 ~]# systemctl start kubelet && systemctl enable kubelet 

  

3. Pull the images k8s needs:

Run this script on the three master nodes:
[root@master1 shell]# cat k8s-image.sh 
#!/bin/bash
## Pull the k8s images and re-tag them as k8s.gcr.io

registry_url=registry.cn-hangzhou.aliyuncs.com/loong576
version=v1.16.4
images=$(kubeadm config images list --kubernetes-version=$version | awk -F"/" '{print $2}')
for image_name in ${images[@]};do
        docker pull $registry_url/$image_name 
        docker tag $registry_url/$image_name  k8s.gcr.io/$image_name
        docker rmi $registry_url/$image_name
done

[root@master1 shell]# docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver            v1.16.4             3722a80984a0        5 months ago        217MB
k8s.gcr.io/kube-controller-manager   v1.16.4             fb4cca6b4e4c        5 months ago        163MB
k8s.gcr.io/kube-scheduler            v1.16.4             2984964036c8        5 months ago        87.3MB
k8s.gcr.io/kube-proxy                v1.16.4             091df896d78f        5 months ago        86.1MB
k8s.gcr.io/etcd                      3.3.15-0            b2756210eeab        8 months ago        247MB
k8s.gcr.io/coredns                   1.6.2               bf261d157914        9 months ago        44.1MB
k8s.gcr.io/pause                     3.1                 da86e6ba6ca1        2 years ago         742kB


The worker nodes only need the kube-proxy and pause images:
[root@node1 shell]# docker images 
REPOSITORY              TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy   v1.16.4             091df896d78f        5 months ago        86.1MB
k8s.gcr.io/pause        3.1                 da86e6ba6ca1        2 years ago         742kB

  

4. Initialize the cluster:

[root@master1 conf]# vim  kubeadm.conf 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.100.3                   # this master's IP
  bindPort: 6443                                    # apiserver port
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.16.4
apiServer:
 certSANs:       # list all master IPs, the LB/VIP address, and any other IP, domain, or hostname you might use to reach the apiserver (e.g. a cloud floating IP); these will be allowed in the certificate
 - 192.168.100.3           
 - 192.168.100.4
 - 192.168.100.5
 - 192.168.100.6
 - 192.168.100.7
 - 192.168.100.16
 - master1
 - master2
 - master3
 - node1
 - node2
controlPlaneEndpoint: "192.168.100.16:8443"       # controlPlaneEndpoint is the apiserver service address, i.e. the load balancer's host:port
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
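Before the real init, the config can be sanity-checked with kubeadm's dry-run mode (an optional, assumed step not in the original notes; it renders the manifests into a temporary directory without bringing up the control plane):

[root@master1 conf]# kubeadm init --config=kubeadm.conf --dry-run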

Run on master1:
[root@master1 conf]# kubeadm init --config=kubeadm.conf
.......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities 
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.100.16:8443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:4659965a2ff49020d350d239bc426028735ed1576919e1f31b0b95a812cedab3     --control-plane       

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.100.16:8443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:4659965a2ff49020d350d239bc426028735ed1576919e1f31b0b95a812cedab3
   
Set up kubectl access on master1:
[root@master1 conf]# mkdir -p $HOME/.kube
[root@master1 conf]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master1 conf]# chown $(id -u):$(id -g) $HOME/.kube/config 

Check the components (the bare NAME/AGE output below is a known kubectl 1.16 display quirk of "kubectl get cs", not a cluster problem):
[root@master1 conf]# kubectl get cs 
NAME                 AGE
scheduler            <unknown>
controller-manager   <unknown>
etcd-0               <unknown>

[root@master1 conf]# kubectl get pod -n kube-system 
NAME                              READY   STATUS    RESTARTS   AGE
coredns-5644d7b6d9-24rcr          0/1     Pending   0          19m
coredns-5644d7b6d9-48ptv          0/1     Pending   0          19m
etcd-master1                      1/1     Running   0          18m
kube-apiserver-master1            1/1     Running   0          18m
kube-controller-manager-master1   1/1     Running   0          18m
kube-proxy-5kpmk                  1/1     Running   0          19m
kube-scheduler-master1            1/1     Running   0          18m        

 

Joining the other control-plane nodes to the cluster

1. Distribute the certificates:

[root@master1 shell]# vim send-ca.sh 
#!/bin/bash
# copy the cluster certificates to the other master nodes
for i in 4 5 ;do 
        scp /etc/kubernetes/pki/ca.crt 192.168.100.$i:/root
        scp /etc/kubernetes/pki/ca.key 192.168.100.$i:/root
        scp /etc/kubernetes/pki/sa.key 192.168.100.$i:/root
        scp /etc/kubernetes/pki/sa.pub 192.168.100.$i:/root
        scp /etc/kubernetes/pki/front-proxy-ca.crt 192.168.100.$i:/root
        scp /etc/kubernetes/pki/front-proxy-ca.key 192.168.100.$i:/root
        scp /etc/kubernetes/pki/etcd/ca.crt  192.168.100.$i:/root/etcd-ca.crt
        scp /etc/kubernetes/pki/etcd/ca.key  192.168.100.$i:/root/etcd-ca.key
done

On master2, move the certificates into place:
[root@master2 ~]# mkdir -p /etc/kubernetes/pki/etcd
[root@master2 ~]# mv *.key *.crt *.pub   /etc/kubernetes/pki/
[root@master2 ~]#cd /etc/kubernetes/pki/
[root@master2 pki]# mv etcd-ca.crt etcd/ca.crt
[root@master2 pki]# mv etcd-ca.key etcd/ca.key

Join master2 to the cluster as a control-plane node:
[root@master2 etcd]# kubeadm join 192.168.100.16:8443 --token abcdef.0123456789abcdef  --discovery-token-ca-cert-hash sha256:d81368fcaa3ea2c0f3b669ec413210753757ee539c2eadfd742f2dd9bfe5bdcd  --control-plane

Then set up kubectl on master2:
[root@master2 pki]#  mkdir -p $HOME/.kube
[root@master2 pki]#  cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master2 pki]#  chown $(id -u):$(id -g) $HOME/.kube/config


Check from master2:
[root@master2 etcd]# kubectl get pod -n kube-system 
NAME                              READY   STATUS              RESTARTS   AGE
coredns-5644d7b6d9-24rcr          0/1     ContainerCreating   0          93m
coredns-5644d7b6d9-48ptv          0/1     ContainerCreating   0          93m
etcd-master1                      1/1     Running             0          92m
etcd-master2                      1/1     Running             0          6m41s
kube-apiserver-master1            1/1     Running             0          92m
kube-apiserver-master2            1/1     Running             0          6m41s
kube-controller-manager-master1   1/1     Running             1          92m
kube-controller-manager-master2   1/1     Running             0          6m41s
kube-proxy-5kpmk                  1/1     Running             0          93m
kube-proxy-nv6vw                  1/1     Running             0          6m49s
kube-scheduler-master1            1/1     Running             1          92m
kube-scheduler-master2            1/1     Running             0          6m41s

master3 goes through the same steps as master2:
1. Create the certificate directories;
2. Rename etcd-ca.crt back to ca.crt and move it into /etc/kubernetes/pki/etcd/;
3. Rename etcd-ca.key back to ca.key and move it into /etc/kubernetes/pki/etcd/;
4. Run the kubeadm join command (see the sketch below).
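For reference, a sketch of the exact commands on master3, mirroring the master2 block above (the join token and hash are the ones already shown for master2):

[root@master3 ~]# mkdir -p /etc/kubernetes/pki/etcd
[root@master3 ~]# mv *.key *.crt *.pub /etc/kubernetes/pki/
[root@master3 ~]# cd /etc/kubernetes/pki/
[root@master3 pki]# mv etcd-ca.crt etcd/ca.crt
[root@master3 pki]# mv etcd-ca.key etcd/ca.key
[root@master3 pki]# kubeadm join 192.168.100.16:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:d81368fcaa3ea2c0f3b669ec413210753757ee539c2eadfd742f2dd9bfe5bdcd --control-plane
[root@master3 pki]# mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config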

Check from master3:
[root@master3 etcd]# kubectl get nodes
NAME      STATUS     ROLES    AGE    VERSION
master1   NotReady   master   104m   v1.16.4
master2   NotReady   master   17m    v1.16.4
master3   NotReady   master   100s   v1.16.4

 

Joining the worker nodes to the cluster:

1. Distribute certificates from the master to the node hosts:

[root@master1 pki]#scp front-proxy-client.crt front-proxy-client.key apiserver-kubelet-client.crt apiserver-kubelet-client.key 192.168.100.7:/etc/kubernetes/pki/      # copy the certificates to node2

[root@master1 pki]#scp front-proxy-client.crt front-proxy-client.key apiserver-kubelet-client.crt apiserver-kubelet-client.key 192.168.100.6:/etc/kubernetes/pki/      # copy the certificates to node1

Check the certificates on node1:
[root@node1 pki]# ll
总用量 20
-rw-r--r--. 1 root root 1099 5月  29 12:56 apiserver-kubelet-client.crt
-rw-------. 1 root root 1675 5月  29 12:56 apiserver-kubelet-client.key
-rw-r--r--. 1 root root 1025 5月  29 12:57 ca.crt
-rw-r--r--. 1 root root 1058 5月  29 12:56 front-proxy-client.crt
-rw-------. 1 root root 1675 5月  29 12:56 front-proxy-client.key

Join node1 to the cluster:
[root@node1 pki]# kubeadm join 192.168.100.16:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:4659965a2ff49020d350d239bc426028735ed1576919e1f31b0b95a812cedab3
 .........
 This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run kubectl get nodes on the control-plane to see this node join the cluster.


node2 is joined the same way:
1. Copy the certificates over;
2. Run the kubeadm join command.
 
Check from master1:
[root@master1 pki]# kubectl get nodes
NAME      STATUS     ROLES    AGE     VERSION
master1   NotReady   master   47m     v1.16.4
master2   NotReady   master   38m     v1.16.4
master3   NotReady   master   34m     v1.16.4
node1     NotReady   <none>   8m48s   v1.16.4
node2     NotReady   <none>   7m33s   v1.16.4 

 

2. Deploy the flannel network:

Run on master1:
[root@master1 conf]#wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml
[root@master1 conf]#kubectl apply -f  kube-flannel.yml

If that URL cannot be reached, use this mirror instead:
[root@master1 conf]#wget http://116.62.187.96/test01/kube-flannel.yml
[root@master1 conf]#kubectl apply -f  kube-flannel.yml

[root@master1 conf]#docker pull registry.cn-hangzhou.aliyuncs.com/wy18301/flannel-v0.11.0-amd64:v0.11.0-amd64
[root@master1 conf]#docker tag registry.cn-hangzhou.aliyuncs.com/wy18301/flannel-v0.11.0-amd64:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64
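Once the manifest is applied and the image is available, the flannel DaemonSet should come up on every node. A quick check (commands assumed; the DaemonSet name matches the pods listed further below, and the app=flannel label comes from the upstream manifest):

[root@master1 conf]# kubectl -n kube-system get daemonset kube-flannel-ds-amd64
[root@master1 conf]# kubectl -n kube-system get pod -l app=flannel -o wide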

Check the nodes:
[root@master1 conf]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   19m   v1.16.4
master2   Ready    master   13m   v1.16.4
master3   Ready    master   12m   v1.16.4
node1     Ready    <none>   11m   v1.16.4
node2     Ready    <none>   11m   v1.16.4

Check the cluster pods:
[root@master1 conf]# kubectl get pod -n kube-system 
NAME                              READY   STATUS    RESTARTS   AGE
coredns-5644d7b6d9-rcnnf          1/1     Running   0          24m
coredns-5644d7b6d9-vfm5l          1/1     Running   0          60s
etcd-master1                      1/1     Running   0          23m
etcd-master2                      1/1     Running   0          19m
etcd-master3                      1/1     Running   0          18m
kube-apiserver-master1            1/1     Running   0          23m
kube-apiserver-master2            1/1     Running   0          19m
kube-apiserver-master3            1/1     Running   1          18m
kube-controller-manager-master1   1/1     Running   1          23m
kube-controller-manager-master2   1/1     Running   0          19m
kube-controller-manager-master3   1/1     Running   0          17m
kube-flannel-ds-amd64-88f4m       1/1     Running   0          14m
kube-flannel-ds-amd64-j4f4j       1/1     Running   0          14m
kube-flannel-ds-amd64-l8lgs       1/1     Running   0          14m
kube-flannel-ds-amd64-wgzp4       1/1     Running   0          14m
kube-flannel-ds-amd64-xmt95       1/1     Running   0          14m
kube-proxy-2l8q5                  1/1     Running   0          24m
kube-proxy-2lws9                  1/1     Running   0          17m
kube-proxy-flb7s                  1/1     Running   0          17m
kube-proxy-sgjtk                  1/1     Running   0          18m
kube-proxy-zqdvh                  1/1     Running   0          19m
kube-scheduler-master1            1/1     Running   1          24m
kube-scheduler-master2            1/1     Running   0          19m
kube-scheduler-master3            1/1     Running   0          17m

At this point the Kubernetes cluster deployment is complete.

 

Test:

[root@master1 yaml]# vim myapp-demo.yaml 
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: myapp-svc-demo
  labels:
    app: myapp
spec:
  type: NodePort
  selector:
    app: myapp
  ports:
  - name: http
    port: 80
    targetPort: 80


[root@master1 yaml]# kubectl apply -f myapp-demo.yaml

[root@master1 yaml]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
myapp-7866747958-kcjsl   1/1     Running   0          91s   10.244.4.4   node2   <none>           <none>

 

[root@master1 yaml]# kubectl get svc -o wide 
NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE   SELECTOR
kubernetes       ClusterIP   10.96.0.1      <none>        443/TCP        13h   <none>
myapp-svc-demo   NodePort    10.98.230.12   <none>        80:30004/TCP   12h   app=myapp
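The service is exposed on NodePort 30004, so it can be reached through any node's IP; a hedged example using node1's address:

[root@master1 yaml]# curl http://192.168.100.6:30004      # the myapp default page should be returned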

 

Access test:

(screenshots: the myapp page opened in a browser via the NodePort)
