k8s

k8s installation

Environment preparation

Base preparation

  • CentOS 7.6, kernel 3.8.x or later

    Disable selinux and firewalld

    Time sync via ntpdate/chronyd

    Switch to optimized base and epel yum repos

    Kernel tuning: file descriptors, IP forwarding

    Disable the swap partition on compute nodes (see the command sketch after this list)

  • Install bind9 as the internal DNS

  • Install harbor as the private docker registry

  • Issue certificates with cfssl

    cfssl: the main certificate-signing tool

    cfssl-json: turns cfssl's JSON output into the certificate files

    cfssl-certinfo: inspects an issued certificate

  • Install the control-plane services (4)

    etcd # key-value (nosql) store holding the k8s state

    apiserver

    controller-manager

    scheduler

    apiserver, controller-manager and scheduler are deployed on the same hosts; etcd is deployed separately on other hosts (an odd number of them)

  • Install and deploy the compute (node) services (2 nodes)

    kubelet

    kube-proxy
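
A minimal command sketch of the base optimizations listed above (assumed typical values, not from the original notes; adjust to your environment):

# disable selinux now and across reboots
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# stop the firewall
systemctl stop firewalld && systemctl disable firewalld
# time sync
yum install -y ntpdate && ntpdate ntp1.aliyun.com
# raise the file-descriptor limit
cat >> /etc/security/limits.conf <<'EOF'
*   soft   nofile   65535
*   hard   nofile   65535
EOF
# enable kernel IP forwarding
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf && sysctl -p
# compute nodes only: turn swap off
swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab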

Example: inspecting the apiserver certificate (issued later in this walkthrough):

[root@k8s-200 /opt/certs]# cfssl-certinfo -cert apiserver.pem
# host IPs the certificate covers
    "127.0.0.1",
    "192.168.0.1",
    "10.0.0.10",
    "10.0.0.21",
    "10.0.0.22",
    "10.0.0.23",
    "10.0.0.200"

# certificate validity period
  "not_before": "2020-08-10T11:05:00Z",
  "not_after": "2040-08-05T11:05:00Z",

Node plan:

10.0.0.11  2C2G bind9, nginx+keepalived
10.0.0.12  2C2G etcd, nginx+keepalived
10.0.0.21  2C6G docker, etcd, apiserver, kubelet, kube-proxy
10.0.0.22  2C6G docker, etcd, apiserver, kubelet, kube-proxy
10.0.0.200 2C2G docker, harbor, cfssl, nginx

Hostnames (plan them up front):

hostnamectl set-hostname k8s-11
hostnamectl set-hostname k8s-12
hostnamectl set-hostname k8s-21
hostnamectl set-hostname k8s-22
hostnamectl set-hostname k8s-200

Configuration on all hosts: basic optimization

rm -rf /etc/yum.repos.d/*
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all
getenforce
systemctl status firewalld

yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y

bind9 DNS

  1. Create the primary DNS on 10.0.0.11

    yum install -y bind
    rpm -qa bind
    bind-9.11.4-16.P2.el7_8.2.x86_64
    
  2. vim /etc/named.conf

    options {
            listen-on port 53 { 10.0.0.11; }; # listen on the internal address
            //listen-on-v6 port 53 { ::1; };  # comment out ipv6
            directory       "/var/named";
            dump-file       "/var/named/data/cache_dump.db";
            statistics-file "/var/named/data/named_stats.txt";
            memstatistics-file "/var/named/data/named_mem_stats.txt";
            recursing-file  "/var/named/data/named.recursing";
            secroots-file   "/var/named/data/named.secroots";
            allow-query     { any; };        # which clients may query this server
            forwarders      { 10.0.0.254; }; # upstream DNS (the local gateway)
    
            recursion yes;   # recursive queries
    
            dnssec-enable no;      # DNS security extensions disabled
            dnssec-validation no;
    
    

    Parameter reference: https://www.cnblogs.com/pipci/p/10475162.html

  3. Syntax check

    named-checkconf
    
  4. Zone configuration files

    a. vim /etc/named.rfc1912.zones

    Business domain: lc.com

    Host domain: host.com

    zone "host.com" IN {
            type   master;
    		file   "host.com.zone";
    		allow-update { 10.0.0.11; };
    };
    
    zone "lc.com" IN {
            type   master;
    		file   "lc.com.zone";
    		allow-update { 10.0.0.11; };
    };
    

    b. vim /var/named/host.com.zone

    $ORIGIN host.com.
    $TTL    600     ; 10 minutes
    @       IN  SOA dns.host.com.    dnsadmin.host.com.  (   
                                          2020050801 ; serial   
                                          10800      ; refresh (3 hours)   
                                          900        ; retry (15 minutes)  
                                          604800     ; expire (1 week)
                                          86400      ; minimum (1 day)  
    									  )
                        NS      dns.host.com.   
    $TTL    60    ; 1 minute
    dns            A     10.0.0.11
    k8s-11         A     10.0.0.11 
    k8s-12         A     10.0.0.12
    k8s-21         A     10.0.0.21
    k8s-22         A     10.0.0.22
    k8s-200        A     10.0.0.200
    

    c. vim /var/named/lc.com.zone

    $ORIGIN lc.com.
    $TTL    600     ; 10 minutes
    @       IN  SOA dns.lc.com.    dnsadmin.lc.com.  (   
                                          2020050801 ; serial   
                                          10800      ; refresh (3 hours)   
                                          900        ; retry (15 minutes)  
                                          604800     ; expire (1 week)
                                          86400      ; minimum (1 day)  
    									  )
                        NS      dns.lc.com.   
    $TTL    60    ; 1 minute
    dns            A     10.0.0.11
    harbor         A     10.0.0.200
    

    d. Check

    named-checkconf
    systemctl start named 
    systemctl enable named 
    netstat -lntup|grep 53
    dig -t A k8s-21.host.com @10.0.0.11 +short
    dig -t A k8s-200.host.com @10.0.0.11 +short
    
  5. Change each host's DNS1 to 10.0.0.11

    cat /etc/sysconfig/network-scripts/ifcfg-ens33
    cat /etc/sysconfig/network-scripts/ifcfg-eth0
    sed -i 's#DNS1=114.114.114.114#DNS1=10.0.0.11#g' /etc/sysconfig/network-scripts/ifcfg-ens33 && systemctl restart network
    
    vim /etc/resolv.conf
    search host.com   # lets short names resolve within the host domain; add on every host
    
    ping baidu.com
    ping k8s-11.host.com
    ping k8s-11
    
    Windows CMD test:
    set DNS to 10.0.0.11 on the vmnet8 adapter and the local-connection adapter
    
    ping dns.host.com
    ping dns.lc.com
    ping k8s-11.host.com
    

Issuing certificates with cfssl

  1. Issue on host 10.0.0.200

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json 
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
    chmod +x  /usr/bin/cfssl*
    
    which cfssl
    which cfssl-json
    which cfssl-certinfo
    ll /usr/bin/cfssl*
    
  2. CA certificate

    [root@k8s-200 ~]# mkdir /opt/certs
    [root@k8s-200 /opt/certs]# vi /opt/certs/ca-csr.json
    {
        "CN": "OldboyEdu",
        "hosts": [
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ],
        "ca": {
            "expiry": "175200h"
        }
    }
    
    [root@k8s-200 /opt/certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
    [root@k8s-200 /opt/certs]# ll
    total 16
    -rw-r--r-- 1 root root  993 Aug 11 10:10 ca.csr
    -rw-r--r-- 1 root root  346 Aug 11 10:09 ca-csr.json
    -rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem	# root CA private key
    -rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem		# root CA certificate
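
    To sanity-check the freshly generated root certificate, cfssl-certinfo (installed above) can be pointed at it; a quick hedged example:

    [root@k8s-200 /opt/certs]# cfssl-certinfo -cert ca.pem
    # expect the subject fields from ca-csr.json and a 20-year not_before/not_after window (175200h)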
    
    

Docker environment

10.0.0.200

curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
mkdir -p /etc/docker /data/docker
vi /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com","harbor.lc.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.200.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
systemctl start docker
systemctl enable docker
systemctl status docker
docker -v

10.0.0.21

curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
mkdir -p /etc/docker /data/docker
vi /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com","harbor.lc.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
systemctl start docker
systemctl enable docker
systemctl status docker
docker -v

10.0.0.22

curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
mkdir -p /etc/docker /data/docker
vi /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com","harbor.lc.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.22.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
systemctl start docker
systemctl enable docker
systemctl status docker
docker -v
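
The three daemon.json files above differ only in the "bip" value. As a hedged convenience (assumes root SSH from a control host; a sketch, not part of the original steps), a loop like this could template them:

for ip in 200 21 22; do
  ssh 10.0.0.$ip "mkdir -p /etc/docker /data/docker && cat > /etc/docker/daemon.json <<EOF
{
  \"graph\": \"/data/docker\",
  \"storage-driver\": \"overlay2\",
  \"insecure-registries\": [\"registry.access.redhat.com\",\"quay.io\",\"harbor.od.com\",\"harbor.lc.com\"],
  \"registry-mirrors\": [\"https://q2gr04ke.mirror.aliyuncs.com\"],
  \"bip\": \"172.7.$ip.1/24\",
  \"exec-opts\": [\"native.cgroupdriver=systemd\"],
  \"live-restore\": true
}
EOF
systemctl restart docker"
done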

Harbor image registry, on the 200 host

Install version 1.7.6 or later.

[root@hdss7-200 ~]# mkdir /opt/src
[root@hdss7-200 ~]# cd /opt/src/
[root@hdss7-200 src]# ls
harbor-offline-installer-v1.9.1.tgz
[root@hdss7-200 src]# tar zxvf harbor-offline-installer-v1.9.1.tgz -C /opt/

Mark the package with its version and create a symlink, to make future upgrades easy:

[root@hdss7-200 src]# cd ..
[root@hdss7-200 opt]# mv harbor/ harbor-v1.9.1
[root@hdss7-200 opt]# ln -s /opt/harbor-v1.9.1/ /opt/harbor
[root@hdss7-200 opt]# ll
total 0
drwx--x--x 4 root root  28 Dec 10 14:30 containerd
lrwxrwxrwx 1 root root  19 Dec 10 15:00 harbor -> /opt/harbor-v1.9.1/
drwxr-xr-x 2 root root 100 Dec 10 14:58 harbor-v1.9.1
drwxr-xr-x 2 root root  49 Dec 10 14:56 src

Edit the harbor config:

[root@hdss7-200 opt]# cd harbor
[root@hdss7-200 harbor]# vi harbor.yml
5 hostname: harbor.lc.com
10   port: 180
27 harbor_admin_password: Harbor12345  # default password
40 data_volume: /data/harbor
87     location: /data/harbor/logs	# change the log storage path

[root@hdss7-200 harbor]# mkdir -p /data/harbor/logs

Single-host orchestration tool:

[root@hdss7-200 harbor]# yum install -y docker-compose
[root@hdss7-200 harbor]# rpm -qa docker-compose
docker-compose-1.18.0-4.el7.noarch
# install
[root@hdss7-200 harbor]# ./install.sh 

# list the containers docker-compose manages
[root@hdss7-200 harbor]# docker-compose ps

# run again after every docker restart
[root@hdss7-200 harbor]# docker-compose up -d
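
Since docker-compose here is not wired into systemd, harbor will not come back on its own after a reboot. A hedged workaround (an assumption, not from the original notes) is an rc.local entry:

cat >> /etc/rc.d/rc.local <<'EOF'
# bring harbor back up once docker is running
cd /opt/harbor && /usr/bin/docker-compose up -d
EOF
chmod +x /etc/rc.d/rc.local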

Install nginx as a reverse proxy for the domain harbor.lc.com:

[root@hdss7-200 harbor]# yum install -y nginx

[root@hdss7-200 harbor]# vi /etc/nginx/conf.d/harbor.lc.com.conf
server {
    listen       80;
    server_name  harbor.lc.com;
    client_max_body_size 1000m;
    location / {
        proxy_pass http://127.0.0.1:180;
    }
}

Check the config file
[root@hdss7-200 harbor]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

[root@hdss7-200 harbor]# systemctl start nginx
[root@hdss7-200 harbor]# systemctl enable nginx

Configure the DNS record on the DNS server, 10.0.0.11:

[root@hdss7-11 named]# vi /var/named/lc.com.zone 
$ORIGIN lc.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.lc.com. dnsadmin.lc.com. (
                                2019120902 ; serial		# rolled forward to 02; every config change must bump the serial
                                10800      ; refresh (3 hours)
                                900        ; retry (15 minutes)
                                604800     ; expire (1 week)
                                86400      ; minimum (1 day)
                                )
                                NS   dns.lc.com.
$TTL 60 ; 1 minute
dns                A    10.0.0.11
harbor             A    10.0.0.200

[root@hdss7-11 named]# systemctl restart named 

Verify
[root@hdss7-11 named]# dig -t A harbor.lc.com +short

http://harbor.lc.com/
10.0.0.200:180

admin
123456 (or the harbor_admin_password from harbor.yml; default Harbor12345)
Create a project named "public" and set its access level to public

[root@hdss7-200 harbor]# docker pull nginx:1.7.9
[root@hdss7-200 harbor]# docker tag nginx:1.7.9 harbor.lc.com/public/nginx:v1.7.9 
[root@hdss7-200 harbor]# docker login harbor.lc.com # log in
[root@hdss7-200 harbor]# docker push harbor.lc.com/public/nginx:v1.7.9
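
A quick hedged end-to-end check from a node host (10.0.0.21 already resolves harbor.lc.com via bind9 and trusts it via insecure-registries):

[root@k8s-21 ~]# docker login harbor.lc.com
[root@k8s-21 ~]# docker pull harbor.lc.com/public/nginx:v1.7.9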

etcd

10.0.0.12

10.0.0.21

10.0.0.22

  1. Issue the etcd certificate on 10.0.0.200

    [root@hdss7-200 ~]# vi /opt/certs/ca-config.json
    {
        "signing": {
            "default": {
                "expiry": "175200h"
            },
            "profiles": {
                "server": {
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth"
                    ]
                },
                "client": {
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "client auth"
                    ]
                },
                "peer": {				
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth",
                        "client auth"
                    ]
                }
            }
        }
    }
    
  2. vi /opt/certs/etcd-peer-csr.json

    The IPs are every host that might run etcd, plus one spare for future use

    {
        "CN": "k8s-etcd",
        "hosts": [
            "10.0.0.11",
            "10.0.0.12",
            "10.0.0.21",
            "10.0.0.22",
            "10.0.0.200"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
  3. Generate the etcd certificate

    [root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json |cfssl-json -bare etcd-peer
    [root@k8s-200 /opt/certs]# ll
    total 36
    -rw-r--r-- 1 root root  840 Aug 10 14:14 ca-config.json
    -rw-r--r-- 1 root root  993 Aug 11  2020 ca.csr
    -rw-r--r-- 1 root root  346 Aug 11  2020 ca-csr.json
    -rw------- 1 root root 1675 Aug 11  2020 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11  2020 ca.pem
    -rw-r--r-- 1 root root 1070 Aug 10 14:15 etcd-peer.csr
    -rw-r--r-- 1 root root  385 Aug 10 14:14 etcd-peer-csr.json
    -rw------- 1 root root 1679 Aug 10 14:15 etcd-peer-key.pem
    -rw-r--r-- 1 root root 1436 Aug 10 14:15 etcd-peer.pem
    
  4. Install etcd on hosts 10.0.0.12 / 21 / 22 (the example below uses 12):

    Create the etcd user on each etcd host

    useradd -s /sbin/nologin -M etcd
    id etcd
    mkdir /opt/src
    cd /opt/src/
    
    #download etcd; a version no newer than 3.3 is recommended
    # etcd-v3.1.20-linux-amd64.tar.gz
    tar xfv etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
    cd ..
    mv etcd-v3.1.20-linux-amd64 etcd-v3.1.20
    #symlink so future upgrades are easy
    ln -s /opt/etcd-v3.1.20 /opt/etcd
    
    # ll
    total 0
    lrwxrwxrwx 1 root   root   17 Dec 10 16:45 etcd -> /opt/etcd-v3.1.20
    drwxr-xr-x 3 478493 89939 123 Oct 11  2018 etcd-v3.1.20
    drwxr-xr-x 2 root   root   45 Dec 10 16:41 src
    
    #create directories; copy over the certificates and private key
    mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
    
    #copy ca.pem, etcd-peer-key.pem and etcd-peer.pem generated on the ops host into /opt/etcd/certs; the private key must stay mode 600
    scp 10.0.0.200:/opt/certs/ca.pem /opt/etcd/certs/
    scp 10.0.0.200:/opt/certs/etcd-peer-key.pem /opt/etcd/certs/
    scp 10.0.0.200:/opt/certs/etcd-peer.pem /opt/etcd/certs/
    
    #change owner and group
    cd /opt/etcd/certs
    chown -R etcd.etcd /opt/etcd/certs /data/etcd /data/logs/etcd-server
    # ll
    -rw-r--r-- 1 etcd etcd 1346 Dec 10 16:52 ca.pem
    -rw------- 1 etcd etcd 1679 Dec 10 16:53 etcd-peer-key.pem
    -rw-r--r-- 1 etcd etcd 1428 Dec 10 16:53 etcd-peer.pem
    
  5. Create the etcd startup script; change the IPs to the local host's IP

    vi /opt/etcd/etcd-server-startup.sh
    
    #!/bin/sh
    ./etcd --name etcd-server-7-12 \
           --data-dir /data/etcd/etcd-server \
           --listen-peer-urls https://10.0.0.12:2380 \
           --listen-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
           --quota-backend-bytes 8000000000 \
           --initial-advertise-peer-urls https://10.0.0.12:2380 \
           --advertise-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
           --initial-cluster  etcd-server-7-12=https://10.0.0.12:2380,etcd-server-7-21=https://10.0.0.21:2380,etcd-server-7-22=https://10.0.0.22:2380 \
           --ca-file ./certs/ca.pem \
           --cert-file ./certs/etcd-peer.pem \
           --key-file ./certs/etcd-peer-key.pem \
           --client-cert-auth  \
           --trusted-ca-file ./certs/ca.pem \
           --peer-ca-file ./certs/ca.pem \
           --peer-cert-file ./certs/etcd-peer.pem \
           --peer-key-file ./certs/etcd-peer-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file ./certs/ca.pem \
           --log-output stdout
           
           
    # etcd startup script for 10.0.0.21
    #!/bin/sh
    ./etcd --name etcd-server-7-21 \
           --data-dir /data/etcd/etcd-server \
           --listen-peer-urls https://10.0.0.21:2380 \
           --listen-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
           --quota-backend-bytes 8000000000 \
           --initial-advertise-peer-urls https://10.0.0.21:2380 \
           --advertise-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
           --initial-cluster  etcd-server-7-12=https://10.0.0.12:2380,etcd-server-7-21=https://10.0.0.21:2380,etcd-server-7-22=https://10.0.0.22:2380 \
           --ca-file ./certs/ca.pem \
           --cert-file ./certs/etcd-peer.pem \
           --key-file ./certs/etcd-peer-key.pem \
           --client-cert-auth  \
           --trusted-ca-file ./certs/ca.pem \
           --peer-ca-file ./certs/ca.pem \
           --peer-cert-file ./certs/etcd-peer.pem \
           --peer-key-file ./certs/etcd-peer-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file ./certs/ca.pem \
           --log-output stdout
    
    
    # etcd startup script for 10.0.0.22
    #!/bin/sh
    ./etcd --name etcd-server-7-22 \
           --data-dir /data/etcd/etcd-server \
           --listen-peer-urls https://10.0.0.22:2380 \
           --listen-client-urls https://10.0.0.22:2379,http://127.0.0.1:2379 \
           --quota-backend-bytes 8000000000 \
           --initial-advertise-peer-urls https://10.0.0.22:2380 \
           --advertise-client-urls https://10.0.0.22:2379,http://127.0.0.1:2379 \
           --initial-cluster  etcd-server-7-12=https://10.0.0.12:2380,etcd-server-7-21=https://10.0.0.21:2380,etcd-server-7-22=https://10.0.0.22:2380 \
           --ca-file ./certs/ca.pem \
           --cert-file ./certs/etcd-peer.pem \
           --key-file ./certs/etcd-peer-key.pem \
           --client-cert-auth  \
           --trusted-ca-file ./certs/ca.pem \
           --peer-ca-file ./certs/ca.pem \
           --peer-cert-file ./certs/etcd-peer.pem \
           --peer-key-file ./certs/etcd-peer-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file ./certs/ca.pem \
           --log-output stdout
    
    #make it executable
    chmod +x /opt/etcd/etcd-server-startup.sh
    #change owner and group
    chown -R etcd.etcd /opt/etcd-v3.1.20/ /data/etcd /data/logs/etcd-server
    
  6. Run etcd in the background

    [root@hdss7-12 logs]# yum install supervisor -y
    [root@hdss7-12 logs]# systemctl start supervisord
    [root@hdss7-12 logs]# systemctl enable supervisord
    
  7. Edit the supervisord config file; the [program:etcd-server-7-12] name must be adjusted per host

    vi /etc/supervisord.d/etcd-server.ini
    
    [program:etcd-server-7-12]
    command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)	
    numprocs=1                                                      ; number of processes copies to start (def 1)
    directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
    autostart=true                                                  ; start at supervisord start (default: true)
    autorestart=true                                                ; restart at unexpected quit (default: true)
    startsecs=30                                                    ; number of secs prog must stay running (def. 1)
    startretries=3                                                  ; max # of serial start failures (default 3)
    exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
    user=etcd                                                       ; setuid to this UNIX account to run the program
    redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
    
    #register the program and start etcd in the background
    [root@hdss7-12 logs]# supervisorctl update
    etcd-server-7-12: added process group
    
    [root@hdss7-12 logs]# supervisorctl status
    etcd-server-7-12                 STARTING 
    
    [root@hdss7-12 logs]# netstat -luntp|grep etcd
    tcp        0      0 10.0.0.12:2379          0.0.0.0:*               LISTEN      19395/./etcd        
    tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      19395/./etcd        
    tcp        0      0 10.0.0.12:2380          0.0.0.0:*               LISTEN      19395/./etcd  
    
    #tail the log
    [root@hdss7-12 logs]# tail -fn 200 /data/logs/etcd-server/etcd.stdout.log
    
    #checks
    supervisorctl status
    ./etcdctl cluster-health
    ./etcdctl member list
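
    The two etcdctl commands above work because etcd also listens on http://127.0.0.1:2379. A hedged TLS-explicit variant that checks all three members over the network:

    cd /opt/etcd
    ./etcdctl --ca-file ./certs/ca.pem \
              --cert-file ./certs/etcd-peer.pem \
              --key-file ./certs/etcd-peer-key.pem \
              --endpoints https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
              cluster-health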
    

apiserver

  1. Cluster plan

    10.0.0.21 apiserver

    10.0.0.22 apiserver

    10.0.0.12 nginx+keepalived

    10.0.0.11 nginx+keepalived

    10.0.0.10 VIP

  2. Upload v1.15.2 to /opt/src (using 21 as the example)

    [root@hdss7-21 ~]# cd /opt/src/
    [root@hdss7-21 src]# ls
    kubernetes-server-linux-amd64-v1.15.2.tar.gz
    
    # check the file size
    [root@hdss7-21 src]# du -sh kubernetes-server-linux-amd64-v1.15.2.tar.gz
    
    [root@hdss7-21 src]# tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt/
    [root@hdss7-21 src]# cd ..
    [root@hdss7-21 opt]# mv kubernetes/ kubernetes-v1.15.2
    
    # symlink for easier future upgrades
    [root@hdss7-21 opt]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes
    [root@hdss7-21 opt]# ll
    total 0
    drwx--x--x 4 root root  28 Dec 10 14:28 containerd
    lrwxrwxrwx 1 root root  17 Dec 10 16:45 etcd -> /opt/etcd-v3.1.20
    drwxr-xr-x 4 etcd etcd 166 Dec 10 17:43 etcd-v3.1.20
    lrwxrwxrwx 1 root root  24 Dec 10 18:33 kubernetes -> /opt/kubernetes-v1.15.2/
    drwxr-xr-x 4 root root  79 Aug  5 18:01 kubernetes-v1.15.2
    drwxr-xr-x 2 root root  97 Dec 10 18:29 src
    
    [root@hdss7-21 opt]# cd kubernetes
    [root@hdss7-21 kubernetes]# ls
    addons  kubernetes-src.tar.gz  LICENSES  server
    
    # remove the source tarball
    [root@hdss7-21 kubernetes]# rm -rf kubernetes-src.tar.gz 
    
    [root@hdss7-21 kubernetes]# cd server/bin/
    
    # remove unneeded files (docker image tars, tag files)
    [root@hdss7-21 bin]# rm -rf *.tar
    [root@hdss7-21 bin]# rm -rf *_tag 
    
    # the remaining executables
    [root@hdss7-21 bin]# ll
    total 884636
    -rwxr-xr-x 1 root root  43534816 Aug  5 18:01 apiextensions-apiserver
    -rwxr-xr-x 1 root root 100548640 Aug  5 18:01 cloud-controller-manager
    -rwxr-xr-x 1 root root 200648416 Aug  5 18:01 hyperkube
    -rwxr-xr-x 1 root root  40182208 Aug  5 18:01 kubeadm
    -rwxr-xr-x 1 root root 164501920 Aug  5 18:01 kube-apiserver
    -rwxr-xr-x 1 root root 116397088 Aug  5 18:01 kube-controller-manager
    -rwxr-xr-x 1 root root  42985504 Aug  5 18:01 kubectl
    -rwxr-xr-x 1 root root 119616640 Aug  5 18:01 kubelet
    -rwxr-xr-x 1 root root  36987488 Aug  5 18:01 kube-proxy
    -rwxr-xr-x 1 root root  38786144 Aug  5 18:01 kube-scheduler
    -rwxr-xr-x 1 root root   1648224 Aug  5 18:01 mounter
    
  3. apiserver certificates

    Issue the apiserver client certificate: used for apiserver-to-etcd communication (apiserver is the client, etcd the server).
    On the ops host k8s-200.host.com:

    Create the JSON config for the certificate signing request (csr) -- the file already exists in this directory; upload and edit it rather than copy-pasting

    [root@hdss7-200 ~]# vi /opt/certs/client-csr.json
    {
        "CN": "k8s-node",
        "hosts": [
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
    [root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client
    

    Create the csr JSON config for the apiserver server-side certificate

    [root@k8s-200 ~]# vi /opt/certs/apiserver-csr.json
    {
        "CN": "k8s-apiserver",
        "hosts": [
            "127.0.0.1",
            "192.168.0.1",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local",
            "10.0.0.10",
            "10.0.0.21",
            "10.0.0.22",
            "10.0.0.23",
            "10.0.0.200"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
    
    [root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver
    
  4. Inspect

    [root@hdss7-200 certs]# ll
    total 24
    -rw------- 1 root root 1679 Nov 10 17:42 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Nov 10 17:42 apiserver.pem
    -rw------- 1 root root 1679 Nov 10 17:41 ca-key.pem
    -rw-r--r-- 1 root root 1346 Nov 10 17:41 ca.pem
    -rw------- 1 root root 1675 Nov 10 17:41 client-key.pem
    -rw-r--r-- 1 root root 1363 Nov 10 17:41 client.pem
    
  5. Copy the certificates to nodes 21 and 22

    [root@k8s-21 ~]# mkdir -p /opt/kubernetes/server/bin/certs   # the target directory must exist before the scp
    scp 10.0.0.200:/opt/certs/apiserver-key.pem /opt/kubernetes/server/bin/certs
    scp 10.0.0.200:/opt/certs/apiserver.pem /opt/kubernetes/server/bin/certs
    scp 10.0.0.200:/opt/certs/ca-key.pem /opt/kubernetes/server/bin/certs
    scp 10.0.0.200:/opt/certs/ca.pem /opt/kubernetes/server/bin/certs
    scp 10.0.0.200:/opt/certs/client-key.pem /opt/kubernetes/server/bin/certs
    scp 10.0.0.200:/opt/certs/client.pem /opt/kubernetes/server/bin/certs
    
    [root@k8s-21 ~]# ll /opt/kubernetes/server/bin/certs
    total 24
    -rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
    -rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
    -rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
    -rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
    -rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
    
  6. Create the startup config (the audit policy)

    [root@k8s-21 ~]# mkdir -p /opt/kubernetes/server/bin/conf
    [root@k8s-21 ~]# vi /opt/kubernetes/server/bin/conf/audit.yaml
    
    apiVersion: audit.k8s.io/v1beta1 # This is required.
    kind: Policy
    # Don't generate audit events for all requests in RequestReceived stage.
    omitStages:
      - "RequestReceived"
    rules:
      # Log pod changes at RequestResponse level
      - level: RequestResponse
        resources:
        - group: ""
          # Resource "pods" doesn't match requests to any subresource of pods,
          # which is consistent with the RBAC policy.
          resources: ["pods"]
      # Log "pods/log", "pods/status" at Metadata level
      - level: Metadata
        resources:
        - group: ""
          resources: ["pods/log", "pods/status"]
    
      # Don't log requests to a configmap called "controller-leader"
      - level: None
        resources:
        - group: ""
          resources: ["configmaps"]
          resourceNames: ["controller-leader"]
    
      # Don't log watch requests by the "system:kube-proxy" on endpoints or services
      - level: None
        users: ["system:kube-proxy"]
        verbs: ["watch"]
        resources:
        - group: "" # core API group
          resources: ["endpoints", "services"]
    
      # Don't log authenticated requests to certain non-resource URL paths.
      - level: None
        userGroups: ["system:authenticated"]
        nonResourceURLs:
        - "/api*" # Wildcard matching.
        - "/version"
    
      # Log the request body of configmap changes in kube-system.
      - level: Request
        resources:
        - group: "" # core API group
          resources: ["configmaps"]
        # This rule only applies to resources in the "kube-system" namespace.
        # The empty string "" can be used to select non-namespaced resources.
        namespaces: ["kube-system"]
    
      # Log configmap and secret changes in all other namespaces at the Metadata level.
      - level: Metadata
        resources:
        - group: "" # core API group
          resources: ["secrets", "configmaps"]
    
      # Log all other resources in core and extensions at the Request level.
      - level: Request
        resources:
        - group: "" # core API group
        - group: "extensions" # Version of group should NOT be included.
    
      # A catch-all rule to log all other requests at the Metadata level.
      - level: Metadata
        # Long-running requests like watches that fall under this rule will not
        # generate an audit event in RequestReceived.
        omitStages:
          - "RequestReceived"
    
  7. Write the startup script

    [root@k8s-21 ~]# vi /opt/kubernetes/server/bin/kube-apiserver.sh
    #!/bin/bash
    ./kube-apiserver \
      --apiserver-count 2 \
      --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
      --audit-policy-file ./conf/audit.yaml \
      --authorization-mode RBAC \
      --client-ca-file ./certs/ca.pem \
      --requestheader-client-ca-file ./certs/ca.pem \
      --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
      --etcd-cafile ./certs/ca.pem \
      --etcd-certfile ./certs/client.pem \
      --etcd-keyfile ./certs/client-key.pem \
      --etcd-servers https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
      --service-account-key-file ./certs/ca-key.pem \
      --service-cluster-ip-range 192.168.0.0/16 \
      --service-node-port-range 3000-29999 \
      --target-ram-mb=1024 \
      --kubelet-client-certificate ./certs/client.pem \
      --kubelet-client-key ./certs/client-key.pem \
      --log-dir  /data/logs/kubernetes/kube-apiserver \
      --tls-cert-file ./certs/apiserver.pem \
      --tls-private-key-file ./certs/apiserver-key.pem \
      --v 2
    
    # make it executable
    [root@hdss7-21 bin]# chmod +x kube-apiserver.sh
    
    # check the help text to see what each flag means
    [root@hdss7-21 bin]# ./kube-apiserver --help|grep -A 5 target-ram-mb 
    
  8. Run in the background via supervisor

    [root@hdss7-21 bin]# vi /etc/supervisord.d/kube-apiserver.ini
    [program:kube-apiserver-7-21]
    command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
    numprocs=1                                                      ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
    autostart=true                                                  ; start at supervisord start (default: true)
    autorestart=true                                                ; restart at unexpected quit (default: true)
    startsecs=30                                                    ; number of secs prog must stay running (def. 1)
    startretries=3                                                  ; max # of serial start failures (default 3)
    exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                       ; setuid to this UNIX account to run the program
    redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
    
    [root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver
    [root@hdss7-21 bin]# supervisorctl update
    [root@hdss7-21 bin]# netstat -luntp | grep kube-api
    tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      27303/./kube-apiser 
    tcp6       0      0 :::6443                 :::*                    LISTEN      27303/./kube-apiser 
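
    A hedged liveness check: kube-apiserver 1.15 still serves the insecure port 8080 on localhost, as the netstat output above shows.

    [root@hdss7-21 bin]# curl -s http://127.0.0.1:8080/healthz
    # expect: ok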
    

Deploying the L4 reverse proxy for the control plane

10.0.0.12 nginx+keepalived

10.0.0.11 nginx+keepalived

10.0.0.10 VIP

Port 7443 on VIP 10.0.0.10 reverse-proxies the apiservers on hdss7-21 and hdss7-22 at port 6443.

  1. Perform on both HDSS7-11 and HDSS7-12

    [root@hdss7-11 ~]# yum install -y nginx	
    
    [root@hdss7-11 ~]# vi /etc/nginx/nginx.conf		-- L4 load proxy; append it after the end of the http block
    stream {
        upstream kube-apiserver {
            server 10.0.0.21:6443     max_fails=3 fail_timeout=30s;
            server 10.0.0.22:6443     max_fails=3 fail_timeout=30s;
        }
        server {
            listen 7443;
            proxy_connect_timeout 2s;
            proxy_timeout 900s;
            proxy_pass kube-apiserver;
        }
    }
    
    
    Check the config file
    [root@hdss7-11 ~]# nginx -t
    nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
    nginx: configuration file /etc/nginx/nginx.conf test is successful
    
    [root@hdss7-11 ~]# systemctl start nginx
    [root@hdss7-11 ~]# systemctl enable nginx
    
  2. Install and configure keepalived

    [root@hdss7-11 ~]# yum install keepalived -y
    
    Write the port-check script	-- upload it directly
    [root@hdss7-11 ~]# vi /etc/keepalived/check_port.sh	
    #!/bin/bash
    #keepalived port-monitoring script
    #usage:
    #in the keepalived config file:
    #vrrp_script check_port { #define a vrrp_script check
    #    script "/etc/keepalived/check_port.sh 6379" #port to monitor
    #    interval 2 #check interval in seconds
    #}
    CHK_PORT=$1
    if [ -n "$CHK_PORT" ];then
            PORT_PROCESS=`netstat -lntup|grep $CHK_PORT|wc -l`
            if [ $PORT_PROCESS -eq 0 ];then
                    echo "Port $CHK_PORT Is Not Used,End."
                    exit 1
            fi
    else
            echo "Check Port Cant Be Empty!"
    fi
    
    [root@hdss7-11 ~]# chmod +x /etc/keepalived/check_port.sh
    
  3. Configure keepalived, master and backup separately

    # keepalived master:
    [root@hdss7-11 ~]# vi /etc/keepalived/keepalived.conf 
    ! Configuration File for keepalived
    
    global_defs {
       router_id 10.0.0.11
    
    }
    
    vrrp_script chk_nginx {
        script "/etc/keepalived/check_port.sh 7443"
        interval 2
        weight -20
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface eth0	# change to match the actual NIC
        virtual_router_id 251
        priority 100
        advert_int 1
        mcast_src_ip 10.0.0.11
        nopreempt
    
        authentication {
            auth_type PASS
            auth_pass 11111111
        }
        track_script {
             chk_nginx
        }
        virtual_ipaddress {
            10.0.0.10
        }
    }
    
    
    # keepalived backup:    
    [root@hdss7-12 ~]# vi /etc/keepalived/keepalived.conf 
    ! Configuration File for keepalived
    global_defs {
            router_id 10.0.0.12
    	script_user root
            enable_script_security 
    }
    vrrp_script chk_nginx {
            script "/etc/keepalived/check_port.sh 7443"
            interval 2
            weight -20
    }
    vrrp_instance VI_1 {
            state BACKUP
            interface eth0		# change to match the actual NIC
            virtual_router_id 251
            mcast_src_ip 10.0.0.12
            priority 90
            advert_int 1
            authentication {
                    auth_type PASS
                    auth_pass 11111111
            }
            track_script {
                    chk_nginx
            }
            virtual_ipaddress {
                    10.0.0.10
            }
    }
    
    
    [root@hdss7-11 keepalived]# ss -lnt|grep 7443|wc -l         
    1
    
    [root@hdss7-11 ~]# systemctl start keepalived
    [root@hdss7-11 ~]# systemctl enable keepalived
    
    1. If nginx goes down, also stop keepalived on the same machine; once nginx is back up, start keepalived again.
    2. When nginx dies, the VIP drifts to the backup, where keepalived must be running so the VIP keeps serving.
    
    [root@hdss7-11 ~]# netstat -luntp | grep 7443
    tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      22071/nginx: master 
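
    A hedged way to see which machine currently holds the VIP and to exercise a failover (assumes the eth0 interface from the configs above):

    # on the master, the VIP should be bound
    [root@hdss7-11 ~]# ip addr show eth0 | grep 10.0.0.10
    # simulate a failure: port 7443 disappears, chk_nginx fails, and the VIP should move to 12
    [root@hdss7-11 ~]# systemctl stop nginx
    [root@hdss7-12 ~]# ip addr show eth0 | grep 10.0.0.10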
    
   
   

Control-plane controller-manager / scheduler

10.0.0.21 controller-manager/scheduler

10.0.0.22 controller-manager/scheduler

1. Install kube-controller-manager

# Create the startup script, on HDSS7-21.host.com:
[root@hdss7-21 ~]# vi /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
  --cluster-cidr 172.7.0.0/16 \
  --leader-elect true \
  --log-dir /data/logs/kubernetes/kube-controller-manager \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --root-ca-file ./certs/ca.pem \
  --v 2
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@hdss7-21 ~]# chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
vim /etc/supervisord.d/kube-controller-manager.ini
# example for 21; change the program name on 22
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; restart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                                                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
  2. Install kube-scheduler
[root@hdss7-21 ~]# vi /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler \
  --leader-elect  \
  --log-dir /data/logs/kubernetes/kube-scheduler \
  --master http://127.0.0.1:8080 \
  --v 2


Set up kube-scheduler:
[root@hdss7-21 ~]# chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
[root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler

[root@hdss7-21 ~]# vi /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; restart at unexpected quit (default: true)
startsecs=30                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=true                                                     ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)


[root@hdss7-22 ~]# supervisorctl update
  3. Results
[root@k8s-21 /etc/supervisord.d]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 7369, uptime 3:38:42
kube-apiserver-7-21              RUNNING   pid 24215, uptime 1:28:15
kube-controller-manager-7-21     RUNNING   pid 24437, uptime 0:12:31
kube-scheduler-7-21              RUNNING   pid 24501, uptime 0:07:55

[root@k8s-22 ~]# supervisorctl status
etcd-server-7-22                 RUNNING   pid 7346, uptime 3:50:45
kube-apiserver-7-22              RUNNING   pid 24064, uptime 1:38:02
kube-controller-manager-7-22     RUNNING   pid 24191, uptime 0:17:21
kube-scheduler-7-22              RUNNING   pid 24273, uptime 0:12:41
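
With all components RUNNING, a hedged health check against the local insecure port:

[root@k8s-21 ~]# /opt/kubernetes/server/bin/kubectl get cs
# expect scheduler, controller-manager and etcd-0/1/2 to all report Healthy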

Node services: kubelet

10.0.0.21

10.0.0.22

  1. Issue the certificate, on 10.0.0.200

    [root@k8s-200 ~]# vim /opt/certs/kubelet-csr.json
    {
        "CN": "k8s-kubelet",
        "hosts": [
        "127.0.0.1",
        "10.0.0.10",
        "10.0.0.21",
        "10.0.0.22",
        "10.0.0.23",
        "10.0.0.24",
        "10.0.0.25",
        "10.0.0.26",
        "10.0.0.27",
        "10.0.0.28"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
    [root@k8s-200 ~]# cd /opt/certs/
    [root@k8s-200 /opt/certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
    
    [root@k8s-200 /opt/certs]# ll
    total 84
    -rw-r--r-- 1 root root 1257 Aug 10 19:10 apiserver.csr
    -rw-r--r-- 1 root root  588 Aug 10 19:07 apiserver-csr.json
    -rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
    -rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
    -rw-r--r-- 1 root root  840 Aug 10 14:14 ca-config.json
    -rw-r--r-- 1 root root  993 Aug 11  2020 ca.csr
    -rw-r--r-- 1 root root  346 Aug 11  2020 ca-csr.json
    -rw------- 1 root root 1675 Aug 11  2020 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11  2020 ca.pem
    -rw-r--r-- 1 root root  993 Aug 10 19:00 client.csr
    -rw-r--r-- 1 root root  280 Aug 10 19:00 client-csr.json
    -rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
    -rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
    -rw-r--r-- 1 root root 1070 Aug 10 14:15 etcd-peer.csr
    -rw-r--r-- 1 root root  385 Aug 10 14:14 etcd-peer-csr.json
    -rw------- 1 root root 1679 Aug 10 14:15 etcd-peer-key.pem
    -rw-r--r-- 1 root root 1436 Aug 10 14:15 etcd-peer.pem
    -rw-r--r-- 1 root root 1115 Aug 11 10:08 kubelet.csr
    -rw-r--r-- 1 root root  452 Aug 11 10:05 kubelet-csr.json
    -rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
    
  2. scp

    scp -rp kubelet-key.pem 10.0.0.21:/opt/kubernetes/server/bin/certs
    scp -rp kubelet-key.pem 10.0.0.22:/opt/kubernetes/server/bin/certs
    scp -rp kubelet.pem 10.0.0.22:/opt/kubernetes/server/bin/certs
    scp -rp kubelet.pem 10.0.0.21:/opt/kubernetes/server/bin/certs
    
  3. On 21 and 22

    [root@k8s-21 ~]# ll /opt/kubernetes/server/bin/certs/
    total 32
    -rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
    -rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
    -rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
    -rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
    -rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
    -rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
    
    [root@k8s-22 ~]# ll /opt/kubernetes/server/bin/certs/
    total 32
    -rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
    -rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
    -rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
    -rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
    -rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
    -rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
    

    Create the kubeconfig

    10.0.0.21

    10.0.0.22

    The --server IP is the keepalived VIP, 10.0.0.10

    # set-cluster
    [root@hdss7-21 conf]# cd /opt/kubernetes/server/bin/
    [root@hdss7-21 conf]# ./kubectl config set-cluster myk8s \
        --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
        --embed-certs=true \
        --server=https://10.0.0.10:7443 \
        --kubeconfig=kubelet.kubeconfig
    
    Cluster "myk8s" set.
    
    # set-credentials
    [root@hdss7-21 conf]# ./kubectl config set-credentials k8s-node \
      --client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
      --client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
      --embed-certs=true \
      --kubeconfig=kubelet.kubeconfig 
    
    User "k8s-node" set.
    
    # set-context
    [root@hdss7-21 conf]# ./kubectl config set-context myk8s-context \
      --cluster=myk8s \
      --user=k8s-node \
      --kubeconfig=kubelet.kubeconfig
    
    Context "myk8s-context" created.
    
    # use-context
    [root@hdss7-21 conf]# ./kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
    Switched to context "myk8s-context".
    

    Grant permissions with a cluster role binding -- create it only once (it is stored in etcd), then copy the kubeconfig to each node

    Create a k8s-node binding that carries compute-node permissions

    [root@k8s-21 /opt/kubernetes/server/bin]# cd /opt/kubernetes/server/bin/conf/
    [root@hdss7-21 conf]# vi k8s-node.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: k8s-node
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: k8s-node
    
    /opt/kubernetes/server/bin/kubectl create -f k8s-node.yaml
    clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
    
    /opt/kubernetes/server/bin/kubectl get clusterrolebinding k8s-node
    NAME       AGE
    k8s-node   36s
    
    /opt/kubernetes/server/bin/kubectl get clusterrolebinding k8s-node -o yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      creationTimestamp: "2020-08-11T15:06:15Z"
      name: k8s-node
      resourceVersion: "7875"
      selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
      uid: 7ebc840c-233a-40c0-99ca-37858da4003f
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: k8s-node
      
      
    [root@k8s-21 /opt/kubernetes/server/bin/conf]# mv /opt/kubernetes/server/bin/kubelet.kubeconfig /opt/kubernetes/server/bin/conf/
    [root@k8s-21 /opt/kubernetes/server/bin/conf]# ll
    total 16
    -rw-r--r-- 1 root root 2223 Sep 14 14:09 audit.yaml
    -rw-r--r-- 1 root root  258 Sep 14 16:17 k8s-node.yaml
    -rw------- 1 root root 6195 Sep 14 16:16 kubelet.kubeconfig
    
  4. Node 10.0.0.22

    [root@k8s-21 /opt/kubernetes/server/bin/conf]# scp -rp kubelet.kubeconfig 10.0.0.22:/opt/kubernetes/server/bin/conf
    
    [root@k8s-22 ~]# ll /opt/kubernetes/server/bin/conf
    total 12
    -rw-r--r-- 1 root root 2223 Aug 11 19:48 audit.yaml
    -rw------- 1 root root 6199 Aug 11 22:54 kubelet.kubeconfig
    
    [root@k8s-22 /opt/kubernetes/server/bin]# ll certs/
    total 32
    -rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
    -rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
    -rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
    -rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
    -rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
    -rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
    -rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
    [root@k8s-22 /opt/kubernetes/server/bin]# ll conf/
    total 12
    -rw-r--r-- 1 root root 2223 Aug 11 19:48 audit.yaml
    -rw------- 1 root root 6199 Aug 11 22:54 kubelet.kubeconfig
    
  5. Prepare the pause base image -- the sidecar pattern

    # on the ops host hdss7-200.host.com:
    
    [root@hdss7-200 ~]# docker pull kubernetes/pause
    [root@hdss7-200 ~]# docker tag kubernetes/pause:latest harbor.lc.com/public/pause:latest
    [root@hdss7-200 ~]# docker push harbor.lc.com/public/pause:latest
    
  6. Start kubelet on both nodes, 21 and 22

    vi /opt/kubernetes/server/bin/kubelet.sh
    #!/bin/sh
    # 192.168.0.2 is the cluster DNS address; the pause image comes from the harbor registry;
    # --hostname-override must match each host (inline comments after '\' would break the line continuation)
    ./kubelet \
      --anonymous-auth=false \
      --cgroup-driver systemd \
      --cluster-dns 192.168.0.2 \
      --cluster-domain cluster.local \
      --runtime-cgroups=/systemd/system.slice \
      --kubelet-cgroups=/systemd/system.slice \
      --fail-swap-on="false" \
      --client-ca-file ./certs/ca.pem \
      --tls-cert-file ./certs/kubelet.pem \
      --tls-private-key-file ./certs/kubelet-key.pem \
      --hostname-override k8s-21.host.com \
      --kubeconfig ./conf/kubelet.kubeconfig \
      --log-dir /data/logs/kubernetes/kube-kubelet \
      --pod-infra-container-image harbor.lc.com/public/pause:latest \
      --root-dir /data/kubelet
      
      [root@k8s-22 /opt/kubernetes/server/bin]# vi /opt/kubernetes/server/bin/kubelet.sh
    #!/bin/sh
    ./kubelet \
      --anonymous-auth=false \
      --cgroup-driver systemd \
      --cluster-dns 192.168.0.2 \
      --cluster-domain cluster.local \
      --runtime-cgroups=/systemd/system.slice \
      --kubelet-cgroups=/systemd/system.slice \
      --fail-swap-on="false" \
      --client-ca-file ./certs/ca.pem \
      --tls-cert-file ./certs/kubelet.pem \
      --tls-private-key-file ./certs/kubelet-key.pem \
      --hostname-override k8s-22.host.com \
      --kubeconfig ./conf/kubelet.kubeconfig \
      --log-dir /data/logs/kubernetes/kube-kubelet \
      --pod-infra-container-image harbor.lc.com/public/pause:latest \
      --root-dir /data/kubelet
    
      
    [root@hdss7-21 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
    [root@hdss7-21 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
    
    # change the program name on 22
    [root@hdss7-21 conf]# vi /etc/supervisord.d/kube-kubelet.ini
    [program:kube-kubelet-7-21]	
    command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
    numprocs=1                                        ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
    autostart=true                                    ; start at supervisord start (default: true)
    autorestart=true                                  ; restart at unexpected quit (default: true)
    startsecs=30                                      ; number of secs prog must stay running (def. 1)
    startretries=3                                    ; max # of serial start failures (default 3)
    exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                   ; signal used to kill process (default TERM)
    stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                         ; setuid to this UNIX account to run the program
    redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                       ; emit events on stdout writes (default false)
    
    
    
    [root@hdss7-22 conf]# supervisorctl update
    
    [root@hdss7-21 conf]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 6565, uptime 0:24:15
    kube-apiserver-7-21              RUNNING   pid 6566, uptime 0:24:15
    kube-controller-manager-7-21     RUNNING   pid 6551, uptime 0:24:15
    kube-kubelet-7-21                RUNNING   pid 16663, uptime 0:01:14
    kube-scheduler-7-21              RUNNING   pid 6552, uptime 0:24:15
    
    echo 'PATH=$PATH:/opt/kubernetes/server/bin' >>/etc/profile && source /etc/profile
    
    [root@k8s-21 /opt/kubernetes/server/bin]# kubectl get nodes
    NAME              STATUS   ROLES    AGE   VERSION
    k8s-21.host.com   Ready    <none>   10m   v1.15.2
    k8s-22.host.com   Ready    <none>   78s   v1.15.2
    
  7. Add ROLES labels to set node roles; a node can carry both labels at once

    kubectl label node k8s-21.host.com node-role.kubernetes.io/master=
    kubectl label node k8s-21.host.com node-role.kubernetes.io/node=
    kubectl label node k8s-22.host.com node-role.kubernetes.io/master=
    kubectl label node k8s-22.host.com node-role.kubernetes.io/node=
    
    [root@k8s-21 /opt/kubernetes/server/bin]# kubectl get nodes
    NAME              STATUS   ROLES         AGE     VERSION
    k8s-21.host.com   Ready    master,node   13m     v1.15.2
    k8s-22.host.com   Ready    master,node   3m54s   v1.15.2
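
    As a hedged smoke test (hypothetical deployment name; uses the nginx image pushed to harbor earlier):

    kubectl run nginx-test --image=harbor.lc.com/public/nginx:v1.7.9 --replicas=1
    kubectl get pods -o wide   # the pod should reach Running on one of the two nodes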
    

Node services: kube-proxy

10.0.0.21

10.0.0.22

After the first node is deployed, copy the generated kubeconfig to each remaining node

  1. On 10.0.0.200: issue the certificate
[root@k8s-200 /opt/certs]# vi /opt/certs/kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

[root@k8s-200 /opt/certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client

[root@k8s-200 /opt/certs]# ll
total 100
-rw-r--r-- 1 root root 1257 Aug 10 19:10 apiserver.csr
-rw-r--r-- 1 root root  588 Aug 10 19:07 apiserver-csr.json
-rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
-rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
-rw-r--r-- 1 root root  840 Aug 10 14:14 ca-config.json
-rw-r--r-- 1 root root  993 Aug 11 10:10 ca.csr
-rw-r--r-- 1 root root  346 Aug 11 10:09 ca-csr.json
-rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
-rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
-rw-r--r-- 1 root root  993 Aug 10 19:00 client.csr
-rw-r--r-- 1 root root  280 Aug 10 19:00 client-csr.json
-rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
-rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
-rw-r--r-- 1 root root 1070 Aug 10 14:15 etcd-peer.csr
-rw-r--r-- 1 root root  385 Aug 10 14:14 etcd-peer-csr.json
-rw------- 1 root root 1679 Aug 10 14:15 etcd-peer-key.pem
-rw-r--r-- 1 root root 1436 Aug 10 14:15 etcd-peer.pem
-rw-r--r-- 1 root root 1115 Aug 11 10:08 kubelet.csr
-rw-r--r-- 1 root root  452 Aug 11 10:05 kubelet-csr.json
-rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
-rw-r--r-- 1 root root 1005 Aug 11 15:11 kube-proxy-client.csr
-rw------- 1 root root 1675 Aug 11 15:11 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Aug 11 15:11 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 Aug 11 15:10 kube-proxy-csr.json
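# Optionally verify the new cert before distributing it; the CN should read
# system:kube-proxy, which is the user kube-proxy will authenticate as:
[root@k8s-200 /opt/certs]# cfssl-certinfo -cert kube-proxy-client.pem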

# Distribute to the nodes
scp -rp kube-proxy-client.pem 10.0.0.21:/opt/kubernetes/server/bin/certs
scp -rp kube-proxy-client.pem 10.0.0.22:/opt/kubernetes/server/bin/certs
scp -rp kube-proxy-client-key.pem 10.0.0.22:/opt/kubernetes/server/bin/certs
scp -rp kube-proxy-client-key.pem 10.0.0.21:/opt/kubernetes/server/bin/certs

# Verify on the nodes
[root@k8s-21 /opt/kubernetes/server/bin/conf]# ll /opt/kubernetes/server/bin/certs/
total 40
-rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
-rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
-rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
-rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
-rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
-rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
-rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
-rw------- 1 root root 1675 Aug 11 15:11 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Aug 11 15:11 kube-proxy-client.pem

[root@k8s-22 ~]# ll /opt/kubernetes/server/bin/certs/
total 40
-rw------- 1 root root 1675 Aug 10 19:10 apiserver-key.pem
-rw-r--r-- 1 root root 1606 Aug 10 19:10 apiserver.pem
-rw------- 1 root root 1675 Aug 11 10:10 ca-key.pem
-rw-r--r-- 1 root root 1346 Aug 11 10:10 ca.pem
-rw------- 1 root root 1679 Aug 10 19:00 client-key.pem
-rw-r--r-- 1 root root 1363 Aug 10 19:00 client.pem
-rw------- 1 root root 1679 Aug 11 10:08 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Aug 11 10:08 kubelet.pem
-rw------- 1 root root 1675 Aug 11 15:11 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Aug 11 15:11 kube-proxy-client.pem
  2. Install kube-proxy (using 21 as the example)
[root@k8s-21 /opt/kubernetes/server/bin/conf]# kubectl config set-cluster myk8s \
  --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:7443 \
  --kubeconfig=kube-proxy.kubeconfig
[root@k8s-21 /opt/kubernetes/server/bin/conf]# kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
  --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
[root@k8s-21 /opt/kubernetes/server/bin/conf]# kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
[root@k8s-21 /opt/kubernetes/server/bin/conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig

# Result on 21
[root@k8s-21 /opt/kubernetes/server/bin/conf]# ll /opt/kubernetes/server/bin/conf
total 24
-rw-r--r-- 1 root root 2224 Aug 11 19:57 audit.yaml
-rw-r--r-- 1 root root  258 Aug 11 23:05 k8s-node.yaml
-rw------- 1 root root 6199 Aug 11 22:54 kubelet.kubeconfig
-rw------- 1 root root 6215 Aug 12 00:52 kube-proxy.kubeconfig
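
# A quick sanity check of the generated kubeconfig (cert data is shown redacted):
[root@k8s-21 /opt/kubernetes/server/bin/conf]# kubectl config view --kubeconfig=kube-proxy.kubeconfig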
  3. Install kube-proxy on 22
# Copy to 22
[root@k8s-21 /opt/kubernetes/server/bin/conf]# scp -rp kube-proxy.kubeconfig 10.0.0.22:/opt/kubernetes/server/bin/conf

[root@k8s-22 ~]# ll /opt/kubernetes/server/bin/conf
total 20
-rw-r--r-- 1 root root 2223 Aug 11 19:48 audit.yaml
-rw------- 1 root root 6199 Aug 11 22:54 kubelet.kubeconfig
-rw------- 1 root root 6215 Aug 12 00:52 kube-proxy.kubeconfig

  4. Load the ipvs kernel modules on 21 and 22 (all compute nodes) -- the script must be set to run automatically at boot
[root@k8s-22 ~]# vim /root/ipvs.sh
#!/bin/bash
# Load every ipvs module shipped with the running kernel (schedulers, helpers, etc.)
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  # only modprobe modules that modinfo can actually resolve
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done

[root@k8s-22 ~]# chmod +x /root/ipvs.sh 
[root@k8s-22 ~]# /root/ipvs.sh 

[root@k8s-22 ~]# lsmod| grep ip_vs 
ip_vs_wrr              12697  0 
ip_vs_wlc              12519  0 
ip_vs_sh               12688  0 
ip_vs_sed              12519  0 
ip_vs_rr               12600  0 
ip_vs_pe_sip           12740  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 
ip_vs                 145497  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack          133095  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

# Configure the script to run at boot
[root@k8s-21 ~]# vi /etc/rc.d/rc.local
/root/ipvs.sh

# Enable the rc.local boot-script mechanism (see the boot-script notes in this folder for details)
[root@k8s-21 ~]# chmod +x /etc/rc.d/rc.local

[root@k8s-21 ~]# vim /usr/lib/systemd/system/rc-local.service
# append an [Install] section to the existing unit (it ships without one):
[Install]
WantedBy=multi-user.target

[root@k8s-21 ~]# ln -s '/lib/systemd/system/rc-local.service' '/etc/systemd/system/multi-user.target.wants/rc-local.service'

Start the rc-local.service:
[root@k8s-21 ~]# systemctl start rc-local.service
[root@k8s-21 ~]# systemctl enable rc-local.service
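
# Alternatively, a dedicated oneshot systemd unit avoids rc.local entirely.
# A minimal sketch -- the unit name and path here are assumptions, not from the original steps:
[root@k8s-21 ~]# vim /etc/systemd/system/ipvs-modules.service
[Unit]
Description=Load ipvs kernel modules at boot

[Service]
Type=oneshot
ExecStart=/root/ipvs.sh
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target

[root@k8s-21 ~]# systemctl enable ipvs-modules.service
[root@k8s-21 ~]# systemctl start ipvs-modules.service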
# Create the kube-proxy startup script
# k8s-21.host.com: note that --hostname-override must match each node's hostname
[root@k8s-21 /opt/kubernetes/server]# cat /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override k8s-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig


chmod +x /opt/kubernetes/server/bin/kube-proxy.sh

mkdir -p /data/logs/kubernetes/kube-proxy

vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; restart at unexpected quit (default: true)
startsecs=30                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                          ; emit events on stdout writes (default false)

# Start
supervisorctl update
supervisorctl status
kube-proxy-7-21                  RUNNING   pid 6873, uptime 0:28:15

[root@k8s-22 ~]# netstat -luntp |grep kube-proxy
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      7310/./kube-proxy   
tcp6       0      0 :::10256                :::*                    LISTEN      7310/./kube-proxy 

# Check that LVS is in effect
[root@k8s-22 ~]# yum install -y ipvsadm
[root@k8s-22 ~]#  ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.0.0.21:6443               Masq    1      0          0         
  -> 10.0.0.22:6443               Masq    1      0          0         

# The kube-proxy startup script differs slightly on each host; adjust it when deploying the other nodes (a copy-and-edit sketch follows)
/opt/kubernetes/server/bin/kube-proxy.sh
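
# For example (a convenience sketch; same paths as above, only the hostname differs on 22):
[root@k8s-21 ~]# scp -rp /opt/kubernetes/server/bin/kube-proxy.sh 10.0.0.22:/opt/kubernetes/server/bin/
[root@k8s-22 ~]# sed -i 's/k8s-21.host.com/k8s-22.host.com/' /opt/kubernetes/server/bin/kube-proxy.sh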

Verify the cluster

# Log in to the harbor registry
[root@k8s-22 ~]# docker login harbor.lc.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
   
[root@k8s-22 ~]# ll .docker/config.json 
-rw------- 1 root root 144 Aug 12 01:25 .docker/config.json
[root@k8s-22 ~]# cat  .docker/config.json 
{
	"auths": {
		"harbor.lc.com": {
			"auth": "YWRtaW46MTIzNDU2"
		}
	},
	"HttpHeaders": {
		"User-Agent": "Docker-Client/19.03.12 (linux)"
	}
}
[root@k8s-22 ~]# echo "YWRtaW46MTIzNDU2"|base64 -d
admin:123456
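
# The stored auth value is simply base64(user:password), so encoding reproduces it:
[root@k8s-22 ~]# echo -n "admin:123456"|base64
YWRtaW46MTIzNDU2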

On any compute node, create a resource manifest

[root@k8s-21 ~]# vim nginx-ds.yaml 
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
        - name: my-nginx
          image: harbor.lc.com/public/nginx:v1.7.9
          ports:
            - containerPort: 80

Create it, verify, then delete after testing

# kubectl create -f nginx-ds.yaml 
daemonset.extensions/nginx-ds created

# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE     IP            NODE              NOMINATED NODE   READINESS GATES
nginx-ds-7lvpq   1/1     Running   0          4m39s   172.7.21.2    k8s-21.host.com   <none>           <none>
nginx-ds-llw25   1/1     Running   0          4m39s   172.7.22.2    k8s-22.host.com   <none>           <none>

# kubectl delete -f nginx-ds.yaml 
daemonset.extensions "nginx-ds" deleted


# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
# kubectl get node
NAME              STATUS   ROLES         AGE    VERSION
k8s-21.host.com   Ready    master,node   118m   v1.15.2
k8s-22.host.com   Ready    master,node   99m    v1.15.2
# kubectl get pods

flannel

Install on node 10.0.0.21

Install on node 10.0.0.22

https://github.com/coreos/flannel/releases

# Using 21 as the example; change the IP address on the other nodes
[root@k8s-21 /opt/src]# ll
-rw-r--r-- 1 root root    8547 Aug 12 14:01 flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-21 /opt/src]# mkdir /opt/flannel-v0.11.0
[root@k8s-21 /opt/src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0
[root@k8s-21 /opt/src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@k8s-21 /opt/src]# mkdir /opt/flannel/certs
[root@k8s-21 /opt/src]# cd /opt/flannel/certs
[root@k8s-21 /opt/flannel/certs]# scp -rp 10.0.0.200:/opt/certs/client.pem .
[root@k8s-21 /opt/flannel/certs]# scp -rp 10.0.0.200:/opt/certs/client-key.pem .  
[root@k8s-21 /opt/flannel/certs]# scp -rp 10.0.0.200:/opt/certs/ca.pem .

[root@k8s-21 /opt/flannel/certs]# cd /opt/flannel
[root@k8s-21 /opt/flannel]# vim subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24 # this node's subnet -- change per node
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false

[root@k8s-21 /opt/flannel]# vim flanneld.sh
#!/bin/bash
# --public-ip: this node's IP; --iface: this node's NIC name -- adjust per node
# (comments must not follow the line-continuation backslashes)
./flanneld \
  --public-ip=10.0.0.21 \
  --etcd-endpoints=https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
  --etcd-keyfile=./certs/client-key.pem \
  --etcd-certfile=./certs/client.pem \
  --etcd-cafile=./certs/ca.pem \
  --iface=eth0 \
  --subnet-file=./subnet.env \
  --healthz-port=2401
  
[root@k8s-21 /opt/flannel]# chmod +x flanneld.sh 

# Add the network config to etcd (doing it on one node is enough)
[root@k8s-21 /opt/etcd]# /opt/etcd/etcdctl member list
6cbdd801d2c800d9: name=etcd-server-7-21 peerURLs=https://10.0.0.21:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.21:2379 isLeader=false
74538ef5dc383e39: name=etcd-server-7-22 peerURLs=https://10.0.0.22:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.22:2379 isLeader=false
f7a9c20602b8532e: name=etcd-server-7-12 peerURLs=https://10.0.0.12:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.12:2379 isLeader=true

[root@k8s-21 /opt/etcd]# /opt/etcd/etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}

[root@k8s-21 /opt/etcd]# /opt/etcd/etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}

# supervisord
[root@k8s-21 /etc/supervisord.d]# vim /etc/supervisord.d/flannel.ini 
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh                                ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/flannel                                          ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log          ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
[root@k8s-21 ~]# mkdir /data/logs/flanneld/
[root@k8s-21 ~]# supervisorctl update
flanneld-7-21: added process group
[root@k8s-21 ~]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 7369, uptime 20:29:20
flanneld-7-21                    RUNNING   pid 46424, uptime 0:00:37
kube-apiserver-7-21              RUNNING   pid 24215, uptime 18:18:53
kube-controller-manager-7-21     RUNNING   pid 34280, uptime 0:42:55
kube-kubelet-7-21                RUNNING   pid 25123, uptime 14:51:05
kube-proxy-7-21                  RUNNING   pid 46716, uptime 13:11:28
kube-scheduler-7-21              RUNNING   pid 34355, uptime 0:42:51

# The three flannel backends
host-gw: layer-2 based; all hosts' gateways sit on the same subnet
VxLAN: layer-3 routed network; each host gets a virtual flannel NIC and communicates within the routed network
host-gw+VxLAN: picks between the two automatically
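
# Switching backends is just rewriting the etcd key and restarting flanneld on
# every node; a sketch (VxLAN shown):
[root@k8s-21 /opt/etcd]# /opt/etcd/etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
[root@k8s-21 /opt/etcd]# supervisorctl restart flanneld-7-21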
# route
[root@k8s-22 /opt/flannel]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.254      0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.7.21.0      10.0.0.21       255.255.255.0   UG    0      0        0 eth0
172.7.22.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0

[root@k8s-21 /etc/supervisord.d]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.254      0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.7.21.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0
172.7.22.0      10.0.0.22       255.255.255.0   UG    0      0        0 eth0

# Delete a static route
route del -net 172.7.22.0/24 gw 10.0.0.22 dev eth0
# Add a static route
route add -net 172.7.22.0/24 gw 10.0.0.22 dev eth0
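
# A quick check that the host-gw route works: from 21, ping the peer subnet's
# gateway (172.7.22.1, the docker0 address on 22 per the subnet.env layout above):
[root@k8s-21 ~]# ping -c 2 172.7.22.1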
# SNAT optimization: make services inside containers log the container address (172.7.0.0/16 range), not the host's 10.x address
[root@k8s-22 /opt/flannel]# yum install -y iptables-services
[root@k8s-22 /opt/flannel]# systemctl start iptables
[root@k8s-22 /opt/flannel]# systemctl enable iptables

[root@k8s-22 /opt/flannel]# iptables-save |grep -i postrouting

-A POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
# on node 21
iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

# on node 22
iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
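
# The default Docker rule shown earlier (-s 172.7.22.0/24 ! -o docker0 -j MASQUERADE)
# would still SNAT pod-to-pod traffic once it falls through the new rule, so remove
# it on each node; the 21 variant below is the assumed analogue:
[root@k8s-22 ~]# iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
[root@k8s-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE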

# Verify
[root@k8s-22 /opt/flannel]# iptables-save |grep -i postrouting
-A POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@k8s-22 ~]# iptables-save > /etc/sysconfig/iptables

# With the firewall enabled, still allow traffic between the hosts
[root@k8s-21 ~]# iptables -I INPUT -s 10.0.0.0/24 -j ACCEPT

coreDNS

The k8s service-discovery add-on

# Serve the resource manifests over HTTP from host 200
[root@k8s-200 ~]# vim /etc/nginx/conf.d/k8s-yaml.lc.com.conf
server{
    listen      80;
    server_name k8s-yaml.lc.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}
[root@k8s-200 ~]# mkdir /data/k8s-yaml
[root@k8s-200 ~]# nginx -t
[root@k8s-200 ~]# systemctl reload nginx
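
# Before the DNS record exists, the vhost can be exercised by IP with a Host
# header (an assumed quick check; the autoindex listing should come back once
# manifests are in place):
[root@k8s-200 ~]# curl -s -H 'Host: k8s-yaml.lc.com' http://10.0.0.200/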

# Add DNS records on host 11
[root@k8s-11 ~]# vim /var/named/lc.com.zone
$ORIGIN lc.com.
$TTL    600     ; 10 minutes
@       IN  SOA dns.lc.com.    dnsadmin.lc.com.  (
                                      2020050812 ; serial   
                                      10800      ; refresh (3 hours)   
                                      900        ; retry (15 minutes)  
                                      604800     ; expire (1 week)
                                      86400      ; minimum (1 day)  
                                      )
                    NS      dns.lc.com.
$TTL    60    ; 1 minute
dns            A     10.0.0.11
harbor         A     10.0.0.200
k8s-yaml       A     10.0.0.200

# Check the config and restart
named-checkconf 
systemctl restart named

[root@k8s-11 ~]# dig -t A k8s-yaml.lc.com @10.0.0.11 +short
10.0.0.200

[root@k8s-200 /data/k8s-yaml]# docker pull docker.io/coredns/coredns:1.6.1
[root@k8s-200 /data/k8s-yaml]# docker images
[root@k8s-200 /data/k8s-yaml]# docker tag coredns/coredns:1.6.1 harbor.lc.com/public/coredns:v1.6.1
[root@k8s-200 /data/k8s-yaml]# docker push harbor.lc.com/public/coredns:v1.6.1 
# Based on the upstream template:
https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
# RBAC manifest
vim /data/k8s-yaml/coredns/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system

# ConfigMap manifest
vi /data/k8s-yaml/coredns/cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.0.0.11
        cache 30
        loop
        reload
        loadbalance
    }
# Deployment manifest
vi /data/k8s-yaml/coredns/dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["coredns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: harbor.lc.com/public/coredns:v1.6.1
        imagePullPolicy: IfNotPresent
        resources:
          #limits:
            #memory: __PILLAR__DNS__MEMORY__LIMIT__
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile

# Service manifest
vim /data/k8s-yaml/coredns/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
# Create coreDNS from 10.0.0.21
[root@k8s-21 ~]# kubectl apply -f http://k8s-yaml.lc.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created

[root@k8s-21 ~]# kubectl apply -f http://k8s-yaml.lc.com/coredns/cm.yaml
configmap/coredns created

[root@k8s-21 ~]# kubectl apply -f http://k8s-yaml.lc.com/coredns/dp.yaml
deployment.apps/coredns created

[root@k8s-21 ~]# kubectl apply -f http://k8s-yaml.lc.com/coredns/svc.yaml
service/coredns created

[root@k8s-21 ~]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-866444dcfb-cv7jm   1/1     Running   0          65s


NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   192.168.0.2   <none>        53/UDP,53/TCP,9153/TCP   51s


NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           65s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-866444dcfb   1         1         1       65s

[root@k8s-21 ~]# dig -t A k8s-21.host.com @192.168.0.2 +short
10.0.0.21

[root@k8s-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local.  @192.168.0.2 +short
192.168.178.184

# How nginx-dp.kube-public resolves inside a container
[root@k8s-21 ~]# kubectl exec -it nginx-dp-5dfc689474-9jtsb /bin/bash -n kube-public
root@nginx-dp-5dfc689474-9jtsb:/# cat /etc/resolv.conf 
nameserver 192.168.0.2
search kube-public.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
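
# Because of that search list, the service is reachable by short name from
# inside the pod; an assumed quick check against the nginx-dp svc resolved above:
root@nginx-dp-5dfc689474-9jtsb:/# curl -sI nginx-dp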

ingress

Automatically exposes services for access from outside the cluster

nodeport exposes in-cluster services by adding iptables rules; iptables only supports the rr scheduling mode

An ingress is a set of rules that forwards user requests to a given svc resource based on domain name and URL path (a minimal rule sketch below)
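
# A minimal sketch of such a rule -- hypothetical names; assumes a running traefik
# ingress controller and the existing nginx-dp svc (servicePort 80 is an assumption),
# using the extensions/v1beta1 API of this k8s version:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-dp
  namespace: kube-public
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: nginx-dp.lc.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-dp
          servicePort: 80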

# Pull traefik on 200
[root@k8s-200 /data/k8s-yaml]# docker pull traefik:v1.7.2-alpine
[root@k8s-200 /data/k8s-yaml]# docker images|grep traefik
[root@k8s-200 /data/k8s-yaml]# docker tag add5fac61ae5 harbor.lc.com/public/traefik:v1.7.2
[root@k8s-200 /data/k8s-yaml]# docker push harbor.lc.com/public/traefik:v1.7.2 

https://github.com/containous/traefik/tree/v1.7/examples/k8s