Binary Installation of Kubernetes 1.16.10

Server planning

Hostname IP Components
k8s01 192.168.10.91 kube-apiserver、kube-controller-manager、kube-scheduler、etcd
k8s02 192.168.10.92 kubelet、kube-proxy、docker、etcd
k8s03 192.168.10.93 kubelet、kube-proxy、docker、etcd

Deploy etcd

Generate etcd certificates

# Install CFSSL and its companion tools
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
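
A quick sanity check that the tools landed on the PATH:

cfssl version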

Create the etcd certificates

# Create the working directory
mkdir /opt/ssl_config/etcd -p
cd /opt/ssl_config/etcd/

etcd CA configuration

cat <<EOF|tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Create the ca-csr configuration

cat <<EOF|tee ca-csr.json
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Create the etcd server certificate request

cat <<EOF|tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
        "192.168.10.91",
        "192.168.10.92",
        "192.168.10.93"
        ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

Generate the etcd CA certificate and private key

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
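
If generation succeeded, the working directory now holds both key pairs:

ls *.pem
ca-key.pem  ca.pem  server-key.pem  server.pem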

Install etcd

Download the software

# Download the etcd release
wget https://github.com/etcd-io/etcd/releases/download/v3.3.22/etcd-v3.3.22-linux-amd64.tar.gz
tar xf etcd-v3.3.22-linux-amd64.tar.gz 

Create the etcd configuration file

mkdir -p /opt/etcd/{bin,cfg,ssl}
cp etcd-v3.3.22-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/

cat <<EOF |tee /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.10.91:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.10.91:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.10.91:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.10.91:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.10.91:2380,etcd-2=https://192.168.10.92:2380,etcd-3=https://192.168.10.93:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

Install the certificates

cd /opt/ssl_config/etcd/
cp *.pem /opt/etcd/ssl/

Create the etcd systemd unit file

vim /etc/systemd/system/etcd.service 
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
  --name=${ETCD_NAME} \
  --data-dir=${ETCD_DATA_DIR} \
  --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
  --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
  --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
  --initial-cluster=${ETCD_INITIAL_CLUSTER} \
  --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
  --initial-cluster-state=new \
  --cert-file=/opt/etcd/ssl/server.pem \
  --key-file=/opt/etcd/ssl/server-key.pem \
  --peer-cert-file=/opt/etcd/ssl/server.pem \
  --peer-key-file=/opt/etcd/ssl/server-key.pem \
  --trusted-ca-file=/opt/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Copy the configuration to the other two nodes

rsync -azv etcd root@192.168.10.92:/opt/
rsync -azv /etc/systemd/system/etcd.service root@192.168.10.92:/etc/systemd/system/etcd.service
# Then, on 192.168.10.92, update the member name and listen addresses:
sed -i 's#ETCD_NAME="etcd-1#ETCD_NAME="etcd-2#g' /opt/etcd/cfg/etcd.conf
sed -i 's#URLS="https://192.168.10.91#URLS="https://192.168.10.92#g' /opt/etcd/cfg/etcd.conf

rsync -azv etcd root@192.168.10.93:/opt/
rsync -azv /etc/systemd/system/etcd.service root@192.168.10.93:/etc/systemd/system/etcd.service
# Then, on 192.168.10.93:
sed -i 's#ETCD_NAME="etcd-1#ETCD_NAME="etcd-3#g' /opt/etcd/cfg/etcd.conf
sed -i 's#URLS="https://192.168.10.91#URLS="https://192.168.10.93#g' /opt/etcd/cfg/etcd.conf

Start the etcd service (run on all three nodes)

systemctl daemon-reload
systemctl start etcd
systemctl enable etcd

Verify the cluster health

etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.91:2379,https://192.168.10.92:2379,https://192.168.10.93:2379" cluster-health

member 675d9d912b71a976 is healthy: got healthy result from https://192.168.10.93:2379
member 8031e45f5ff71d02 is healthy: got healthy result from https://192.168.10.92:2379
member 9e07fded8a2b9058 is healthy: got healthy result from https://192.168.10.91:2379
cluster is healthy
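
The same check also works through the v3 API with the bundled etcdctl (the flag names differ from the v2 mode used above):

ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.10.91:2379,https://192.168.10.92:2379,https://192.168.10.93:2379" endpoint health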

Deploy the Master Node

Create the Kubernetes certificates

mkdir -p /opt/ssl_config/kubernetes
cd /opt/ssl_config/kubernetes

Kubernetes CA configuration

cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Create the ca-csr configuration

cat << EOF | tee ca-csr.json
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}

EOF

Create the kube-apiserver certificate request

cat << EOF | tee server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local",
      "192.168.10.91",
      "192.168.10.92",
      "192.168.10.93"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

Create the kube-proxy certificate request

cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

EOF

Generate the K8S CA, server, and kube-proxy certificates

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
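
cfssl-certinfo, installed earlier, can confirm that the SANs made it into the server certificate:

cfssl-certinfo -cert server.pem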

Configure the master node

wget https://dl.k8s.io/v1.16.10/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
mkdir /opt/kubernetes/{bin,cfg,ssl,logs} -p
cp kube-apiserver kube-controller-manager kube-proxy kubectl /opt/kubernetes/bin/
cp /opt/ssl_config/kubernetes/*.pem /opt/kubernetes/ssl/

Create the TLS Bootstrapping Token

head -c 16 /dev/urandom | od -An -t x | tr -d ' '
658bca9454213b67c0ee76c3c9023ab5

vim /opt/kubernetes/cfg/token.csv
658bca9454213b67c0ee76c3c9023ab5,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
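
The columns are token, user name, user UID, and group. The same file can also be produced in one step (the token value will of course differ):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/kubernetes/cfg/token.csv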

Deploy the kube-apiserver component

Create the apiserver configuration file

cat /opt/kubernetes/cfg/kube-apiserver.conf 
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.10.91:2379,https://192.168.10.92:2379,https://192.168.10.93:2379 \
--bind-address=192.168.10.91 \
--secure-port=6443 \
--advertise-address=192.168.10.91 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"

Create the kube-apiserver unit file

cat /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Deploy kube-scheduler

Create the kube-scheduler configuration file

cat /opt/kubernetes/cfg/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1"

Create the kube-scheduler unit file

cat /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Deploy kube-controller-manager

Create the kube-controller-manager configuration file

cat /opt/kubernetes/cfg/kube-controller-manager.conf 
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"

Create the kube-controller-manager unit file

cat /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start the services

systemctl daemon-reload
systemctl start kube-apiserver.service
systemctl start kube-scheduler.service
systemctl start kube-controller-manager.service
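
Verify the control-plane components registered as healthy (with no kubeconfig, kubectl falls back to localhost:8080, which this apiserver still serves in 1.16):

kubectl get cs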

Authorize kubelet-bootstrap

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Deploy the worker nodes

# On each worker node
mkdir -p /opt/kubernetes/{bin,cfg,logs,ssl}

# On the master node, push the binaries (from kubernetes/server/bin)
rsync -azv kubelet kube-proxy root@k8s02:/opt/kubernetes/bin/

Create the kubelet bootstrap.kubeconfig file

cat environment.sh
# Create the kubelet bootstrapping kubeconfig
BOOTSTRAP_TOKEN=658bca9454213b67c0ee76c3c9023ab5
KUBE_APISERVER="https://192.168.10.91:6443"
# Set cluster parameters
kubectl config set-cluster kubernetes   --certificate-authority=./ca.pem   --embed-certs=true   --server=${KUBE_APISERVER}   --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap   --token=${BOOTSTRAP_TOKEN}   --kubeconfig=bootstrap.kubeconfig

# Set the context
kubectl config set-context default   --cluster=kubernetes   --user=kubelet-bootstrap   --kubeconfig=bootstrap.kubeconfig

# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Run bash environment.sh (in the directory containing ca.pem) to generate bootstrap.kubeconfig.

Create the kubelet.kubeconfig file

cat envkubelet.kubeconfig.sh
# Create the kubelet kubeconfig
BOOTSTRAP_TOKEN=658bca9454213b67c0ee76c3c9023ab5
KUBE_APISERVER="https://192.168.10.91:6443"

# Set cluster parameters
kubectl config set-cluster kubernetes   --certificate-authority=./ca.pem   --embed-certs=true   --server=${KUBE_APISERVER}   --kubeconfig=kubelet.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet   --token=${BOOTSTRAP_TOKEN}   --kubeconfig=kubelet.kubeconfig

# Set the context
kubectl config set-context default   --cluster=kubernetes   --user=kubelet   --kubeconfig=kubelet.kubeconfig

# Use the default context
kubectl config use-context default --kubeconfig=kubelet.kubeconfig

# Run bash envkubelet.kubeconfig.sh to generate kubelet.kubeconfig.

Create the kube-proxy kubeconfig file

cat env_proxy.sh
# Create the kube-proxy kubeconfig

BOOTSTRAP_TOKEN=658bca9454213b67c0ee76c3c9023ab5
KUBE_APISERVER="https://192.168.10.91:6443"

kubectl config set-cluster kubernetes   --certificate-authority=./ca.pem   --embed-certs=true   --server=${KUBE_APISERVER}   --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy   --client-certificate=./kube-proxy.pem   --client-key=./kube-proxy-key.pem   --embed-certs=true   --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default   --cluster=kubernetes   --user=kube-proxy   --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Run bash env_proxy.sh to generate kube-proxy.kubeconfig.

Copy the files to the worker nodes

rsync -azv bootstrap.kubeconfig kube-proxy.kubeconfig  kubelet.kubeconfig root@k8s02:/opt/kubernetes/cfg/
rsync -azv ca.pem kube-proxy*.pem root@k8s02:/opt/kubernetes/ssl/

Deploy Docker

wget https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz
tar xf docker-18.09.6.tgz
cp docker/* /usr/bin/

Configure a registry mirror

mkdir -p /etc/docker/
cat /etc/docker/daemon.json
{
    "registry-mirrors": ["http://bc437cce.m.daocloud.io"]
}  

Create the Docker unit file

cat /etc/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

Start Docker

systemctl daemon-reload
systemctl start docker
systemctl enable docker

Deploy the kubelet component

Create the kubelet parameter file

cat /opt/kubernetes/cfg/kubelet-config.yml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110

Create the kubelet configuration file

cat kubelet.conf
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s02 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

Create the kubelet unit file

cat /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Deploy the kube-proxy component

Create the kube-proxy parameter file

cat kube-proxy-config.yml 
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s02
clusterCIDR: 10.244.0.0/16
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: true

Create the kube-proxy configuration file

cat kube-proxy.conf 
KUBE_PROXY_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --config=/opt/kubernetes/cfg/kube-proxy-config.yml"

Create the kube-proxy unit file

cat /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
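
The files above are written for k8s02. When preparing k8s03 from the same set, change the node name in both configs first; a minimal sketch:

sed -i 's/k8s02/k8s03/' /opt/kubernetes/cfg/kubelet.conf
sed -i 's/k8s02/k8s03/' /opt/kubernetes/cfg/kube-proxy-config.yml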

Start the services

systemctl daemon-reload
systemctl start kubelet
systemctl start kube-proxy
systemctl enable kubelet
systemctl enable kube-proxy

Approve the node certificate requests (on the master node)

kubectl certificate approve node-csr-6LffhreJ_LWi5txpoiyU9nRhC-0XFi3yUkIt-UxZ-u4
kubectl certificate approve node-csr-rsgn02q-gZmhv4NHHRCwMrk0Sxj1wS0YFX0iW5Vn69c
[root@k8s01 kubernetes]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-6LffhreJ_LWi5txpoiyU9nRhC-0XFi3yUkIt-UxZ-u4   74m     kubelet-bootstrap   Approved,Issued
node-csr-rsgn02q-gZmhv4NHHRCwMrk0Sxj1wS0YFX0iW5Vn69c   2m26s   kubelet-bootstrap   Approved,Issued
[root@k8s01 kubernetes]# kubectl get node
NAME    STATUS     ROLES    AGE   VERSION
k8s02   NotReady   <none>   64m   v1.16.10
k8s03   NotReady   <none>   97s   v1.16.10
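The CSR names are unique to every cluster, so rather than copying them by hand, all pending requests can also be approved in one pass:

kubectl get csr -o name | xargs kubectl certificate approve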

Deploy the CNI network

wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
mkdir /opt/cni/bin /etc/cni/net.d -p
tar xf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin/

kubelet already enables CNI via the flag set earlier:

cat /opt/kubernetes/cfg/kubelet.conf
--network-plugin=cni 

Run on the master

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
kube-flannel-ds-amd64-jtwwr   1/1     Running   0          7m13s
kube-flannel-ds-amd64-t7xsj   1/1     Running   0          10s

kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s02   Ready    <none>   11h   v1.16.10
k8s03   Ready    <none>   10h   v1.16.10

Deploy the web UI

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
vi recommended.yaml
…
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard
…
# kubectl apply -f recommended.yaml

#Create a service account and bind it to the default cluster-admin role:
cat dashboard-adminuser.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

#Apply it
kubectl apply -f dashboard-adminuser.yaml

#Get the login token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')

Access URL: https://NodeIP:30001 (the Service exposes port 443 via NodePort, so the scheme is HTTPS)
Log in to the Dashboard with the token printed above.

Deploy DNS

Inside the cluster, components communicate via ClusterIPs, but those addresses are not fixed either; in practice, applications connect to backing services by DNS name. As mentioned back when covering Swarm, Swarm ships with a built-in DNS module. Early Kubernetes versions used kube-dns, but its design was complex and hard to maintain, so the default DNS add-on is now CoreDNS.

cat coredns.yaml 
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: lizhenliang/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.2 
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

kubectl apply -f coredns.yaml
kubectl get pods -n kube-system
NAME                          READY   STATUS    RESTARTS   AGE
coredns-6d8cfdd59d-rk664      1/1     Running   0          31s
kube-flannel-ds-amd64-jtwwr   1/1     Running   0          63m
kube-flannel-ds-amd64-t7xsj   1/1     Running   0          56m
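
A quick resolution smoke test from inside the cluster (busybox 1.28.x is used here because nslookup is broken in later busybox tags):

kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28.4 -- nslookup kubernetes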

Master high availability

Hostname IP Components
k8s01 192.168.10.91 kube-apiserver、kube-controller-manager、kube-scheduler、etcd
k8s02 192.168.10.92 kubelet、kube-proxy、docker、etcd
k8s03 192.168.10.93 kubelet、kube-proxy、docker、etcd
k8s04 192.168.10.94 kube-apiserver、kube-controller-manager、kube-scheduler
lb01 192.168.10.5 nginx(四层负载)、keepalived
lb02 192.168.10.6 nginx(四层负载)、keepalived
vip 192.168.10.4
#Update server-csr.json to include the new master, LB, and VIP addresses
cat server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local",
      "192.168.10.91",
      "192.168.10.92",
      "192.168.10.93",
      "192.168.10.94",
      "192.168.10.5",
      "192.168.10.6",
      "192.168.10.4"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}

#Regenerate the server certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cp server*.pem /opt/kubernetes/ssl/
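
kube-apiserver only reads its certificates at startup, so restart it on k8s01 for the new SANs to take effect:

systemctl restart kube-apiserver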

Deploy the master components on k8s04 (same as on k8s01)

# On k8s01, sync binaries, configs, certificates, and unit files to k8s04
ssh root@192.168.10.94 mkdir -p /opt/etcd/ssl
rsync -azv /opt/kubernetes root@192.168.10.94:/opt/
rsync -azv /etc/systemd/system/kube-* root@192.168.10.94:/etc/systemd/system/
rsync -azv /opt/etcd/ssl/* root@192.168.10.94:/opt/etcd/ssl/

[root@k8s04 cfg]# vim kube-apiserver.conf 
--bind-address=192.168.10.94 --secure-port=6443 --advertise-address=192.168.10.94 

systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler

kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s02   Ready    <none>   14h   v1.16.10
k8s03   Ready    <none>   13h   v1.16.10

Check the master cluster status

kubectl get cs -o=go-template='{{printf "|NAME|STATUS|MESSAGE|\n"}}{{range .items}}{{$name := .metadata.name}}{{range .conditions}}{{printf "|%s|%s|%s|\n" $name .status .message}}{{end}}{{end}}'

|NAME|STATUS|MESSAGE|
|scheduler|True|ok|
|controller-manager|True|ok|
|etcd-0|True|{"health":"true"}|
|etcd-1|True|{"health":"true"}|
|etcd-2|True|{"health":"true"}|

Deploy Nginx + Keepalived high availability (on lb01 and lb02)

cat >/etc/yum.repos.d/nginx.repo <<'EOF'
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

yum -y install nginx

cat > /etc/nginx/nginx.conf <<'EOF'
user  nginx;
worker_processes  1;
error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;
events {
    worker_connections  1024;
}
stream {
    log_format main '$remote_addr - [$time_local] ' '$status ' '$upstream_addr';
    access_log /var/log/nginx/api-server.log main;
    upstream k8s-apiserver {
        server 192.168.10.91:6443;
        server 192.168.10.94:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}
EOF
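
Check the syntax before starting (this relies on the stream module, which the official nginx.org packages ship with):

nginx -t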

yum -y install keepalived

mkdir /server/scripts -p
cat > /server/scripts/check_web.sh <<'EOF'
#!/bin/bash
# grep itself counts as one match, so fewer than 2 means nginx is down
num=$(ps -ef | grep -c nginx)
if [ $num -lt 2 ];then
  systemctl stop keepalived
fi
EOF

chmod +x  /server/scripts/check_web.sh

#Master (lb01)
cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id lb01
}

vrrp_script check_web {
    script "/server/scripts/check_web.sh"
    interval 2
    weight 2
}

vrrp_instance lb {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.4/24
    }
    track_script {
        check_web
    }
}

#Backup (lb02)
cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id lb02
}

vrrp_script check_web {
    script "/server/scripts/check_web.sh"
    interval 2
    weight 2
}

vrrp_instance lb {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.10.4/24
    }
    track_script {
        check_web
    }
}

Start the services

systemctl start nginx
systemctl enable nginx
systemctl start keepalived.service 
systemctl enable keepalived.service
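
The VIP should now be bound on lb01 (eth0 is an assumption; substitute the actual interface name):

ip addr show eth0 | grep 192.168.10.4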

Test that the VIP works

curl -k --header "Authorization: Bearer 658bca9454213b67c0ee76c3c9023ab5" https://192.168.10.4:6443/version
{
  "major": "1",
  "minor": "16",
  "gitVersion": "v1.16.10",
  "gitCommit": "f3add640dbcd4f3c33a7749f38baaac0b3fe810d",
  "gitTreeState": "clean",
  "buildDate": "2020-05-20T13:51:56Z",
  "goVersion": "go1.13.9",
  "compiler": "gc",
  "platform": "linux/amd64"
}

Point the worker nodes at the VIP

cd /opt/kubernetes/cfg/
sed -i 's#192.168.10.91#192.168.10.4#g' *
systemctl restart kubelet.service 
systemctl restart kube-proxy.service
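
From either master, confirm the nodes re-registered through the load balancer:

kubectl get nodes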
