2022-01-04环境:192.168.1.140 centos 7.6 master 192.168.1.135 centos 7.6 node-35
1、修改主机名
[root@yt ~]# hostnamectl set-hostname master [root@yt ~]# bash
2、生成 ssh 密钥对
[root@master ~]# ssh-keygen
一直回车即可
[root@master ~]# ssh-copy-id 192.168.1.140
[root@master ~]# ssh-copy-id 192.168.1.135
需要输入node主机密码
3、所有节点都关闭防火墙和 selinux
master节点和node节点
[root@master ~]# systemctl stop firewalld ; systemctl disable firewalld [root@master ~]# setenforce 0 setenforce: SELinux is disabled [root@master ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
4、关闭 swap 分区
master节点和node节点
方法一:
[root@master ~]# swapoff -a
[root@master ~]# free -m
方法二:永久关闭:注释 /etc/fstab 中的swap
5、修改:br_netfilter 模块用于将桥接流量转发至 iptables 链,br_netfilter 内核参数需要开启转发。
master节点和node节点
[root@master ~]# modprobe br_netfilter [root@master ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
6、配置 kubeadm、docker-ce的yum 源
方法一、在线源配置方法: 配置阿里云 Kubernetes yum 源(master节点和node节点)
[root@master ~]# tee /etc/yum.repos.d/kubernetes.repo <<-'EOF'
[kubernetes] name=Kubernetes baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1 gpgcheck=1
repo_gpgcheck=1 gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF
方法二、离线线源,需要提前下载kubeadm,docker-ce
[root@master ~]# tar xf k8s-docker.tar.gz -C /opt/
[root@master ~]# tee /etc/yum.repos.d/k8s-docker.repo << 'EOF'
[k8s-docker]
name=k8s-docker
baseurl=file:///opt/k8s-docker
enabled=1
gpgcheck=0
EOF
7、安装 kubelet、 kubeadm 、kubectl(master节点和node节点)
[root@master ~]# yum install -y kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2
[root@master ~]# systemctl enable kubelet && systemctl start kubelet
kubelet:运行在集群所有节点上,用于启动 Pod 和容器等对象的工具
kubeadm :用于初始化集群,启动集群的命令工具
kubectl:用于和集群通信的命令行,通过 kubectl 可以部署和管理应用,查看各种资源,创建、删除和更新各种组件
[root@node ~]# yum install -y kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2 [root@node ~]# systemctl enable kubelet && systemctl start kubelet
8、安装 docker(master节点和node节点)
[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@master ~]# yum install docker-ce docker-ce-cli containerd.io -y
[root@master ~]# systemctl start docker && systemctl enable docker.service
9、daemon.json(master节点和node节点)
[root@master ~]# tee /etc/docker/daemon.json << 'EOF' { "registry-mirrors": ["https://rncxm540.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"] } EOF [root@master ~]# systemctl daemon-reload [root@master ~]# systemctl restart docker
10、初始化集群(master节点)
如果是离线需要导入docker镜像
[root@master ~]# tar xf k8s-images.tar.gz -C /opt/
[root@master ~]# ll /opt/k8s-images/*.tar.gz|awk '{print $NF}'|sed -r 's#^#docker load -i #'|bash
[root@master ~]# kubeadm init --kubernetes-version=1.18.2 \ --apiserver-advertise-address=192.168.1.140 \
--image-repository registry.aliyuncs.com/google_containers \ --service-cidr=10.10.0.0/16 --pod-network-cidr=10.122.0.0/16
kubeadm join 192.168.1.140:6443 --token qw4g2r.hjpegi3tnxha8fjy \
--discovery-token-ca-cert-hash sha256:86d9de66ab136bd813f341ff291405dca8b0ba6b552033b32ee5a93c5d392802
初始化完成加入 k8s 集群
11、配置kubectl的配置文件
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
12、安装kubernetes网络组件-Calico
上传 calico.yaml到/root/ 在线下载配置文件地址是: https://docs.projectcalico.org/manifests/calico.yaml
[root@master ~]# kubectl apply -f /root/calico.yaml
[root@master ~]# kubectl get pod --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system calico-kube-controllers-6dc8c99cbc-flsrn 1/1 Running 1 4d23h kube-system calico-node-kbtl7 1/1 Running 0 4d22h kube-system calico-node-mmg66 0/1 Running 3 4d23h kube-system coredns-7f89b7bc75-lz7cx 1/1 Running 1 4d23h kube-system coredns-7f89b7bc75-qv97w 1/1 Running 1 4d23h kube-system etcd-master 1/1 Running 6 4d23h kube-system kube-apiserver-master 1/1 Running 6 4d23h kube-system kube-controller-manager-master 1/1 Running 4 4d23h kube-system kube-proxy-8kcp7 1/1 Running 1 4d23h kube-system kube-proxy-8mlqq 1/1 Running 0 4d22h kube-system kube-scheduler-master 1/1 Running 3 4d23h
13、node 节点加入集群
[root@node-35 ~]# kubeadm join 192.168.1.140:6443 --token qw4g2r.hjpegi3tnxha8fjy \
--discovery-token-ca-cert-hash sha256:86d9de66ab136bd813f341ff291405dca8b0ba6b552033b32ee5a93c5d392802
14、查看
[root@master ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION master Ready control-plane,master 4d23h v1.20.4 node-35 NotReady worker 4d22h v1.20.4
(注:此处输出显示 v1.20.4,与前面安装的 kubelet/kubeadm/kubectl 1.18.2 版本不一致,疑为另一环境的记录,请以实际环境版本为准)
15、安装 kubernetes-dashboard-2.0
kubernetes-dashboard2.0 yaml 文件地址: https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
vim recommended.yaml 在第 42 行下方添加 2 行 nodePort: 30000
type: NodePort
安装 dashboard
[root@master ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard unchanged serviceaccount/kubernetes-dashboard unchanged service/kubernetes-dashboard unchanged secret/kubernetes-dashboard-certs unchanged secret/kubernetes-dashboard-csrf configured Warning: resource secrets/kubernetes-dashboard-key-holder is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. secret/kubernetes-dashboard-key-holder configured configmap/kubernetes-dashboard-settings unchanged role.rbac.authorization.k8s.io/kubernetes-dashboard unchanged clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard unchanged rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged deployment.apps/kubernetes-dashboard unchanged service/dashboard-metrics-scraper unchanged deployment.apps/dashboard-metrics-scraper unchanged
[root@master ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE kube-system calico-kube-controllers-6dc8c99cbc-flsrn 1/1 Running 1 4d23h kube-system calico-node-kbtl7 1/1 Running 0 4d23h kube-system calico-node-mmg66 0/1 Running 3 4d23h kube-system coredns-7f89b7bc75-lz7cx 1/1 Running 1 4d23h kube-system coredns-7f89b7bc75-qv97w 1/1 Running 1 4d23h kube-system etcd-master 1/1 Running 6 4d23h kube-system kube-apiserver-master 1/1 Running 6 4d23h kube-system kube-controller-manager-master 1/1 Running 4 4d23h kube-system kube-proxy-8kcp7 1/1 Running 1 4d23h kube-system kube-proxy-8mlqq 1/1 Running 0 4d23h kube-system kube-scheduler-master 1/1 Running 3 4d23h kubernetes-dashboard dashboard-metrics-scraper-7b59f7d4df-h975g 1/1 Running 1 4d23h kubernetes-dashboard kubernetes-dashboard-74d688b6bc-4cthw 1/1 Running 1 4d23h
[root@master ~]# kubectl describe secrets -n kubernetes-dashboard dashboard-admin Name: dashboard-admin-token-x92cp Namespace: kubernetes-dashboard Labels: <none> Annotations: kubernetes.io/service-account.name: dashboard-admin kubernetes.io/service-account.uid: e6fda435-4b0d-4600-859c-bcbaa7678adc Type: kubernetes.io/service-account-token Data ==== ca.crt: 1066 bytes namespace: 20 bytes token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlhHU2t3X21NWTluVG5Pd3pBRnJ0eWFwZGt3TVFFekc2NHRrZzZ3QVI5Z28ifQ.
eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJ
rdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWR
taW4tdG9rZW4teDkyY3AiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJ
kLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZTZmZGE0MzUtNGIwZC00NjAwL
Tg1OWMtYmNiYWE3Njc4YWRjIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1h
ZG1pbiJ9.P5PU-Pp5jDQFTAUyu4ojrEp4k8S5vFguWA_BA50biHS568j7Dt6YUFMLhAdB53gM7T4hqHtBsGkrgc8dYgjIv1mad0BT0YoWqB
Vp64zkpWLylmgtkb3IBoOEbGG7fUi71OjAvt31EkNaBhpJ8lN-wDhqakZ5lWr2Hd-pDTHwkVlK3tABJXMeTucHn4VJy6JkCNe85yBfzjf7V
slZFFNkwZolwVS-aFVLC9KUEUPfp1MkoFH8QcXX1EZLykQxo0DlUeLiUHKuXeolvnfiWmlXYm1hlBYkORzYbjTtg73CALTSGaMmmyzTRz-rWbuacFINwyoXyzy_a7rqwv7cCoaC4Q
访问 web 界面 https://192.168.1.140:30000/
查看节点