kubeadm部署kubernetes-1.18.0
Master 安装
停止防火墙
master 192.168.31.243 test3
slave1 192.168.31.5 test1
slave2 192.168.31.10 k8s-master01
# Disable firewalld so kubelet/kube-proxy/etcd ports are reachable between nodes
systemctl stop firewalld && systemctl disable firewalld
关闭selinux
# Permanently disable SELinux in config and turn it off for the running session
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config && setenforce 0
# Turn off swap immediately; kubelet refuses to start with swap enabled by default
swapoff -a
停止Swap
# Comment out swap entries in fstab so swap stays off after reboot
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
修改服务器名称
# Set this node's hostname (the master is "test3" in this setup)
hostnamectl set-hostname test3
配置Hosts文件
# Static name resolution for all cluster nodes
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.31.243 test3
192.168.31.10 k8s-master01
192.168.31.5 test1
EOF
修改sysctl配置
# Kernel parameters required by Kubernetes networking:
# let iptables see bridged traffic, and enable IPv4 forwarding
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the new sysctl settings
sysctl --system
安装配置服务器时间同步
yum install -y ntpdate
# Sync time every 5 minutes. Merge with any existing crontab entries instead
# of overwriting them: the original `(echo ...) | crontab` replaced the whole
# crontab, silently deleting any pre-existing jobs. `crontab -` reads the new
# table from stdin explicitly.
(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate -u cn.pool.ntp.org") | crontab -
systemctl restart crond
安装依赖和docker并配置docker配置文件
# Dependencies for docker storage drivers and yum repo management
yum -y install yum-utils device-mapper-persistent-data lvm2
# Use the Aliyun mirror of the docker-ce repo (faster inside China)
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce-19.03.13 docker-ce-cli-19.03.13
# Registry mirror + systemd cgroup driver (must match the kubelet's cgroup driver)
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://qkje6fd9.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker && systemctl start docker
配置Kubernetes源地址和安装kubeadm工具
# Aliyun mirror of the Kubernetes yum repo. GPG checks are disabled here —
# NOTE(review): consider enabling gpgcheck in production environments.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Pin kubelet/kubeadm/kubectl to 1.18.0 to match the cluster version
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet
# kubelet will crash-loop until 'kubeadm init'/'join' writes its config — expected
systemctl start kubelet
配置Kubernetes
# Initialize the control plane. The original command passed --service-cidr
# twice (10.1.0.0/16 then 10.10.0.0/16); kubeadm only honors the last
# occurrence, so a single explicit value is kept. The pod network CIDR
# (10.122.0.0/16) must match the CIDR configured in the calico manifest.
kubeadm init --apiserver-advertise-address=192.168.31.243 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.0 --service-cidr=10.10.0.0/16 --pod-network-cidr=10.122.0.0/16
// POD的网段为: 10.122.0.0/16, api server地址就是master本机IP。
初始化结果如下:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.31.243:6443 --token yqbyaj.gn6ih945lp95anrw \
--discovery-token-ca-cert-hash sha256:dcef0abb40f48dcfe0b5314336c5763bd0d5282302b4b01cb8d4ffb06e2a449d
根据步骤初始化执行命令
# Make kubectl usable for the current user by copying the admin kubeconfig
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Enable kubectl bash completion for this shell session
source <(kubectl completion bash)
部署calico
# (alternative) apply straight from the URL instead of downloading first
# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
wget https://docs.projectcalico.org/manifests/calico.yaml
# Edit the manifest; the numbered lines below are a vim excerpt showing the
# env vars to add to the calico-node container (leading numbers are vim line numbers)
vim calico.yaml
3611 # Cluster type to identify the deployment type
3612 - name: CLUSTER_TYPE
3613 value: "k8s,bgp"
# --- newly added lines start here ---
3614 - name: IP_AUTODETECTION_METHOD
3615 value: "interface=ens160" # the node's physical network interface
# --- newly added lines end here ---
3619 - name: IP
3620 value: "autodetect"
3621 # Enable IPIP
3622 - name: CALICO_IPV4POOL_IPIP
3623 value: "Always"
kubectl apply -f calico.yaml
kubectl get node # list cluster nodes
[root@test3 data]# kubectl get node
NAME STATUS ROLES AGE VERSION
test3 Ready master 4d2h v1.18.0
# Fixed flag typo: --all-namespace -> --all-namespaces (the transcript below
# already shows the corrected form)
kubectl get pod --all-namespaces # list pods in every namespace
[root@test3 data]# kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-785fc6b5f6-d6jrd 1/1 Running 0 4d1h
kube-system calico-node-752fq 0/1 Running 0 4d1h
kube-system calico-node-wq6sx 0/1 Running 0 4d
kube-system calico-node-zgvvk 0/1 Running 0 4d
kube-system coredns-7ff77c879f-l7r2t 1/1 Running 0 4d2h
kube-system coredns-7ff77c879f-r4t2j 1/1 Running 0 4d2h
kube-system etcd-test3 1/1 Running 1 4d2h
kube-system kube-apiserver-test3 1/1 Running 1 4d2h
kube-system kube-controller-manager-test3 1/1 Running 2 4d2h
kube-system kube-proxy-6bwz2 1/1 Running 1 4d2h
kube-system kube-proxy-c5qzt 1/1 Running 0 4d
kube-system kube-proxy-hchz5 1/1 Running 0 4d
kube-system kube-scheduler-test3 1/1 Running 2 4d2h
安装kubernetes-dashboard
# Fixed two typos from the original: the URL ended in ".yam" (truncated) and
# the edited filename had an extra "m" ("recommmended.yaml") — neither command
# could have worked as written.
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc7/aio/deploy/recommended.yaml
vim recommended.yaml
#修改 Service 配置
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30000
selector:
k8s-app: kubernetes-dashboard
kubectl create -f recommended.yaml
查看pod,service
[root@test3 data]# kubectl get pod,svc --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/calico-kube-controllers-785fc6b5f6-d6jrd 1/1 Running 0 4d1h
kube-system pod/calico-node-752fq 0/1 Running 0 4d1h
kube-system pod/calico-node-wq6sx 0/1 Running 0 4d
kube-system pod/calico-node-zgvvk 0/1 Running 0 4d
kube-system pod/coredns-7ff77c879f-l7r2t 1/1 Running 0 4d2h
kube-system pod/coredns-7ff77c879f-r4t2j 1/1 Running 0 4d2h
kube-system pod/etcd-test3 1/1 Running 1 4d2h
kube-system pod/kube-apiserver-test3 1/1 Running 1 4d2h
kube-system pod/kube-controller-manager-test3 1/1 Running 2 4d2h
kube-system pod/kube-proxy-6bwz2 1/1 Running 1 4d2h
kube-system pod/kube-proxy-c5qzt 1/1 Running 0 4d
kube-system pod/kube-proxy-hchz5 1/1 Running 0 4d
kube-system pod/kube-scheduler-test3 1/1 Running 2 4d2h
kubernetes-dashboard pod/dashboard-metrics-scraper-6b4884c9d5-67gkc 1/1 Running 0 4d1h
kubernetes-dashboard pod/kubernetes-dashboard-7b544877d5-vq96m 1/1 Running 0 4d1h
访问展示
获取token登录
# Create an admin ServiceAccount for the dashboard and grant it cluster-admin
kubectl create sa dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Find the SA's secret name, then extract its bearer token for the dashboard login page
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}
注意事项:
# kubeadm join tokens expire after 24 hours by default; remember to generate
# a new token before joining additional nodes after that.
# List existing tokens
kubeadm token list
# Create a new token
kubeadm token create
# If you lost the original 'kubeadm join' command for worker nodes:
# simplest: print a ready-to-use join command
kubeadm token create --print-join-command
# alternative: generate a token value first, then create a non-expiring join command
token=$(kubeadm token generate)
kubeadm token create $token --print-join-command --ttl=0
# Next you can deploy monitoring, applications, etc.
Slave 客户端安装配置
停止防火墙
systemctl stop firewalld && systemctl disable firewalld
关闭selinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config && setenforce 0
swapoff -a
停止Swap
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
修改服务器名称
# Each worker must get its own unique hostname matching /etc/hosts
# (test1 on 192.168.31.5, k8s-master01 on 192.168.31.10). The original
# copy-pasted the master's name "test3" here. Adjust per node:
hostnamectl set-hostname test1
配置Hosts文件
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.31.243 test3
192.168.31.10 k8s-master01
192.168.31.5 test1
EOF
修改sysctl配置
# Same kernel settings as on the master; the original slave version omitted
# net.ipv4.ip_forward = 1, which pod traffic routing (calico/kube-proxy)
# relies on — added here for consistency with the master (see master section).
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
安装配置服务器时间同步
yum install -y ntpdate
# Sync time every 5 minutes. Merge with any existing crontab entries instead
# of overwriting them: the original `(echo ...) | crontab` replaced the whole
# crontab, silently deleting any pre-existing jobs. `crontab -` reads the new
# table from stdin explicitly.
(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate -u cn.pool.ntp.org") | crontab -
systemctl restart crond
安装依赖和docker并配置docker配置文件
yum -y install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install the same docker version as the master. The original installed
# 18.09.6 here while the master uses 19.03.13 — running mixed docker
# versions across nodes is an unnecessary inconsistency.
yum -y install docker-ce-19.03.13 docker-ce-cli-19.03.13
cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://qkje6fd9.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker && systemctl start docker
配置Kubernetes源地址和安装kubeadm工具
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet
systemctl start kubelet
##加入到K8s节点
kubeadm join 192.168.31.243:6443 --token yqbyaj.gn6ih945lp95anrw \
--discovery-token-ca-cert-hash sha256:dcef0abb40f48dcfe0b5314336c5763bd0d5282302b4b01cb8d4ffb06e2a449d
从节点配置kubectl命令支持
从节点可以执行kubectl命令
# Allow running kubectl on the workers by copying the admin kubeconfig.
# Fixes over the original: `mkdir -p` (no error if the directory exists),
# and the second scp targeted 192.168.31.6 which matches no node — test1's
# address is 192.168.31.5 per the hosts file.
mkdir -p "$HOME/.kube"   # run on each worker node
scp /etc/kubernetes/admin.conf 192.168.31.10:/root/.kube/config   # run on the master
scp /etc/kubernetes/admin.conf 192.168.31.5:/root/.kube/config    # run on the master
查看Node节点
[root@test3 data]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 4d v1.18.0
test1 Ready <none> 4d v1.18.0
test3 Ready master 4d2h v1.18.0
calico客户端工具calicoctl
Calico的二进制程序文件calicoctl可以直接操作Calico存储来查看,修改或配置Calico系统特性。
# Install the calicoctl binary (v3.5.4) directly into PATH
wget https://github.com/projectcalico/calicoctl/releases/download/v3.5.4/calicoctl -O /usr/bin/calicoctl
chmod +x /usr/bin/calicoctl
执行查看calico节点
# Use the Kubernetes API as the datastore (reads calico state from k8s objects)
DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes
calicoctl node status # show BGP peer status for this node
注意事项
有时候 calico 自动检测到的网口是 docker 或者 br-* 等虚拟网口,导致网络转发异常,此时需要在 calico.yaml 中指定物理网口来实现路由转发,我这里使用的是 ens160 网口
查看calico命令: