软件环境:
操作系统:CentOS-7
HostName | IP | 配置 |
---|---|---|
K8S-master | 10.211.55.20 | CPU:2 Memory: 4G |
K8S-node01 | 10.211.55.21 | CPU:2 Memory: 4G |
K8S-node02 | 10.211.55.22 | CPU:2 Memory: 4G |
软件版本:
软件名称 | 版本 |
---|---|
kubeadm | V1.21.1 |
kubectl | V1.21.1 |
kubelet | V1.21.1 |
kubernetes | V1.21.1 |
关闭SWAP
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
关闭SELINUX
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
安装部署工具包、配置系统环境及docker-ce
[root@ansible k8s]# cat install.yaml
---
# install.yaml — prepare every host in the "k8s" inventory group:
# install prerequisite packages, stop the firewall, tune kernel parameters,
# set the timezone, configure journald, and install docker-ce.
- hosts: k8s
  tasks:
    - name: 安装依赖包
      yum:
        name: "{{ item.pkg }}"
        state: present
      with_items:
        - { pkg: "epel-release" }
        - { pkg: "conntrack" }
        - { pkg: "ntpdate" }
        - { pkg: "ntp" }
        - { pkg: "ipvsadm" }
        - { pkg: "ipset" }
        - { pkg: "jq" }
        - { pkg: "iptables" }
        - { pkg: "curl" }
        - { pkg: "sysstat" }
        - { pkg: "libseccomp" }
        - { pkg: "wget" }
        - { pkg: "vim" }
        - { pkg: "net-tools" }
        - { pkg: "git" }
        - { pkg: "bash-completion" }
        - { pkg: "yum-utils" }
        - { pkg: "device-mapper-persistent-data" }
        - { pkg: "lvm2" }
    - name: 关闭防火墙
      service:
        name: firewalld
        state: stopped
        enabled: no
    # NOTE(review): the copied sysctl file only takes effect after
    # `sysctl --system` (or a reboot) — confirm a later step/reboot covers this.
    - name: 调整系统内核参数
      copy:
        src: '/etc/ansible/yaml/k8s/file/kubernetes.conf'
        dest: '/etc/sysctl.d/'
    - name: 调整系统时间
      shell: "timedatectl set-timezone Asia/Shanghai && timedatectl set-local-rtc 0 && systemctl restart rsyslog && systemctl restart crond"
    # Create persistent-journal directories.
    # Fixes: "with_itmes" typo (Ansible would reject the unknown keyword) and
    # the "journald.con.d" directory typo — the next task copies into
    # /etc/systemd/journald.conf.d/, so the directory name must match.
    - name: 配置rsyslogd & systemd journald
      file:
        path: "{{ item.dir }}"
        state: directory
        recurse: yes
      with_items:
        - { dir: '/var/log/journal' }
        - { dir: '/etc/systemd/journald.conf.d' }
    - name: 复制journald配置文件
      copy:
        src: '/etc/ansible/yaml/k8s/file/99-prophet.conf'
        dest: '/etc/systemd/journald.conf.d/'
    - name: 配置Docker YUM源
      shell: "yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo"
    - name: 安装docker-ce
      yum:
        name: docker-ce
        state: present
    - name: 创建Docker相关目录
      shell: "mkdir -p /etc/docker && mkdir -p /etc/systemd/system/docker.service.d"
    # Fix: src was '/file/daemon.json' (an absolute path from /), inconsistent
    # with every other file reference in this playbook; use the same
    # /etc/ansible/yaml/k8s/file/ directory as kubernetes.conf above.
    - name: 复制daemon.json文件
      copy:
        src: '/etc/ansible/yaml/k8s/file/daemon.json'
        dest: '/etc/docker/'
    - name: 重新加载daemon
      shell: "systemctl daemon-reload"
    - name: 启动Docker服务
      service:
        name: docker
        state: started
        enabled: yes
部署Kubeadm服务
[root@ansible k8s]# cat install-kubeadm.yaml
---
# install-kubeadm.yaml — configure the Kubernetes YUM repository and install
# a pinned kubeadm/kubectl/kubelet (v1.21.1) on every host in the "k8s" group.
- hosts: k8s
  tasks:
    # kubernetes.repo is shipped from the playbook directory's ./file/ folder.
    - name: 配置kubeadm YUM源
      copy:
        src: "./file/kubernetes.repo"
        dest: "/etc/yum.repos.d/"
    # Versions are pinned so all three tools match the cluster version
    # used by `kubeadm init` later (v1.21.1).
    - name: 安装kubeadm & kubectl & kubelet
      yum:
        name: "{{ item.pkg }}"
        state: present
      with_items:
        - { pkg: 'kubeadm-1.21.1' }
        - { pkg: 'kubectl-1.21.1' }
        - { pkg: 'kubelet-1.21.1' }
    # kubelet will crash-loop until `kubeadm init`/`join` provides its config;
    # enabling it here is still the standard kubeadm procedure.
    - name: 启动kubelet服务
      service:
        name: kubelet
        state: started
        enabled: yes
下载kubernetes镜像文件
[root@k8s-master01 ~]# kubeadm config images list
I1105 18:38:47.933541 22774 version.go:254] remote version is much newer: v1.22.3; falling back to: stable-1.21
k8s.gcr.io/kube-apiserver:v1.21.6
k8s.gcr.io/kube-controller-manager:v1.21.6
k8s.gcr.io/kube-scheduler:v1.21.6
k8s.gcr.io/kube-proxy:v1.21.6
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
镜像下载脚本
[root@kn02 script]# cat k8sImagePull.sh
#!/bin/bash
# k8sImagePull.sh — pull the Kubernetes v1.21 images from the Aliyun mirror
# (registry.aliyuncs.com/google_containers), retag them with the k8s.gcr.io
# names that kubeadm expects, then remove the mirror-named tags.
set -u

# Images required for Kubernetes v1.21 (from `kubeadm config images list`).
images=(
  kube-apiserver:v1.21.6
  kube-controller-manager:v1.21.6
  kube-scheduler:v1.21.6
  kube-proxy:v1.21.6
  pause:3.4.1
  etcd:3.4.13-0
  coredns:v1.8.0
)

# target_repo IMAGE
#   Print the k8s.gcr.io tag kubeadm expects for IMAGE.
#   coredns is published under the k8s.gcr.io/coredns/ sub-repository
#   (coredns:v1.8.0 -> k8s.gcr.io/coredns/coredns:v1.8.0); every other image
#   sits directly under k8s.gcr.io/.
target_repo() {
  local image=$1
  if [[ "$image" == coredns:* ]]; then
    printf 'k8s.gcr.io/coredns/%s\n' "$image"
  else
    printf 'k8s.gcr.io/%s\n' "$image"
  fi
}

# Deliberately no `set -e`: a failed pull of one image should not stop the
# remaining images from being processed (matches the original behaviour).
for image in "${images[@]}"; do
  mirror="registry.aliyuncs.com/google_containers/$image"
  docker pull "$mirror"
  docker tag "$mirror" "$(target_repo "$image")"
  docker rmi "$mirror"
done
部署K8S Master
kubeadm init --apiserver-advertise-address=10.211.55.20 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.21.1 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16 | tee kubeadm-init.log
[init] Using Kubernetes version: v1.21.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
....
....
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.211.55.20:6443 --token 8nkx6j.364591n6j8wamd8m \
--discovery-token-ca-cert-hash sha256:40fd388cfc560416f24e1ed1267a149267a9fa4f09071fc89dd097019d9a021e
安装Flannel网络组件
创建kube-flannel.yaml配置文件内容如下:
---
# PodSecurityPolicy for the flannel DaemonSet: non-privileged pods with
# host networking and only the NET_ADMIN/NET_RAW capabilities, restricted
# to the specific hostPath prefixes flannel needs.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  # Only the host directories flannel actually mounts.
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
# RBAC role for flannel: use the PSP, read pods, list/watch nodes and
# patch nodes/status.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  # NOTE(review): PSP 'use' is granted via the legacy 'extensions' API group
  # here; newer manifests usually use 'policy' — confirm against the cluster
  # version before reusing this elsewhere.
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
# Bind the flannel ClusterRole to the flannel ServiceAccount in kube-system.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
# ServiceAccount used by the flannel DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
# CNI and flannel network configuration, mounted into the DaemonSet pods.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  # CNI conflist installed into /etc/cni/net.d by the init container.
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  # Pod network; must match the --pod-network-cidr passed to kubeadm init
  # (10.244.0.0/16 in this guide).
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
# DaemonSet that runs one flannel pod on every Linux node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      # Schedule only on Linux nodes.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      # Tolerate every NoSchedule taint so flannel also runs on control-plane nodes.
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        # Copy the CNI conflist from the ConfigMap onto the host's
        # /etc/cni/net.d before the main container starts.
        - name: install-cni
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.14.0
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              # Matches allowedCapabilities in the PSP above.
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
安装flannel网络组件
[root@k8s-master01 k8s]# vim kube-flannel.yaml
[root@k8s-master01 k8s]# kubectl apply -f kube-flannel.yaml
将Node节点加入到K8S集群
Then you can join any number of worker nodes by running the following on each as root:
# 两台Node节点分别执行:
[root@k8s-node01 ~]# kubeadm join 10.211.55.20:6443 --token 8nkx6j.364591n6j8wamd8m \
--discovery-token-ca-cert-hash sha256:40fd388cfc560416f24e1ed1267a149267a9fa4f09071fc89dd097019d9a021e
查看K8S集群状态
[root@k8s-master01 ~]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master01 Ready control-plane,master 11h v1.21.1 10.211.55.20 <none> CentOS Linux 7 (Core) 4.4.290 docker://20.10.10
k8s-node01 Ready <none> 11h v1.22.3 10.211.55.21 <none> CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 docker://20.10.10
k8s-node02 Ready <none> 11h v1.22.3 10.211.55.22 <none> CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 docker://20.10.10