Installing Kubernetes (k8s) v1.18.6 and Harbor
# Set the hostname on each machine so the hosts can resolve each other
hostnamectl set-hostname master
hostnamectl set-hostname node01
hostnamectl set-hostname node02
hostnamectl set-hostname harbor
# Add the host entries to /etc/hosts on every machine
192.168.1.20 master
192.168.1.21 node01
192.168.1.22 node02
192.168.1.25 harbor
# Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
# Disable firewalld and switch to iptables with empty rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
# Disable SELinux
# Turn off swap with swapoff (kubelet will not start with swap enabled)
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
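# Quick sanity check: swap usage should read 0 and SELinux should report Permissive (Disabled after a reboot)
free -m | grep -i swap
getenforce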
# Tune kernel parameters for Kubernetes
# Write them to kubernetes.conf (typical values for a kubeadm install on CentOS 7)
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
# Make journald logs persistent
mkdir -p /var/log/journal /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=200M
MaxRetentionSec=2week
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
# Load the kernel modules kube-proxy needs for ipvs mode
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Configure the Docker daemon (docker-ce itself is assumed to be installed already)
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
systemctl daemon-reload && systemctl restart docker
# Add the Kubernetes yum repository (Aliyun mirror)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install the latest version
yum -y install kubeadm kubectl kubelet
# Enable at boot
# kubelet talks to the container runtime to start containers, and after a kubeadm install
# the k8s components themselves run as pods, i.e. as containers underneath. kubelet must
# therefore start at boot, otherwise the cluster will not come back up after a reboot.
systemctl enable kubelet.service
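# Confirm the unit is enabled
systemctl is-enabled kubelet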
# Check the kubeadm version (v1.18.6 in my case)
kubeadm version
# List the images required for that version
kubeadm config images list --kubernetes-version=v1.18.6
# The required images are printed:
k8s.gcr.io/kube-apiserver:v1.18.6
k8s.gcr.io/kube-controller-manager:v1.18.6
k8s.gcr.io/kube-scheduler:v1.18.6
k8s.gcr.io/kube-proxy:v1.18.6
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
# Download the images
# kubeadm pulls from Google's servers, so without a proxy the command below cannot download anything
kubeadm config images pull
# Pull the images manually from the Aliyun mirror instead (the mirror is currently only updated through v1.18.3)
# Pull
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.3
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
# Re-tag with the k8s.gcr.io names kubeadm expects (the v1.18.3 images are tagged as v1.18.6)
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.3 k8s.gcr.io/kube-proxy:v1.18.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.3 k8s.gcr.io/kube-apiserver:v1.18.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.3 k8s.gcr.io/kube-scheduler:v1.18.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.3 k8s.gcr.io/kube-controller-manager:v1.18.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
# Remove the original tags, keeping only the k8s.gcr.io images
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.3
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.3
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.3
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.3
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
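# The pull/tag/rmi sequence above can also be scripted. A minimal sketch (a hypothetical
# helper using the same Aliyun mirror and the same v1.18.3 -> v1.18.6 re-tagging);
# save as sync-images.sh, chmod +x, then run:
#!/bin/bash
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
# "image mirror-tag target-tag" triples
for spec in \
    "kube-apiserver v1.18.3 v1.18.6" \
    "kube-controller-manager v1.18.3 v1.18.6" \
    "kube-scheduler v1.18.3 v1.18.6" \
    "kube-proxy v1.18.3 v1.18.6" \
    "pause 3.2 3.2" \
    "etcd 3.4.3-0 3.4.3-0" \
    "coredns 1.6.7 1.6.7"; do
  read -r name src dst <<< "$spec"
  docker pull "$MIRROR/$name:$src"
  docker tag  "$MIRROR/$name:$src" "k8s.gcr.io/$name:$dst"
  docker rmi  "$MIRROR/$name:$src"
done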
####################### The following block is not required ##############################
# Alternative: load the images offline before initialization
# When kubeadm initializes the cluster it pulls images from GCE (Google's cloud servers); they are large, slow to download, and newcomers without a proxy usually just get a 404, so an offline image bundle can be imported directly instead.
# Unpack the bundle (the archive is on Baidu net-disk)
tar -zxvf kubeadm-basic.images.tar.gz
# Loader script
vi load-images.sh
#!/bin/bash
# Load every image tarball from the bundle directory
ls /root/kubeadm-basic.images > /tmp/images.txt
cd /root/kubeadm-basic.images
for i in $(cat /tmp/images.txt)
do
    docker load -i "$i"
done
rm -f /tmp/images.txt
# Make it executable
chmod a+x load-images.sh
# Run it
./load-images.sh
###############################################################
# Run on the master only
# Initialize the control plane: print the default config into a yaml template
kubeadm config print init-defaults > kubeadm-config.yaml
# Structure of kubeadm-config.yaml (for reference; nothing to configure here)
InitConfiguration: initialization options, such as the bootstrap token and the apiserver advertise address
ClusterConfiguration: options for the master components: apiserver, etcd, network, scheduler, controller-manager, etc.
KubeletConfiguration: options for the kubelet component
KubeProxyConfiguration: options for the kube-proxy component
The default kubeadm-config.yaml contains only the InitConfiguration and ClusterConfiguration sections. Sample files for the other two can be generated as follows:
# Generate a KubeletConfiguration sample
kubeadm config print init-defaults --component-configs KubeletConfiguration
# Generate a KubeProxyConfiguration sample
kubeadm config print init-defaults --component-configs KubeProxyConfiguration
More kubeadm-config.yaml syntax: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
Configuring the control plane with kubeadm-config.yaml: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
Enabling ipvs for kube-proxy: https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md
Kubelet configuration examples: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/kubelet-integration/#configure-kubelets-using-kubeadm
###############################################################
# Edit the file
vi kubeadm-config.yaml
localAPIEndpoint:
  advertiseAddress: 192.168.1.20   # change to the master's IP
kubernetesVersion: v1.18.6         # change the version
networking:
  podSubnet: "10.244.0.0/16"       # add this line
  serviceSubnet: 10.96.0.0/12      # leave unchanged
# Then append the following document to switch kube-proxy to ipvs mode
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
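# Optional sanity check: kubeadm's --dry-run prints what it would do without changing the node
kubeadm init --config=kubeadm-config.yaml --dry-run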
# Initialize using the yaml file; certificates are issued automatically (supported since 1.13); tee all output into kubeadm-init.log
# --experimental-upload-certs is deprecated; the official replacement is --upload-certs
# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log   (no longer works)
# Run on the master only
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# Reading kubeadm-init.log
# The log generally covers the following:
- the Kubernetes version being installed
- preflight checks of the current environment
- image download for the cluster (can take a long time when pulling from Google GCE)
- starting the components from those images
- kubelet environment variables saved in /var/lib/kubelet/kubeadm-flags.env
- the kubelet config file saved in /var/lib/kubelet/config.yaml
- all certificates used by k8s saved under /etc/kubernetes/pki; the components talk to each other over HTTPS with mutual (two-way) authentication in a client/server model, so a large number of CA certificates and private keys are required
- DNS configuration and the current default domain (the default suffix for svc/service names)
- key generation for the k8s components
- the DNS name and address
- component config files generated under /etc/kubernetes
- RBAC authorization
- initialization succeeded
# After initialization, these follow-up steps appear in kubeadm-init.log
# Run the following 3 commands
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Join the remaining worker nodes (run the join command printed by kubeadm init on each node)
kubeadm join 192.168.1.20:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:8af6e9fc58ab8dde2a186c1311211d94cdee2cdbf79a9f511259818a5c944b98
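# If the bootstrap token has expired (24h lifetime by default), print a fresh join command on the master:
kubeadm token create --print-join-command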
# Deploy the flannel network
# Steps 1-5 must be run on all 3 servers
# 1. Pull the flannel image
docker pull quay.io/coreos/flannel:v0.9.0-amd64
# 2. Create the CNI config directory
mkdir -p /etc/cni/net.d
# 3. Create the CNI config file
cat > /etc/cni/net.d/10-flannel.conf << EOF
{"name":"cbr0","type":"flannel","delegate":{"isDefaultGateway":true}}
EOF
# 4. Create the subnet environment file
mkdir -p /usr/share/oci-umount/oci-umount.d && mkdir /run/flannel
cat > /run/flannel/subnet.env << EOF
FLANNEL_NETWORK=172.100.0.0/16
FLANNEL_SUBNET=172.100.1.0/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
EOF
# 5. Restart the services
systemctl daemon-reload
systemctl restart kubelet
systemctl restart docker
# 6. Apply the configuration
kubectl apply -f kube-flannel.yml
# For the original kube-flannel.yml see https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel-aliyun.yml
############################# kube-flannel.yml content (modified) ##############################
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "ali-vpc"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      name: kube-flannel
  template:
    metadata:
      labels:
        name: kube-flannel
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      priorityClassName: system-node-critical
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: registry.cn-hangzhou.aliyuncs.com/google-containers/flannel:v0.9.0
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conf
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: registry.cn-hangzhou.aliyuncs.com/google-containers/flannel:v0.9.0
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
#################################################################################
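# Verify: the flannel pods should reach Running and the nodes should go Ready
kubectl get pods -n kube-system -l app=flannel
kubectl get nodes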
# Troubleshooting
# 1. Unable to download the yml
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# If the connection fails, edit /etc/hosts (vim /etc/hosts) and add: 151.101.76.133 raw.githubusercontent.com
# 2. Handling the following error
# See: https://cormachogan.com/2020/03/06/deploying-flannel-vsphere-cpi-and-vsphere-csi-with-later-versions-of-kubernetes/
unable to recognize "kube-flannel.yml": no matches for kind "DaemonSet" in version "extensions/v1beta1"
# a. First check the DaemonSet version with kubectl explain
[root@master ~]# kubectl explain DaemonSet
KIND: DaemonSet
VERSION: apps/v1
# b. In kube-flannel.yml, change apiVersion: extensions/v1beta1 to apiVersion: apps/v1
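# The same edit as a one-liner (assuming the string only occurs on the DaemonSet's apiVersion line):
sed -i 's#extensions/v1beta1#apps/v1#' kube-flannel.yml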
# 3. Handling the following error
kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
# a. Fix it with the following steps
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Configure kubectl command completion
locate bash_completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
###########################################################################
# Set up the Harbor registry; see the official docs:
https://goharbor.io/docs/2.0.0/install-config/
# Configure /etc/hosts on all 4 hosts
cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.20 master
192.168.1.21 node01
192.168.1.22 node02
192.168.1.25 harbor
192.168.1.25 hub.bevis.com
# On all 4 hosts, edit /etc/docker/daemon.json
# Add an insecure-registries entry for the local domain (inside the existing top-level JSON object)
vi /etc/docker/daemon.json
"insecure-registries": ["https://hub.bevis.com"]
# Install docker-compose (installing via yum also works)
curl -L https://github.com/docker/compose/releases/download/1.26.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version
# Download and install Harbor
# Releases page
https://github.com/goharbor/harbor/releases
# Download Harbor
wget https://github.com/goharbor/harbor/releases/download/v1.10.4/harbor-offline-installer-v1.10.4.tgz
# Extract
tar -zxvf harbor-offline-installer-v1.10.4.tgz
# Configure Harbor
mv harbor /usr/local/
cd /usr/local/harbor/
# Edit harbor.yml: set the hostname and point the https section at the certificates generated below
# Steps follow https://goharbor.io/docs/2.0.0/install-config/troubleshoot-installation/#https
hostname: hub.bevis.com
https:
  port: 443
  certificate: /data/cert/hub.bevis.com.crt
  private_key: /data/cert/hub.bevis.com.key
# Create the certificates (on the harbor host)
# 1. Create the directory
mkdir -p /data/cert
cd /data/cert/
# 2. Generate ca.key
openssl genrsa -out ca.key 4096
# 3. Generate ca.crt
openssl req -x509 -new -nodes -sha512 -days 3650 \
-subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=hub.bevis.com" \
-key ca.key \
-out ca.crt
# 4. Generate hub.bevis.com.key
openssl genrsa -out hub.bevis.com.key 4096
# 5. Generate a certificate signing request (CSR)
openssl req -sha512 -new \
-subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=hub.bevis.com" \
-key hub.bevis.com.key \
-out hub.bevis.com.csr
# 6. Generate an x509 v3 extension file
# Whether you use an FQDN or an IP address to reach the Harbor host, this file is required so that the generated certificate meets the Subject Alternative Name (SAN) and x509 v3 extension requirements. Replace the DNS entries to match your domain.
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1=hub.bevis.com
DNS.2=hub.bevis
DNS.3=harbor
EOF
# 7. Use the v3.ext file to generate a certificate for the Harbor host
openssl x509 -req -sha512 -days 3650 \
-extfile v3.ext \
-CA ca.crt -CAkey ca.key -CAcreateserial \
-in hub.bevis.com.csr \
-out hub.bevis.com.crt
# 8. Convert hub.bevis.com.crt to hub.bevis.com.cert for use by Docker
openssl x509 -inform PEM -in hub.bevis.com.crt -out hub.bevis.com.cert
# 9. Copy the server certificate, key, and CA file into Docker's certificates folder on the hosts; the folder must be created first
# Create the directory on the other 3 servers
mkdir -p /etc/docker/certs.d/hub.bevis.com/
# Copy the certificates to the 3 servers
scp hub.bevis.com.cert root@master:/etc/docker/certs.d/hub.bevis.com/
scp hub.bevis.com.cert root@node01:/etc/docker/certs.d/hub.bevis.com/
scp hub.bevis.com.cert root@node02:/etc/docker/certs.d/hub.bevis.com/
scp hub.bevis.com.key root@master:/etc/docker/certs.d/hub.bevis.com/
scp hub.bevis.com.key root@node01:/etc/docker/certs.d/hub.bevis.com/
scp hub.bevis.com.key root@node02:/etc/docker/certs.d/hub.bevis.com/
scp ca.crt root@master:/etc/docker/certs.d/hub.bevis.com/
scp ca.crt root@node01:/etc/docker/certs.d/hub.bevis.com/
scp ca.crt root@node02:/etc/docker/certs.d/hub.bevis.com/
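# The nine scp commands above, equivalently as a loop:
for host in master node01 node02; do
  for f in hub.bevis.com.cert hub.bevis.com.key ca.crt; do
    scp "$f" root@"$host":/etc/docker/certs.d/hub.bevis.com/
  done
done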
# 10. Restart docker on all 4 servers
systemctl restart docker
# Run the installer
cd /usr/local/harbor
./install.sh
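# Verify the Harbor containers are up (run from /usr/local/harbor)
docker-compose ps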
# Configure the local domain on all 4 hosts
echo "192.168.1.25 hub.bevis.com" >> /etc/hosts
###############################
# Test the local domain
# Log in
docker login hub.bevis.com
# Pull an arbitrary test image
docker pull wangyanglinux/myapp:v1
# Re-tag it for the registry
docker tag wangyanglinux/myapp:v1 hub.bevis.com/library/myapp:v1
# Push
docker push hub.bevis.com/library/myapp:v1
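# Confirm the round trip by pulling from another node
docker pull hub.bevis.com/library/myapp:v1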