Issue Kubernetes certificates
# Set the certificate environment variables
# Certificate validity: 87600h (10 years)
export EXPIRY_TIME="87600h"
# kube-apiserver node IPs; if the cluster is accessed externally through a VIP, add the VIP IP here as well
export K8S_APISERVER_VIP="\"192.168.2.175\",\"192.168.2.176\",\"192.168.2.177\""
# Kubernetes service IP (normally the first IP of SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.66.0.1"
# Cluster DNS domain referenced in the apiserver certificate SANs below
# (assumed to be cluster.local; adjust if your cluster uses a different domain)
export CLUSTER_DNS_DOMAIN="cluster.local"
# Set the cluster parameters
export CLUSTER_NAME=kubernetes
export KUBE_API=https://192.168.2.175:5443
# External K8s address; this HA setup uses the local loopback IP
export K8S_VIP_DOMAIN=127.0.0.1
export K8S_SSL="\"${K8S_VIP_DOMAIN}\""
# Subject fields used in the certificates
export CERT_ST="GuangDong"
export CERT_L="GuangZhou"
export CERT_O="k8s"
export CERT_OU="Qist"
export CERT_PROFILE="kubernetes"
# Generate the encryption key required by EncryptionConfig
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
# Set the working directory
export HOST_PATH=`pwd`
# If the etcd certificates were already created, they do not need to be created again
# Create the directories that hold the etcd and K8s certificate JSON files
mkdir -p ${HOST_PATH}/cfssl/{k8s,etcd}
# Create the directories that hold the issued certificates
mkdir -p ${HOST_PATH}/cfssl/pki/{k8s,etcd}
# The CA config file defines the root certificate's usage scenarios (profiles) and their parameters (usages, expiry, server auth, client auth, encryption, etc.); a specific profile is selected later when signing the other certificates.
cat << EOF | tee ${HOST_PATH}/cfssl/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "${EXPIRY_TIME}"
    },
    "profiles": {
      "${CERT_PROFILE}": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "${EXPIRY_TIME}"
      }
    }
  }
}
EOF
# Create the Kubernetes CA config file
cat << EOF | tee ${HOST_PATH}/cfssl/k8s/k8s-ca-csr.json
{
  "CN": "$CLUSTER_NAME",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "$CERT_ST",
      "L": "$CERT_L",
      "O": "$CERT_O",
      "OU": "$CERT_OU"
    }
  ],
  "ca": {
    "expiry": "${EXPIRY_TIME}"
  }
}
EOF
# Generate the Kubernetes CA certificate and private key
cfssl gencert -initca ${HOST_PATH}/cfssl/k8s/k8s-ca-csr.json | cfssljson -bare ${HOST_PATH}/cfssl/pki/k8s/k8s-ca
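# Optional check (not part of the original steps): inspect the newly generated CA before
# signing anything with it, e.g. to confirm the subject fields and the 10-year validity.
cfssl certinfo -cert ${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem
# Equivalent with openssl, if you prefer:
# openssl x509 -noout -text -in ${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem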
# Create the Kubernetes API Server certificate config file
cat << EOF | tee ${HOST_PATH}/cfssl/k8s/k8s-apiserver.json
{
  "CN": "$CLUSTER_NAME",
  "hosts": [
    ${K8S_APISERVER_VIP},
    "${CLUSTER_KUBERNETES_SVC_IP}",
    ${K8S_SSL},
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.${CLUSTER_DNS_DOMAIN}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "$CERT_ST",
      "L": "$CERT_L",
      "O": "$CERT_O",
      "OU": "$CERT_OU"
    }
  ]
}
EOF
# Generate the Kubernetes API Server certificate and private key
cfssl gencert -ca=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem -ca-key=${HOST_PATH}/cfssl/pki/k8s/k8s-ca-key.pem -config=${HOST_PATH}/cfssl/ca-config.json -profile=${CERT_PROFILE} ${HOST_PATH}/cfssl/k8s/k8s-apiserver.json | cfssljson -bare ${HOST_PATH}/cfssl/pki/k8s/k8s-server
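# Optional check (not in the original): confirm the SAN list of the API server certificate
# contains every apiserver node IP, the VIP/loopback entry and the service IP, otherwise
# kubectl and kubelet connections will fail TLS verification.
openssl x509 -noout -text -in ${HOST_PATH}/cfssl/pki/k8s/k8s-server.pem | grep -A1 "Subject Alternative Name"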
# Create the Kubernetes aggregator (webhook proxy client) certificate config file
cat << EOF | tee ${HOST_PATH}/cfssl/k8s/aggregator.json
{
  "CN": "aggregator",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "$CERT_ST",
      "L": "$CERT_L",
      "O": "$CERT_O",
      "OU": "$CERT_OU"
    }
  ]
}
EOF
# Generate the Kubernetes aggregator (webhook) certificate and private key
cfssl gencert -ca=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem -ca-key=${HOST_PATH}/cfssl/pki/k8s/k8s-ca-key.pem -config=${HOST_PATH}/cfssl/ca-config.json -profile=${CERT_PROFILE} ${HOST_PATH}/cfssl/k8s/aggregator.json | cfssljson -bare ${HOST_PATH}/cfssl/pki/k8s/aggregator
# Create the admin (cluster administrator) certificate config file
cat << EOF | tee ${HOST_PATH}/cfssl/k8s/k8s-apiserver-admin.json
{
  "CN": "admin",
  "hosts": [""],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "$CERT_ST",
      "L": "$CERT_L",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
# Generate the Kubernetes admin certificate
cfssl gencert -ca=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem -ca-key=${HOST_PATH}/cfssl/pki/k8s/k8s-ca-key.pem -config=${HOST_PATH}/cfssl/ca-config.json -profile=${CERT_PROFILE} ${HOST_PATH}/cfssl/k8s/k8s-apiserver-admin.json | cfssljson -bare ${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin
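# Optional check (not in the original): the admin certificate gets cluster-admin rights solely
# through its subject organization (O=system:masters), so verify the subject before using it.
openssl x509 -noout -subject -in ${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin.pem
# The subject should contain O = system:masters and CN = admin.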
# Distribute the generated certificates to every node that will run kube-apiserver
ssh 192.168.2.175 mkdir -p /apps/k8s/ssl/{k8s,etcd}
ssh 192.168.2.176 mkdir -p /apps/k8s/ssl/{k8s,etcd}
ssh 192.168.2.177 mkdir -p /apps/k8s/ssl/{k8s,etcd}
# Distribute the certificate files
scp -r ./cfssl/pki/k8s/* 192.168.2.175:/apps/k8s/ssl/k8s
scp -r ./cfssl/pki/k8s/* 192.168.2.176:/apps/k8s/ssl/k8s
scp -r ./cfssl/pki/k8s/* 192.168.2.177:/apps/k8s/ssl/k8s
# Distribute the etcd client certificates
scp -r ./cfssl/pki/etcd/etcd-client* 192.168.2.175:/apps/k8s/ssl/etcd
scp -r ./cfssl/pki/etcd/etcd-client* 192.168.2.176:/apps/k8s/ssl/etcd
scp -r ./cfssl/pki/etcd/etcd-client* 192.168.2.177:/apps/k8s/ssl/etcd
scp -r ./cfssl/pki/etcd/etcd-ca.pem 192.168.2.175:/apps/k8s/ssl/etcd
scp -r ./cfssl/pki/etcd/etcd-ca.pem 192.168.2.176:/apps/k8s/ssl/etcd
scp -r ./cfssl/pki/etcd/etcd-ca.pem 192.168.2.177:/apps/k8s/ssl/etcd
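# The per-node commands above can also be written as a loop; a minimal sketch assuming the
# same three node IPs and that key-based SSH login to the nodes is already configured.
for node in 192.168.2.175 192.168.2.176 192.168.2.177; do
  ssh ${node} "mkdir -p /apps/k8s/ssl/{k8s,etcd}"
  scp -r ./cfssl/pki/k8s/* ${node}:/apps/k8s/ssl/k8s
  scp -r ./cfssl/pki/etcd/etcd-client* ./cfssl/pki/etcd/etcd-ca.pem ${node}:/apps/k8s/ssl/etcd
done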
Prepare the kube-apiserver binaries
wget https://storage.googleapis.com/kubernetes-release/release/v1.18.2/kubernetes-server-linux-amd64.tar.gz
# Unpack the downloaded archive
tar -xvf kubernetes-server-linux-amd64.tar.gz
# Create the remote directory for the binaries
ssh 192.168.2.175 mkdir -p /apps/k8s/bin
ssh 192.168.2.176 mkdir -p /apps/k8s/bin
ssh 192.168.2.177 mkdir -p /apps/k8s/bin
# Distribute the unpacked binaries
cd kubernetes/server/bin/
scp -r kube-apiserver 192.168.2.175:/apps/k8s/bin
scp -r kube-apiserver 192.168.2.176:/apps/k8s/bin
scp -r kube-apiserver 192.168.2.177:/apps/k8s/bin
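# Optional check (not in the original): confirm the binary arrived intact and reports the
# expected version on every node before writing any configuration.
for node in 192.168.2.175 192.168.2.176 192.168.2.177; do
  ssh ${node} /apps/k8s/bin/kube-apiserver --version
done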
Prepare the kube-apiserver configuration files
# Generate encryption-config.yaml (encryption of Secrets at rest in etcd)
cat << EOF | tee ${HOST_PATH}/encryption-config.yaml
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
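# Optional check (not in the original): once the cluster is fully up (end of this page), you can
# confirm that Secrets really are stored encrypted in etcd. This sketch assumes etcdctl is
# installed locally and reuses the etcd client certificates generated in the etcd section.
kubectl create secret generic enc-test --from-literal=foo=bar -n default
ETCDCTL_API=3 etcdctl \
  --endpoints=https://192.168.2.175:2379 \
  --cacert=${HOST_PATH}/cfssl/pki/etcd/etcd-ca.pem \
  --cert=${HOST_PATH}/cfssl/pki/etcd/etcd-client.pem \
  --key=${HOST_PATH}/cfssl/pki/etcd/etcd-client-key.pem \
  get /registry/secrets/default/enc-test | hexdump -C | head
# The stored value should start with k8s:enc:aescbc:v1:key1 instead of plain text.
kubectl delete secret enc-test -n default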
# Create the audit policy file
cat << EOF | tee ${HOST_PATH}/audit-policy.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch
  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get
  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update
  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get
  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'
  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch
  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF
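# Optional check (not in the original): make sure the policy file is valid YAML before
# distributing it; kube-apiserver refuses to start on a malformed audit policy. This assumes
# python3 with PyYAML is available on the workstation.
python3 -c "import yaml; yaml.safe_load(open('${HOST_PATH}/audit-policy.yaml')); print('audit-policy.yaml parses OK')"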
# Create the remote config directories
ssh 192.168.2.175 mkdir -p /apps/k8s/config
ssh 192.168.2.176 mkdir -p /apps/k8s/config
ssh 192.168.2.177 mkdir -p /apps/k8s/config
# Distribute the config files to the remote directories
scp -r {audit-policy.yaml,encryption-config.yaml} 192.168.2.175:/apps/k8s/config
scp -r {audit-policy.yaml,encryption-config.yaml} 192.168.2.176:/apps/k8s/config
scp -r {audit-policy.yaml,encryption-config.yaml} 192.168.2.177:/apps/k8s/config
# Create the kube-apiserver startup configuration files
ssh 192.168.2.175 mkdir -p /apps/k8s/conf
ssh 192.168.2.176 mkdir -p /apps/k8s/conf
ssh 192.168.2.177 mkdir -p /apps/k8s/conf
# Configuration for 192.168.2.175 (log in first, then run the command below on that node)
ssh 192.168.2.175
cat << EOF | tee /apps/k8s/conf/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
  --bind-address=0.0.0.0 \
  --advertise-address=192.168.2.175 \
  --secure-port=5443 \
  --insecure-port=0 \
  --service-cluster-ip-range=10.66.0.0/16 \
  --service-node-port-range=30000-65535 \
  --etcd-cafile=/apps/k8s/ssl/etcd/etcd-ca.pem \
  --etcd-certfile=/apps/k8s/ssl/etcd/etcd-client.pem \
  --etcd-keyfile=/apps/k8s/ssl/etcd/etcd-client-key.pem \
  --etcd-prefix=/registry \
  --etcd-servers=https://192.168.2.175:2379,https://192.168.2.176:2379,https://192.168.2.177:2379 \
  --client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --tls-cert-file=/apps/k8s/ssl/k8s/k8s-server.pem \
  --tls-private-key-file=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --kubelet-client-certificate=/apps/k8s/ssl/k8s/k8s-server.pem \
  --kubelet-client-key=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --service-account-key-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --requestheader-client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --proxy-client-cert-file=/apps/k8s/ssl/k8s/aggregator.pem \
  --proxy-client-key-file=/apps/k8s/ssl/k8s/aggregator-key.pem \
  --requestheader-allowed-names=aggregator \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-username-headers=X-Remote-User \
  --enable-aggregator-routing=true \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/apps/k8s/config/encryption-config.yaml \
  --enable-admission-plugins=DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
  --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
  --cors-allowed-origins=.* \
  --enable-swagger-ui \
  --runtime-config=api/all=true \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --authorization-mode=Node,RBAC \
  --allow-privileged=true \
  --apiserver-count=1 \
  --audit-dynamic-configuration \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-truncate-enabled \
  --audit-policy-file=/apps/k8s/config/audit-policy.yaml \
  --audit-log-path=/apps/k8s/log/api-server-audit.log \
  --profiling \
  --kubelet-https \
  --event-ttl=1h \
  --feature-gates=DynamicAuditing=true,ServiceTopology=true,EndpointSlice=true \
  --enable-bootstrap-token-auth=true \
  --alsologtostderr=true \
  --log-dir=/apps/k8s/log \
  --v=2 \
  --endpoint-reconciler-type=lease \
  --max-mutating-requests-inflight=500 \
  --max-requests-inflight=1500 \
  --target-ram-mb=600"
EOF
# Configuration for 192.168.2.176 (log in first, then run the command below on that node)
ssh 192.168.2.176
cat << EOF | tee /apps/k8s/conf/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
  --bind-address=0.0.0.0 \
  --advertise-address=192.168.2.176 \
  --secure-port=5443 \
  --insecure-port=0 \
  --service-cluster-ip-range=10.66.0.0/16 \
  --service-node-port-range=30000-65535 \
  --etcd-cafile=/apps/k8s/ssl/etcd/etcd-ca.pem \
  --etcd-certfile=/apps/k8s/ssl/etcd/etcd-client.pem \
  --etcd-keyfile=/apps/k8s/ssl/etcd/etcd-client-key.pem \
  --etcd-prefix=/registry \
  --etcd-servers=https://192.168.2.175:2379,https://192.168.2.176:2379,https://192.168.2.177:2379 \
  --client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --tls-cert-file=/apps/k8s/ssl/k8s/k8s-server.pem \
  --tls-private-key-file=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --kubelet-client-certificate=/apps/k8s/ssl/k8s/k8s-server.pem \
  --kubelet-client-key=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --service-account-key-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --requestheader-client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --proxy-client-cert-file=/apps/k8s/ssl/k8s/aggregator.pem \
  --proxy-client-key-file=/apps/k8s/ssl/k8s/aggregator-key.pem \
  --requestheader-allowed-names=aggregator \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-username-headers=X-Remote-User \
  --enable-aggregator-routing=true \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/apps/k8s/config/encryption-config.yaml \
  --enable-admission-plugins=DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
  --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
  --cors-allowed-origins=.* \
  --enable-swagger-ui \
  --runtime-config=api/all=true \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --authorization-mode=Node,RBAC \
  --allow-privileged=true \
  --apiserver-count=1 \
  --audit-dynamic-configuration \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-truncate-enabled \
  --audit-policy-file=/apps/k8s/config/audit-policy.yaml \
  --audit-log-path=/apps/k8s/log/api-server-audit.log \
  --profiling \
  --kubelet-https \
  --event-ttl=1h \
  --feature-gates=DynamicAuditing=true,ServiceTopology=true,EndpointSlice=true \
  --enable-bootstrap-token-auth=true \
  --alsologtostderr=true \
  --log-dir=/apps/k8s/log \
  --v=2 \
  --endpoint-reconciler-type=lease \
  --max-mutating-requests-inflight=500 \
  --max-requests-inflight=1500 \
  --target-ram-mb=600"
EOF
# Configuration for 192.168.2.177 (log in first, then run the command below on that node)
ssh 192.168.2.177
cat << EOF | tee /apps/k8s/conf/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
  --bind-address=0.0.0.0 \
  --advertise-address=192.168.2.177 \
  --secure-port=5443 \
  --insecure-port=0 \
  --service-cluster-ip-range=10.66.0.0/16 \
  --service-node-port-range=30000-65535 \
  --etcd-cafile=/apps/k8s/ssl/etcd/etcd-ca.pem \
  --etcd-certfile=/apps/k8s/ssl/etcd/etcd-client.pem \
  --etcd-keyfile=/apps/k8s/ssl/etcd/etcd-client-key.pem \
  --etcd-prefix=/registry \
  --etcd-servers=https://192.168.2.175:2379,https://192.168.2.176:2379,https://192.168.2.177:2379 \
  --client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --tls-cert-file=/apps/k8s/ssl/k8s/k8s-server.pem \
  --tls-private-key-file=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --kubelet-client-certificate=/apps/k8s/ssl/k8s/k8s-server.pem \
  --kubelet-client-key=/apps/k8s/ssl/k8s/k8s-server-key.pem \
  --service-account-key-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --requestheader-client-ca-file=/apps/k8s/ssl/k8s/k8s-ca.pem \
  --proxy-client-cert-file=/apps/k8s/ssl/k8s/aggregator.pem \
  --proxy-client-key-file=/apps/k8s/ssl/k8s/aggregator-key.pem \
  --requestheader-allowed-names=aggregator \
  --requestheader-group-headers=X-Remote-Group \
  --requestheader-extra-headers-prefix=X-Remote-Extra- \
  --requestheader-username-headers=X-Remote-User \
  --enable-aggregator-routing=true \
  --anonymous-auth=false \
  --experimental-encryption-provider-config=/apps/k8s/config/encryption-config.yaml \
  --enable-admission-plugins=DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
  --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
  --cors-allowed-origins=.* \
  --enable-swagger-ui \
  --runtime-config=api/all=true \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --authorization-mode=Node,RBAC \
  --allow-privileged=true \
  --apiserver-count=1 \
  --audit-dynamic-configuration \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-truncate-enabled \
  --audit-policy-file=/apps/k8s/config/audit-policy.yaml \
  --audit-log-path=/apps/k8s/log/api-server-audit.log \
  --profiling \
  --kubelet-https \
  --event-ttl=1h \
  --feature-gates=DynamicAuditing=true,ServiceTopology=true,EndpointSlice=true \
  --enable-bootstrap-token-auth=true \
  --alsologtostderr=true \
  --log-dir=/apps/k8s/log \
  --v=2 \
  --endpoint-reconciler-type=lease \
  --max-mutating-requests-inflight=500 \
  --max-requests-inflight=1500 \
  --target-ram-mb=600"
EOF
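# Optional check (not in the original): each node must advertise its own address, so a quick
# grep across the three nodes catches copy/paste mistakes in the per-node config files.
for node in 192.168.2.175 192.168.2.176 192.168.2.177; do
  ssh ${node} "grep -o -- '--advertise-address=[0-9.]*' /apps/k8s/conf/kube-apiserver"
done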
Create the kube-apiserver systemd unit file
cat << EOF | tee kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
Type=notify
LimitNOFILE=65535
LimitNPROC=65535
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/k8s/conf/kube-apiserver
ExecStart=/apps/k8s/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
EOF
# Upload the unit file to the servers
scp kube-apiserver.service 192.168.2.175:/usr/lib/systemd/system
scp kube-apiserver.service 192.168.2.176:/usr/lib/systemd/system
scp kube-apiserver.service 192.168.2.177:/usr/lib/systemd/system
Prepare to start kube-apiserver
# Create the required directories
# (the command is quoted so that the second mkdir also runs on the remote host instead of locally)
ssh 192.168.2.175 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/k8s/kubelet-plugins/volume"
ssh 192.168.2.176 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/k8s/kubelet-plugins/volume"
ssh 192.168.2.177 "mkdir -p /apps/k8s/{log,kubelet-plugins,conf} && mkdir -p /apps/k8s/kubelet-plugins/volume"
# Create the k8s user
ssh 192.168.2.175 useradd k8s -s /sbin/nologin -M
ssh 192.168.2.176 useradd k8s -s /sbin/nologin -M
ssh 192.168.2.177 useradd k8s -s /sbin/nologin -M
# Give the k8s user ownership of /apps/k8s
ssh 192.168.2.175 chown -R k8s:root /apps/k8s
ssh 192.168.2.176 chown -R k8s:root /apps/k8s
ssh 192.168.2.177 chown -R k8s:root /apps/k8s
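# Optional check (not in the original): confirm /apps/k8s now belongs to the k8s user that the
# systemd unit runs as, otherwise kube-apiserver cannot write its logs or audit file.
ssh 192.168.2.175 "ls -ld /apps/k8s /apps/k8s/log"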
Start kube-apiserver
# Reload systemd units
ssh 192.168.2.175 systemctl daemon-reload
ssh 192.168.2.176 systemctl daemon-reload
ssh 192.168.2.177 systemctl daemon-reload
# Enable start on boot
ssh 192.168.2.175 systemctl enable kube-apiserver.service
ssh 192.168.2.176 systemctl enable kube-apiserver.service
ssh 192.168.2.177 systemctl enable kube-apiserver.service
# Start kube-apiserver
ssh 192.168.2.175 systemctl start kube-apiserver.service
ssh 192.168.2.176 systemctl start kube-apiserver.service
ssh 192.168.2.177 systemctl start kube-apiserver.service
# Check the startup status
ssh 192.168.2.175 systemctl status kube-apiserver.service
ssh 192.168.2.176 systemctl status kube-apiserver.service
ssh 192.168.2.177 systemctl status kube-apiserver.service
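# Optional check (not in the original): before any kubeconfig exists, the secure port can be
# probed directly with the admin certificate; anonymous access is disabled, so a client cert is
# required. Paths assume the certificates generated earlier on this page.
curl --cacert ${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem \
  --cert ${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin.pem \
  --key ${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin-key.pem \
  https://192.168.2.175:5443/healthz
# Expected output: ok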
Verify that kube-apiserver started successfully
# Create the kubeconfig directory
mkdir -p ${HOST_PATH}/kubeconfig
# Create the admin kubeconfig for administrator access
# Set the cluster parameters
kubectl config set-cluster ${CLUSTER_NAME} --certificate-authority=${HOST_PATH}/cfssl/pki/k8s/k8s-ca.pem --embed-certs=true --server=${KUBE_API} --kubeconfig=${HOST_PATH}/kubeconfig/admin.kubeconfig
# Set the client authentication parameters
kubectl config set-credentials admin --client-certificate=${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin.pem --client-key=${HOST_PATH}/cfssl/pki/k8s/k8s-apiserver-admin-key.pem --embed-certs=true --kubeconfig=${HOST_PATH}/kubeconfig/admin.kubeconfig
# Set the context parameters
kubectl config set-context ${CLUSTER_NAME} --cluster=${CLUSTER_NAME} --user=admin --namespace=kube-system --kubeconfig=${HOST_PATH}/kubeconfig/admin.kubeconfig
# Set the default context
kubectl config use-context ${CLUSTER_NAME} --kubeconfig=${HOST_PATH}/kubeconfig/admin.kubeconfig
# Create the .kube directory in the current user's home directory
mkdir -p ~/.kube
cp ${HOST_PATH}/kubeconfig/admin.kubeconfig ~/.kube/config
# Copy the kubectl binary to /bin
cp kubectl /bin
# Verify the cluster
root@Qist:/tmp/sss# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-1               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}
etcd-2               Healthy     {"health":"true"}
root@Qist:/tmp/sss# kubectl cluster-info
Kubernetes master is running at https://192.168.2.175:5443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
The cluster is reachable and etcd reports Healthy, so kube-apiserver is deployed correctly; controller-manager and scheduler show Unhealthy only because they have not been deployed yet.
kubernetes v1.18.2 binary deployment (IPv4): kube-apiserver deployment