Deploying Kubernetes - Part 2


12 Deploy DNS

12.1 Deploy CoreDNS

[root@master-1 ~]# mkdir /root/dns
[root@master-1 ~]# cd /root/dns

Upload the coredns.yaml file.

[root@master-1 dns]# kubectl apply -f coredns.yaml 
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.extensions/coredns created
service/coredns created

[root@master-1 dns]# kubectl get pod -A
NAMESPACE     NAME                       READY   STATUS              RESTARTS   AGE
kube-system   coredns-66db855d4d-kxcdh   0/1     ContainerCreating   0          13s


[root@master-1 dns]# kubectl get pod -n kube-system
NAME                       READY   STATUS              RESTARTS   AGE
coredns-66db855d4d-kxcdh   0/1     ContainerCreating   0          25s

Check the pod startup details:

[root@master-1 dns]# kubectl describe pod coredns-66db855d4d-26bvw -n kube-system

12.2 Check the SVC

[root@master-1 dns]# kubectl get svc -o wide -n=kube-system
NAME      TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
coredns   ClusterIP   10.0.0.2     <none>        53/UDP,53/TCP,9153/TCP   53s   k8s-app=coredns

12.3 Verify that DNS works

12.3.1 Delete the previously created nginx demo

[root@master-1 cfg]# kubectl delete deployment nginx 
[root@master-1 cfg]# kubectl delete pods nginx 
[root@master-1 cfg]# kubectl delete svc -l run=nginx
[root@master-1 cfg]# kubectl delete deployment.apps/nginx

12.3.2 Start a new container

[root@master-1 ~]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
If you don't see a command prompt, try pressing enter.
Error attaching, falling back to logs: unable to upgrade connection: Forbidden (user=system:anonymous, verb=create, resource=nodes, subresource=proxy)
pod "dnstools" deleted
Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log dnstools)	

An error occurs; the fix is:

[root@master-1 ~]# kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous 
clusterrolebinding.rbac.authorization.k8s.io/system:anonymous created

[root@master-1 dns]# kubectl delete pod dnstools
[root@master-1 nginx]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools

12.3.3 Create the Nginx container

[root@master-1 ~]# kubectl run nginx --image=nginx --replicas=2 
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created



# Create the svc (ClusterIP)
# Create a service for an nginx deployment, which serves on port 88 and connects to the containers on port 80.
# template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
# --type='': Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. Default is 'ClusterIP'.
[root@master-1 ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
service/nginx exposed

12.3.4 Check the SVC

[root@master-1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        135m
nginx        NodePort    10.0.0.65    <none>        88:42671/TCP   74s

12.3.5 Test resolving Nginx

# DNS resolves the svc (Service) name, not the pod name
dnstools# nslookup nginx
Server:         10.0.0.2
Address:        10.0.0.2#53

Name:   nginx.default.svc.cluster.local
Address: 10.0.0.65
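
The fully qualified form of the same name should also resolve, since service records follow <service>.<namespace>.svc.cluster.local (output not captured here):

dnstools# nslookup nginx.default.svc.cluster.local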

12.3.6 Example: container network access is not restricted by (Kubernetes) namespace

A service in the kube-system namespace can be reached from the default namespace.

[root@master-1 ~]# kubectl run nginx-n1 --image=nginx --replicas=1 -n kube-system
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx-n1 created

Check the container status (in the specified namespace):

[root@master-3 ~]# kubectl get pods -n kube-system
NAME                        READY   STATUS              RESTARTS   AGE
coredns-66db855d4d-kxcdh    1/1     Running             0          21m
nginx-n1-67bb8ccb5f-w8f87   0/1     ContainerCreating   0          12s

Check the container status (across all namespaces):

[root@master-2 ~]# kubectl get pod,svc -A
NAMESPACE     NAME                            READY   STATUS              RESTARTS   AGE
default       pod/dnstools                    1/1     Running             0          97s
default       pod/nginx-7bb7cd8db5-kfrsk      1/1     Running             0          3m58s
default       pod/nginx-7bb7cd8db5-rwvgg      1/1     Running             0          3m58s
kube-system   pod/coredns-66db855d4d-kxcdh    1/1     Running             0          22m
kube-system   pod/nginx-n1-67bb8ccb5f-w8f87   0/1     ContainerCreating   0          24s

NAMESPACE     NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP                  137m
default       service/nginx        NodePort    10.0.0.65    <none>        88:42671/TCP             3m25s
kube-system   service/coredns      ClusterIP   10.0.0.2     <none>        53/UDP,53/TCP,9153/TCP   22m

Expose the nginx-n1 deployment in kube-system:

[root@master-1 ~]# kubectl expose deployment nginx-n1 --port=99 --target-port=80 -n kube-system
service/nginx-n1 exposed

12.3.7 Access the service across namespaces

[root@master-1 ~]# kubectl get svc -n kube-system | grep nginx-n1
nginx-n1   ClusterIP   10.0.0.41    <none>        99/TCP                   11s

Access the service:

dnstools# curl 10.0.0.41:99
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

12.3.8 Resolution fails

dnstools# nslookup nginx-n1 
Server:         10.0.0.2
Address:        10.0.0.2#53

** server can't find nginx-n1: NXDOMAIN

Fix (short names resolve in the default namespace, so use the fully qualified name):

dnstools# nslookup nginx-n1.kube-system.svc.cluster.local
Server:         10.0.0.2
Address:        10.0.0.2#53

Name:   nginx-n1.kube-system.svc.cluster.local
Address: 10.0.0.41

How to troubleshoot when DNS does not work:

  1. First check the pod status.
  2. Get the IP of the DNS pod.
  3. From dnstools, ping the DNS pod IP.
  • If the network is unreachable, check flanneld.
  • If the network is reachable, check whether the service itself responds.
    • If the service does not respond, the container has a problem.
    • If the service responds, test the cluster address (the service IP).
      • If that is unreachable, check the kube-proxy configuration file on the node.
      • If it is reachable, DNS should work. (A command sketch of this checklist follows.)
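
A minimal command sketch of the checklist above (pod names and IPs are placeholders taken from the earlier examples; kube-proxy is assumed to run as a systemd service on the nodes, as set up in part 1):

# 1. check the CoreDNS pod and note its IP
[root@master-1 ~]# kubectl get pod -n kube-system -o wide | grep coredns
# 2. from the dnstools pod, test connectivity to the CoreDNS pod IP
dnstools# ping <coredns-pod-ip>
# 3. test the cluster DNS service address (10.0.0.2) directly
dnstools# nslookup kubernetes.default.svc.cluster.local 10.0.0.2
# 4. if the pod IP answers but the service IP does not, check kube-proxy on the node
[root@node-1 ~]# systemctl status kube-proxy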

13 Deploy Dashboard 2.0

13.1 Create the Dashboard certificate

13.1.1 Create a directory (on master-1)

[root@master-1 ~]# mkdir /certs
[root@master-1 ~]# cd /certs/

13.1.2 Create the namespace

[root@master-1 certs]# kubectl create namespace kubernetes-dashboard
namespace/kubernetes-dashboard created

# check the namespaces
[root@master-1 certs]# kubectl get ns
NAME                   STATUS   AGE
default                Active   4h33m
kube-node-lease        Active   4h33m
kube-public            Active   4h33m
kube-system            Active   4h33m
kubernetes-dashboard   Active   5m57s

13.1.3 Create the key file

[root@master-1 certs]# openssl genrsa -out dashboard.key 2048
Generating RSA private key, 2048 bit long modulus
.......................................................................................................................................................+++
......................................................+++
e is 65537 (0x10001)

13.1.4 Create the certificate signing request

[root@master-1 certs]# openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'

13.1.5 Self-sign the certificate

[root@master-1 certs]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Signature ok
subject=/CN=dashboard-cert
Getting Private key

[root@master-1 certs]# ll
total 12
-rw-r--r-- 1 root root  989 Aug  6 19:37 dashboard.crt
-rw-r--r-- 1 root root  899 Aug  6 19:35 dashboard.csr
-rw-r--r-- 1 root root 1675 Aug  6 19:34 dashboard.key

13.1.6 Create the kubernetes-dashboard-certs object

[root@master-1 certs]# kubectl delete secrets kubernetes-dashboard-certs -n kubernetes-dashboard
Error from server (NotFound): secrets "kubernetes-dashboard-certs" not found

# store the certificate in Kubernetes
[root@master-1 certs]# kubectl create secret generic kubernetes-dashboard-certs --from-file=/certs -n kubernetes-dashboard 
secret/kubernetes-dashboard-certs created

13.1.7 Check whether the certificate secret exists

[root@master-1 certs]# kubectl get secret
NAME                  TYPE                                  DATA   AGE
default-token-7rdlr   kubernetes.io/service-account-token   3      4h23m
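
The secret created above lives in the kubernetes-dashboard namespace, so it does not appear in the default-namespace listing; query that namespace to see it:

[root@master-1 certs]# kubectl get secret -n kubernetes-dashboard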

13.2 Install the Dashboard

13.2.1 Create a directory

[root@master-1 certs]# mkdir /root/dashboard
[root@master-1 certs]# cd /root/dashboard/

13.2.2 Download the deployment file (recommended.yaml)

[root@master-1 dashboard]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml

13.2.3 Modify the recommended.yaml configuration

Because of the certificate issue, only Firefox can open the page with the auto-generated certificate; by supplying our own certificate, all browsers can open it.

Comment out the entire kubernetes-dashboard-certs Secret block:

 49 # many browsers cannot use the auto-generated certificate, so we create our own; comment out the kubernetes-dashboard-certs Secret declaration
 50 #apiVersion: v1
 51 #kind: Secret
 52 #metadata:
 53 #  labels:
 54 #    k8s-app: kubernetes-dashboard
 55 #  name: kubernetes-dashboard-certs
 56 #  namespace: kubernetes-dashboard
 57 #type: Opaque
13.2.3.1 Add a nodePort

Modify the Service configuration:

 39 spec:
 40   type: NodePort
 41   ports:
 42     - port: 443
 43       targetPort: 8443
 44       nodePort: 31001
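
With this NodePort the dashboard will be served over HTTPS on port 31001 of every node once the pods are running; a quick command-line check (assuming the node IPs used earlier, with -k because the certificate is self-signed):

[root@master-1 dashboard]# curl -k https://192.168.31.8:31001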

13.2.4 Pull the Dashboard image

Pull the dashboard image on the node nodes before applying the modified file:

[root@node-1 ~]# docker pull kubernetesui/dashboard:v2.0.0-beta6
v2.0.0-beta6: Pulling from kubernetesui/dashboard
e2da8a4ba320: Pull complete 
Digest: sha256:32616f6bda6477ef2d5ae3dcd96a89f355c59e62d254ae72c1b901785df4841c
Status: Downloaded newer image for kubernetesui/dashboard:v2.0.0-beta6
docker.io/kubernetesui/dashboard:v2.0.0-beta6

13.2.5 Apply recommended.yaml

[root@master-1 dashboard]# kubectl create -f recommended.yaml
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Error from server (AlreadyExists): error when creating "recommended.yaml": namespaces "kubernetes-dashboard" already exists

13.2.6 Check the result

Get the pods:

[root@master-1 dashboard]# kubectl get pod -A -o wide
NAMESPACE              NAME                                         READY   STATUS              RESTARTS   AGE     IP            NODE           NOMINATED NODE   READINESS GATES
default                nginx-7bb7cd8db5-kfrsk                       1/1     Running             2          5h15m   172.17.27.3   192.168.31.7   <none>           <none>
default                nginx-7bb7cd8db5-rwvgg                       1/1     Running             2          5h15m   172.17.34.2   192.168.31.8   <none>           <none>
kube-system            coredns-66db855d4d-kxcdh                     1/1     Running             2          5h33m   172.17.27.2   192.168.31.7   <none>           <none>
kube-system            nginx-n1-67bb8ccb5f-w8f87                    1/1     Running             2          5h11m   172.17.27.4   192.168.31.7   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-6c554969c6-2nzg9   0/1     ContainerCreating   0          10s     <none>        192.168.31.8   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-c46b85bbd-nrsr9         0/1     ContainerCreating   0          10s     <none>        192.168.31.8   <none>           <none>

Get the NodePort of the service:

kubectl get  svc -A

Access from a browser.

HTTP access (screenshot omitted)

HTTPS access (screenshot omitted)

Both access methods (screenshots omitted)

13.3 Create a Dashboard access account

13.3.1 Create the ServiceAccount

[root@master-1 dashboard]# kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
serviceaccount/dashboard-admin created

13.3.2 Bind it to cluster-admin

[root@master-1 dashboard]# kubectl create clusterrolebinding dashboard-cluster-admin   --clusterrole=cluster-admin  --serviceaccount=kubernetes-dashboard:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-cluster-admin created

13.3.3 Get the token

[root@master-1 dashboard]# kubectl describe secrets $(kubectl get secrets -n kubernetes-dashboard | awk '/dashboard-admin-token/{print $1}') -n kubernetes-dashboard | sed -n '/token:.*/p'
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tZnNmbjgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZTA0ZDM3MWEtNWNjYi00MzQxLWFiZWQtNThiYjQxOTQ5YmE4Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.K9JpWaH5TuSS6jKpCZDyJIlMnI1Mu9NBzIYstmYHpM76CiDOYT87EA-B8hIHx15tw-ICdvLR9rpjPLnBA4ban4yRXGSXZ0o6PXIpwcj4vCxBN74I9GeC9YzaXQU0aC7Nhg6Aueh6DmmaAqVozJO_Rsslio9_GJDmDBal9E4QZ042ZMKgeb5nUN6CMaEckE5gfpmbQT6hm96_L_WwxcCNSVxV329bpUhUaXjHm9MGNG3PyJoJeLVZQro8EVtsZ9oS6s76wfj558eK-BGyLLW56kGBx6jO1028EyDA01TYrK2TY61izQxtMWx-V-nALfoKyvOXk5s77j496uZtPmzMYw


13.3.4 Log in

If another version of the dashboard was installed previously, switch to a different node IP to access it.

(Login screenshots omitted)

14 Deploy Ingress

What Ingress is for:

1. Pins services to a stable name (the svc name)
2. Provides external access
  • Reverse proxying of services
  • Here we deploy Traefik 2.0

Create a directory:

[root@master-1 ~]# mkdir /root/ingress
[root@master-1 ~]# cd ingress/

14.1 Create the traefik-crd.yaml file (master-1)

[root@master-1 ~]# vim traefik-crd.yaml
## IngressRoute
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
    name: ingressroutes.traefik.containo.us
spec:
    scope: Namespaced
    group: traefik.containo.us
    version: v1alpha1
    names:
        kind: IngressRoute
        plural: ingressroutes
        singular: ingressroute
---
## IngressRouteTCP
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
    name: ingressroutetcps.traefik.containo.us
spec:
    scope: Namespaced
    group: traefik.containo.us
    version: v1alpha1
    names:
        kind: IngressRouteTCP
        plural: ingressroutetcps
        singular: ingressroutetcp
---
## Middleware
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
    name: middlewares.traefik.containo.us
spec:
    scope: Namespaced
    group: traefik.containo.us
    version: v1alpha1
    names:
        kind: Middleware
        plural: middlewares
        singular: middleware
---
## TLSOption
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
    name: tlsoptions.traefik.containo.us
spec:
    scope: Namespaced
    group: traefik.containo.us
    version: v1alpha1
    names:
        kind: TLSOption
        plural: tlsoptions
        singular: tlsoption

14.1.1 Create the Traefik CRD resources (master-1)

[root@master-1 ingress]# kubectl create -f traefik-crd.yaml 
customresourcedefinition.apiextensions.k8s.io/ingressroutes.traefik.containo.us created
customresourcedefinition.apiextensions.k8s.io/ingressroutetcps.traefik.containo.us created
customresourcedefinition.apiextensions.k8s.io/middlewares.traefik.containo.us created
customresourcedefinition.apiextensions.k8s.io/tlsoptions.traefik.containo.us created


[root@master-1 ingress]# kubectl get CustomResourceDefinition 
NAME                                   CREATED AT
ingressroutes.traefik.containo.us      2020-08-06T14:55:12Z
ingressroutetcps.traefik.containo.us   2020-08-06T14:55:12Z
middlewares.traefik.containo.us        2020-08-06T14:55:12Z
tlsoptions.traefik.containo.us         2020-08-06T14:55:12Z

14.2 Create the Traefik RBAC file (master-1)

[root@master-1 ~]# vi traefik-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
    namespace: kube-system
    name: traefik-ingress-controller
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
    name: traefik-ingress-controller
rules:
    - apiGroups: [""]
      resources: ["services","endpoints","secrets"]
      verbs: ["get","list","watch"]
    - apiGroups: ["extensions"]
      resources: ["ingresses"]
      verbs: ["get","list","watch"]
    - apiGroups: ["extensions"]
      resources: ["ingresses/status"]
      verbs: ["update"]
    - apiGroups: ["traefik.containo.us"]
      resources: ["middlewares"]
      verbs: ["get","list","watch"]
    - apiGroups: ["traefik.containo.us"]
      resources: ["ingressroutes"]
      verbs: ["get","list","watch"]
    - apiGroups: ["traefik.containo.us"]
      resources: ["ingressroutetcps"]
      verbs: ["get","list","watch"]
    - apiGroups: ["traefik.containo.us"]
      resources: ["tlsoptions"]
      verbs: ["get","list","watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
    name: traefik-ingress-controller
roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: traefik-ingress-controller
subjects:
    - kind: ServiceAccount
      name: traefik-ingress-controller
      namespace: kube-system

14.2.1 Create the RBAC resources

[root@master-1 ingress]# kubectl create -f traefik-rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created	

14.3 Create the Traefik ConfigMap (master-1)

[root@master-1 ~]# vi traefik-config.yaml
kind: ConfigMap
apiVersion: v1
metadata:
    name: traefik-config
data:
    traefik.yaml: |-
        serversTransport:
          insecureSkipVerify: true
        api:
          insecure: true
          dashboard: true
          debug: true
        metrics:
          prometheus: ""
        entryPoints:
          web:
            address: ":80"
          websecure:
            address: ":443"
        providers:
          kubernetesCRD: ""
        log:
          filePath: ""
          level: error
          format: json
        accessLog:
          filePath: ""
          format: json
          bufferingSize: 0
          filters:
            retryAttempts: true
            minDuration: 20
          fields:
            defaultMode: keep
            names:
              ClientUsername: drop
            headers:
              defaultMode: keep
              names:
                User-Agent: redact
                Authorization: drop
                Content-Type: keep

14.3.1 Create the Traefik ConfigMap resource

[root@master-1 ingress]# kubectl apply -f traefik-config.yaml -n kube-system
configmap/traefik-config created

14.4 Set node labels

Label the nodes that will run the ingress proxy:

[root@master-1 ingress]# kubectl label nodes 192.168.31.7 IngressProxy=true
node/192.168.31.7 labeled
[root@master-1 ingress]# kubectl label nodes 192.168.31.8 IngressProxy=true
node/192.168.31.8 labeled

14.4.1 Check the node labels

Verify that the labels were applied:

[root@master-1 ingress]# kubectl get nodes --show-labels
NAME           STATUS   ROLES    AGE    VERSION   LABELS
192.168.31.7   Ready    <none>   7h2m   v1.15.1   IngressProxy=true,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.31.7,kubernetes.io/os=linux
192.168.31.8   Ready    <none>   7h2m   v1.15.1   IngressProxy=true,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=192.168.31.8,kubernetes.io/os=linux

14.5 Create the Traefik deployment file

Note: ports 80 and 443 must not be in use on any node node.

[root@node-1 ~]# netstat -antupl | grep -E "80|443"	
[root@node-2 ~]# netstat -antupl | grep -E "80|443"	

The deployment file:

[root@master-1 ingress]# vi traefik-deploy.yaml 
apiVersion: v1
kind: Service
metadata:
  name: traefik
  labels:                       
    app: traefik-metrics
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
        #- image: traefik:latest
        - image: traefik:2.0.5
          name: traefik-ingress-lb
          ports:
            - name: web
              containerPort: 80
              hostPort: 80
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
            - name: redistcp
              containerPort: 6379
              hostPort: 6379
          resources:
            limits:
              cpu: 200m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 256Mi
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          args:
            - --configfile=/config/traefik.yaml
          volumeMounts:
            - mountPath: "/config"
              name: "config"
      volumes:
        - name: config
          configMap:
            name: traefik-config 
      tolerations:              # tolerate all taints, so the pods still run if a node is tainted
        - operator: "Exists"
      nodeSelector:             # node selector: only start on nodes with this label
        IngressProxy: "true"

14.5.1 Deploy the Traefik resources

On the node nodes, pull the traefik:2.0.5 image first:

[root@node-1 ~]# docker pull traefik:2.0.5
2.0.5: Pulling from library/traefik
89d9c30c1d48: Pull complete 
275722d2e7f6: Pull complete 
a5605da1bde2: Pull complete 
13c9af667fbf: Pull complete 
Digest: sha256:380d2eee7035e3b88be937e7ddeac869b41034834dcc8a30231311805ed9fd22
Status: Downloaded newer image for traefik:2.0.5
docker.io/library/traefik:2.0.5

Deploy:

[root@master-1 ingress]# kubectl apply -f traefik-deploy.yaml -n kube-system
service/traefik created
daemonset.apps/traefik-ingress-controller created

Check the running status:

[root@master-1 ingress]# kubectl get DaemonSet -A
NAMESPACE     NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR       AGE
kube-system   traefik-ingress-controller   2         2         2       2            2           IngressProxy=true   14s

(Screenshot omitted)

Possible error (screenshot omitted); fix:

# note the namespace
[root@master-1 ingress]# kubectl apply -f traefik-default-rbac.yaml

Another possible error (screenshot omitted); fix:

[root@master-1 ingress]# kubectl apply -f traefik-config.yaml

Normal display (screenshot omitted)

14.6 Traefik routing configuration

14.6.1 Configure the Traefik Dashboard route

[root@master-1 ingress]# cat traefik-dashboard-route.yaml 
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard-route
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`ingress.abcd.com`)
      kind: Rule
      services:
        - name: traefik
          port: 8080

Create the IngressRoute (traefik):

[root@master-1 ingress]# kubectl apply -f traefik-dashboard-route.yaml 
ingressroute.traefik.containo.us/traefik-dashboard-route created
[root@master-1 ingress]# kubectl get pods -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
default                nginx-7bb7cd8db5-kfrsk                       1/1     Running   2          5h47m
default                nginx-7bb7cd8db5-rwvgg                       1/1     Running   2          5h47m
kube-system            coredns-66db855d4d-kxcdh                     1/1     Running   2          6h5m
kube-system            nginx-n1-67bb8ccb5f-w8f87                    1/1     Running   2          5h44m
kube-system            traefik-ingress-controller-xw6mg             1/1     Running   0          3m2s
kube-system            traefik-ingress-controller-zcjcb             1/1     Running   0          3m2s
kubernetes-dashboard   dashboard-metrics-scraper-6c554969c6-2nzg9   1/1     Running   0          32m
kubernetes-dashboard   kubernetes-dashboard-c46b85bbd-nrsr9         1/1     Running   0          32m

14.6.2 Access the Traefik Dashboard from a client

14.6.2.1 Bind the hostname in the physical host's hosts file (or set up DNS)

Add to /etc/hosts:
192.168.31.8 ingress.abcd.com

14.6.2.2 Access the web UI (screenshot omitted)

14.7 Deploy a service to access (HTTP)

Create an nginx service:

[root@master-1 ingress]# kubectl run nginx-ingress-demo1 --image=nginx --replicas=1 -n kube-system
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx-ingress-demo1 created

[root@master-1 ingress]# kubectl expose deployment nginx-ingress-demo1 --port=1099 --target-port=80 -n  kube-system
service/nginx-ingress-demo1 exposed

Create the nginx route service:

vim nginx-ingress-demo-route1.yaml

apiVersion: traefik.containo.us/v1alpha1 
kind: IngressRoute    # kubernetes resource kind
metadata:
    name: traefik-nginx-demo-route1  # IngressRoute name
    namespace: kube-system
spec:
    entryPoints:
    - web 
    routes:
    - match: Host(`nginx11.abcd.com`) 
      kind: Rule
      services:
      - name: nginx-ingress-demo1 
        port: 1099

Create it:

[root@master-1 ingress]# kubectl apply -f nginx-ingress-demo-route1.yaml
[root@master-1 ingress]# kubectl get IngressRoute -A
NAMESPACE       NAME                        AGE
default         traefik-dashboard-route     48m
kube-system     traefik-nginx-demo-route    68s  

Access it.

Bind the hostname in the hosts file (on the physical machine):

192.168.91.21 nginx11.abcd.com
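
The route can also be tested from the command line without editing hosts, by sending the Host header directly to an IngressProxy node (node IP assumed from the labeling step above):

[root@master-1 ingress]# curl -H 'Host: nginx11.abcd.com' http://192.168.31.7/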


14.8 Create an HTTPS service

Proxy the dashboard HTTPS service.

Create a self-signed certificate:

[root@master-1 ingress]# cd /root/ingress
[root@master-1 ingress]# openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=cloud.abcd.com"
Generating a 2048 bit RSA private key
........................+++
.................+++
writing new private key to 'tls.key'
-----

Store the certificate in a Kubernetes Secret:

[root@master-1 ingress]# kubectl create secret tls dashboard-tls --key=tls.key --cert=tls.crt -n kube-system	

Check the secrets:

[root@master-1 ingress]# kubectl get secret
NAME                                     TYPE                                  DATA   AGE
default-token-7rdlr                      kubernetes.io/service-account-token   3      17h
traefik-ingress-controller-token-pp7wh   kubernetes.io/service-account-token   3      9h

Create the route file.

First check the namespace of the kubernetes dashboard, then create the route file:

[root@master-1 ingress]# vim kubernetes-dashboard-route.2.0.yaml
# note the namespace
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-route
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - websecure
  tls:
    secretName: dashboard-tls
  routes:
    - match: Host(`cloud.abcd.com`) 
      kind: Rule
      services:
        - name: kubernetes-dashboard
          port: 443

Create the Kubernetes Dashboard route object:

[root@master-1 ingress]# kubectl apply -f kubernetes-dashboard-route.2.0.yaml 
ingressroute.traefik.containo.us/kubernetes-dashboard-route created


# check the created routes
[root@master-1 ingress]# kubectl get IngressRoute -A
NAMESPACE              NAME                         AGE
kube-system            traefik-dashboard-route      9h
kube-system            traefik-nginx-demo-route1    25m
kubernetes-dashboard   kubernetes-dashboard-route   10s

Bind the hostname in hosts and access it:

192.168.31.8	cloud.abcd.com
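
A command-line check is also possible (a sketch; --resolve makes curl use the node IP while keeping the right SNI/Host, and -k accepts the self-signed certificate):

[root@master-1 ingress]# curl -k --resolve cloud.abcd.com:443:192.168.31.8 https://cloud.abcd.com/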

After the configuration is done, open a browser at https://cloud.abcd.com to open the Dashboard. (Screenshot omitted.)

14.9 TCP service access

Modify the traefik-config.yaml configuration file (adding the redistcp entry point):

kind: ConfigMap
apiVersion: v1
metadata:
  name: traefik-config
data:
  traefik.yaml: |-
    serversTransport:
      insecureSkipVerify: true
    api:
      insecure: true
      dashboard: true
      debug: true
    metrics:
      prometheus: ""
    entryPoints:
      web:
        address: ":80"
      websecure:
        address: ":443"
      redistcp:
        address: ":6379"
    providers:
      kubernetesCRD: ""
    log:
      filePath: ""
      level: error
      format: json
    accessLog:
      filePath: ""
      format: json
      bufferingSize: 0
      filters:
        retryAttempts: true
        minDuration: 20
      fields:
        defaultMode: keep
        names:
          ClientUsername: drop
        headers:
          defaultMode: keep
          names:
            User-Agent: redact
            Authorization: drop
            Content-Type: keep

Apply the configuration:

[root@master-1 ingress]# kubectl apply -f traefik-config.yaml -n kube-system

Modify the traefik-deploy.yaml file:

apiVersion: v1
kind: Service
metadata:
  name: traefik
  labels:                       
    app: traefik-metrics
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
        #- image: traefik:latest
        - image: traefik:2.0.5
          name: traefik-ingress-lb
          ports:
            - name: web
              containerPort: 80
              hostPort: 80
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
            - name: redistcp
              containerPort: 6379
              hostPort: 6379
          resources:
            limits:
              cpu: 200m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 256Mi
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          args:
            - --configfile=/config/traefik.yaml
          volumeMounts:
            - mountPath: "/config"
              name: "config"
      volumes:
        - name: config
          configMap:
            name: traefik-config 
      tolerations:              # tolerate all taints, so the pods still run if a node is tainted
        - operator: "Exists"
      nodeSelector:             # node selector: only start on nodes with this label
        IngressProxy: "true"

Apply the configuration:

[root@master-1 ingress]# kubectl apply -f traefik-deploy.yaml -n kube-system

The redis deployment file (redis-tcp-deploy.yaml):

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-tcp
spec:
  template:
    metadata:
      labels:
        app: redis-tcp
    spec:
      containers:
      - name: redis-tcp
        image: redis
        ports:
        - containerPort: 6379
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: redis-tcp-svc
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis-tcp

Deploy redis:

[root@master-1 ingress]# kubectl apply -f redis-tcp-deploy.yaml 
deployment.extensions/redis-tcp unchanged
service/redis-tcp-svc unchanged

Configure the TCP route (traefik-redis-tcp-route.yaml):

apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: redis-tcp-ingress
spec:
  entryPoints:
    - redistcp
  routes:
  - match: HostSNI(`*`)
    services:
    - name: redis-tcp-svc
      port: 6379
      weight: 10
      terminationDelay: 400

Deploy the route:

[root@master-1 ingress]# kubectl apply -f traefik-redis-tcp-route.yaml	

Check the Traefik UI. (Screenshot omitted.)

Bind any hostname to a node node IP and access it:

192.168.91.21 redis.cc.com
[root@master-2 ~]# redis-cli -h redis.cc.com -p 6379 
redis.cc.com:6379> set a 12131
OK
redis.cc.com:6379> get a 
"12131"

15 Deploy the monitoring system

15.1 What to monitor

Performance metrics (CPU, memory, load, disk, network, etc.):

  1. Performance metrics for containers and Pods
  2. Performance metrics for the host nodes
  3. Container network performance, such as http and tcp data

Status metrics:

  1. Running-state metrics of Kubernetes resource objects (Deployment, DaemonSet, Pod, etc.)
  2. Running-state metrics of Kubernetes platform components (kube-apiserver, kube-scheduler, etcd, etc.)

15.2 Monitoring options

  1. Heapster, InfluxDB, Grafana
    • The kubelet on every Kubernetes node embeds cAdvisor and exposes an API;
    • Heapster collects container metrics from these endpoints. It supports several storage backends, most commonly InfluxDB.
    • Drawbacks: a single data source, no alerting, and InfluxDB as a single point of failure; Heapster has also been deprecated in newer versions (replaced by Metrics Server).
  2. Metrics Server, InfluxDB, Grafana
    • Since Kubernetes 1.8, CPU, memory and other resource metrics are available through the Metrics API and can be read directly with kubectl top (see the example after this list).
    • The Metrics API requires Metrics Server to be deployed.
  3. node-exporter, Prometheus, Grafana
    • Various exporters collect metrics of different dimensions and expose them in the format Prometheus understands;
    • Prometheus pulls the data periodically, Grafana displays it, and AlertManager sends alerts on anomalies.
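
For example, once the Metrics API is served (by Metrics Server, or by the Prometheus adapter included in the manifests below), resource usage can be read directly; output depends on the cluster:

[root@master-1 ~]# kubectl top node
[root@master-1 ~]# kubectl top pod -n kube-system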

Components used

node-exporter, alertmanager, grafana, kube-state-metrics, Prometheus

Component descriptions

  • MetricsServer: aggregator of cluster resource usage, consumed inside the cluster by kubectl, HPA, the scheduler, etc.
  • NodeExporter: key metrics of each node (server CPU, memory, disk, I/O).
  • KubeStateMetrics: collects data about Kubernetes resource objects and is used to define alerting rules.
  • Prometheus-adapter: custom monitoring metrics and container metrics.
  • Prometheus: pulls data from the apiserver, scheduler, controller-manager and kubelet components over HTTP.
  • Grafana: visualization platform for statistics and monitoring.
  • Alertmanager: sends SMS or email alerts.

Data flow: (diagram omitted)

15.3 Install the NFS server (not for production use)

Used as storage for Prometheus and Grafana.

15.3.1 Pick one node and install nfs

 [root@master-1 ~]#	yum -y install nfs-utils

15.3.2 Create the nfs directory

 [root@master-1 ~]#	mkdir -p /ifs/kubernetes	

15.3.3 Change the permissions

[root@master-1 ~]#	chmod -R 777 /ifs/kubernetes	

15.3.4 Edit the exports file

[root@master-1 ~]#	vim /etc/exports
/ifs/kubernetes *(rw,no_root_squash,sync)

15.3.5 Modify the startup configuration file

Modify the rpcbind socket unit:

[root@master-1 ~]# cat >/etc/systemd/system/sockets.target.wants/rpcbind.socket<<EOFL 
[Unit]
Description=RPCbind Server Activation Socket 

[Socket]
ListenStream=/var/run/rpcbind.sock 
ListenStream=0.0.0.0:111 
ListenDatagram=0.0.0.0:111

[Install] 
WantedBy=sockets.target
EOFL

15.3.6 Apply the export configuration

[root@master-1 ~]# exportfs -f
exportfs: -f is available only with new cache controls. Mount /proc/fs/nfsd first
# this message appears because the nfs server is not running yet; the exports take effect once nfs is started in the next step

15.3.7 Start the rpcbind and nfs services

[root@master-1 ~]# systemctl restart rpcbind 
[root@master-1 ~]# systemctl enable rpcbind 
[root@master-1 ~]# systemctl restart nfs
[root@master-1 ~]# systemctl enable nfs
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.

15.3.8 showmount test (master-1)

[root@master-1 ~]# showmount -e 192.168.31.4 
Export list for 192.168.31.4:
/ifs/kubernetes *

15.3.9 Install the client on all node nodes

[root@node-1 ~]# yum -y install nfs-utils	

15.4 Check all nodes

Every node must be able to see the export; only then can the mount succeed.

[root@node-1 ~]# showmount -e 192.168.31.4 
Export list for 192.168.31.4:
/ifs/kubernetes *

15.4.1 Deploy the StorageClass and NFS provisioner

Create a directory:

[root@master-1 ~]# mkdir /root/nfs
[root@master-1 ~]# cd nfs
[root@master-1 nfs]# kubectl apply -f nfs-class.yaml
storageclass.storage.k8s.io/managed-nfs-storage created

First, on the node nodes, pull the required image quay.io/external_storage/nfs-client-provisioner:latest:

[root@node-1 ~]# docker pull quay.io/external_storage/nfs-client-provisioner:latest
latest: Pulling from external_storage/nfs-client-provisioner
a073c86ecf9e: Pull complete 
d9d714ee28a7: Pull complete 
36dfde95678a: Pull complete 
Digest: sha256:022ea0b0d69834b652a4c53655d78642ae23f0324309097be874fb58d09d2919
Status: Downloaded newer image for quay.io/external_storage/nfs-client-provisioner:latest
quay.io/external_storage/nfs-client-provisioner:latest

The NFS server address must be changed:
# edit nfs-deployment.yaml and set the NFS IP address to this host's address (see the sketch below)
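
The relevant part of nfs-deployment.yaml typically looks like this (a sketch based on the standard nfs-client-provisioner manifest; both the env values and the volume must point at the NFS server set up in 15.3, and the provisioner name must match nfs-class.yaml):

          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs          # must match the provisioner in nfs-class.yaml
            - name: NFS_SERVER
              value: 192.168.31.4            # NFS server address (master-1 here)
            - name: NFS_PATH
              value: /ifs/kubernetes         # exported directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.31.4
            path: /ifs/kubernetes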

[root@master-1 nfs]# kubectl apply -f nfs-deployment.yaml
serviceaccount/nfs-client-provisioner created
deployment.extensions/nfs-client-provisioner created

[root@master-1 nfs]# kubectl apply -f nfs-rabc.yaml
serviceaccount/nfs-client-provisioner unchanged
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created

Check the nfs pod status:

[root@master-1 nfs]# kubectl get pods 
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-6d6f7ff69f-bk95b   1/1     Running   0          25s
nginx-7bb7cd8db5-kfrsk                    1/1     Running   3          17h
nginx-7bb7cd8db5-rwvgg                    1/1     Running   3          17h

15.4.2 Check whether the deployment succeeded

[root@master-1 nfs]# kubectl get StorageClass
NAME                  PROVISIONER      AGE
managed-nfs-storage   fuseim.pri/ifs   73s

15.4.3 Check in the dashboard UI. (Screenshot omitted.)

15.5 Deploy the monitoring stack

Create a new directory:

[root@master-1 ~]# mkdir /root/monitor
[root@master-1 ~]# cd /root/monitor/

Upload the provided monitor/prometheus yaml folder to /root/monitor on the master:

[root@master-1 prometheus]# pwd
/root/monitor/prometheus
[root@master-1 prometheus]# ll
total 24
drwxr-xr-x 2 root root 4096 Aug  7 10:54 adapter
drwxr-xr-x 2 root root  149 Aug  7 10:54 alertmanager
drwxr-xr-x 2 root root  268 Aug  7 10:54 grafana
drwxr-xr-x 2 root root 4096 Aug  7 10:54 kube-state-metrics
drwxr-xr-x 2 root root  200 Aug  7 10:54 node-exporter
drwxr-xr-x 2 root root 4096 Aug  7 10:54 prometheus
-rw-r--r-- 1 root root  174 Aug  7 10:54 serviceaccount.sh
drwxr-xr-x 2 root root 4096 Aug  7 10:54 serviceMonitor
drwxr-xr-x 2 root root 4096 Aug  7 10:54 setup

Note which configuration files need to be modified.

Modify the IPs:

[root@master-1 prometheus]# cd serviceMonitor 
[root@master-1 serviceMonitor]# ls | xargs grep 91
prometheus-EtcdService.yaml:  - ip: 192.168.91.143
prometheus-kubeControllerManagerService.yaml:  - ip: 192.168.91.143
prometheus-KubeProxyService.yaml:  - ip: 192.168.91.146
prometheus-KubeProxyService.yaml:  - ip: 192.168.91.147
prometheus-kubeSchedulerService.yaml:  - ip: 192.168.91.143

Change prometheus-EtcdService.yaml, prometheus-kubeControllerManagerService.yaml, and prometheus-kubeSchedulerService.yaml to:

subsets:
- addresses:
  - ip: 192.168.31.4
  - ip: 192.168.31.5
  - ip: 192.168.31.6

Change prometheus-KubeProxyService.yaml to:

subsets:
- addresses:
  - ip: 192.168.31.7
  - ip: 192.168.31.8

Create the operator and permissions (setup/), then the alertmanager service:

[root@master-1 prometheus]# kubectl apply -f setup/ 
namespace/monitoring created
customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
clusterrole.rbac.authorization.k8s.io/prometheus-operator created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
deployment.apps/prometheus-operator created
service/prometheus-operator created
serviceaccount/prometheus-operator created


[root@master-1 prometheus]# kubectl apply -f alertmanager/
alertmanager.monitoring.coreos.com/main created
secret/alertmanager-main created
service/alertmanager-main created
serviceaccount/alertmanager-main created

Pull the images first; find out which images are needed:

[root@master-1 prometheus]# cd node-exporter 
[root@master-1 node-exporter]# ls | xargs grep image
node-exporter-daemonset.yaml: image: prom/node-exporter:v0.18.1
node-exporter-daemonset.yaml: image: quay.io/coreos/kube-rbac-proxy:v0.4.1

Pull the images on all node nodes:

[root@node-1 ~]# docker pull quay.io/coreos/kube-rbac-proxy:v0.4.1 
[root@node-1 ~]# docker pull prom/node-exporter:v0.18.1

Apply node-exporter:

[root@master-1 prometheus]# kubectl apply -f node-exporter/ 
clusterrole.rbac.authorization.k8s.io/node-exporter created
clusterrolebinding.rbac.authorization.k8s.io/node-exporter created
daemonset.apps/node-exporter created
service/node-exporter created
serviceaccount/node-exporter created
# pull the image on the nodes
[root@node-1 ~]# docker pull quay.io/coreos/kube-state-metrics:v1.8.0 

[root@master-1 prometheus]# kubectl apply -f kube-state-metrics/
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
deployment.apps/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics-rbac created
role.rbac.authorization.k8s.io/kube-state-metrics created
rolebinding.rbac.authorization.k8s.io/kube-state-metrics created
service/kube-state-metrics created
serviceaccount/kube-state-metrics created
# pull the image on the nodes
[root@node-1 ~]# docker pull grafana/grafana:6.4.3 

[root@master-1 prometheus]# kubectl apply -f grafana/
secret/grafana-datasources created
configmap/grafana-dashboard-apiserver created
configmap/grafana-dashboard-cluster-total created
configmap/grafana-dashboard-controller-manager created
configmap/grafana-dashboard-k8s-resources-cluster created
configmap/grafana-dashboard-k8s-resources-namespace created
configmap/grafana-dashboard-k8s-resources-node created
configmap/grafana-dashboard-k8s-resources-pod created
configmap/grafana-dashboard-k8s-resources-workload created
configmap/grafana-dashboard-k8s-resources-workloads-namespace created
configmap/grafana-dashboard-kubelet created
configmap/grafana-dashboard-namespace-by-pod created
configmap/grafana-dashboard-namespace-by-workload created
configmap/grafana-dashboard-node-cluster-rsrc-use created
configmap/grafana-dashboard-node-rsrc-use created
configmap/grafana-dashboard-nodes created
configmap/grafana-dashboard-persistentvolumesusage created
configmap/grafana-dashboard-pod-total created
configmap/grafana-dashboard-pods created
configmap/grafana-dashboard-prometheus-remote-write created
configmap/grafana-dashboard-prometheus created
configmap/grafana-dashboard-proxy created
configmap/grafana-dashboard-scheduler created
configmap/grafana-dashboard-statefulset created
configmap/grafana-dashboard-workload-total created
configmap/grafana-dashboards created
deployment.apps/grafana created
persistentvolumeclaim/grafana created
clusterrolebinding.rbac.authorization.k8s.io/grafana-rbac created
service/grafana created
serviceaccount/grafana created
[root@master-1 prometheus]# kubectl apply -f prometheus/ 
clusterrole.rbac.authorization.k8s.io/prometheus-k8s created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created
prometheus.monitoring.coreos.com/k8s created
persistentvolumeclaim/prometheus-data created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-rbac created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s created
role.rbac.authorization.k8s.io/prometheus-k8s-config created
role.rbac.authorization.k8s.io/prometheus-k8s created
role.rbac.authorization.k8s.io/prometheus-k8s created
role.rbac.authorization.k8s.io/prometheus-k8s created
prometheusrule.monitoring.coreos.com/prometheus-k8s-rules created
service/prometheus-k8s created
serviceaccount/prometheus-k8s created
[root@master-1 prometheus]# kubectl apply -f serviceMonitor/
servicemonitor.monitoring.coreos.com/alertmanager created
servicemonitor.monitoring.coreos.com/grafana created
servicemonitor.monitoring.coreos.com/kube-state-metrics created
servicemonitor.monitoring.coreos.com/node-exporter created
service/kube-etcd created
endpoints/kube-etcd created
service/kube-proxy created
endpoints/kube-proxy created
service/kube-controller-manager created
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
endpoints/kube-controller-manager configured
service/kube-scheduler created
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
endpoints/kube-scheduler configured
servicemonitor.monitoring.coreos.com/prometheus-operator created
servicemonitor.monitoring.coreos.com/prometheus created
servicemonitor.monitoring.coreos.com/kube-apiserver created
servicemonitor.monitoring.coreos.com/coredns created
servicemonitor.monitoring.coreos.com/kube-etcd created
servicemonitor.monitoring.coreos.com/kube-controller-manager created
servicemonitor.monitoring.coreos.com/kube-proxy created
servicemonitor.monitoring.coreos.com/kube-scheduler created
servicemonitor.monitoring.coreos.com/kubelet created

Note: if permission errors are reported, fix them as follows (skip this if there were no errors):

[root@master-1 monitor]# kubectl create serviceaccount kube-state-metrics -n monitoring 
[root@master-1 monitor]# kubectl create serviceaccount grafana -n monitoring
[root@master-1 monitor]# kubectl create serviceaccount prometheus-k8s -n monitoring

Create the permission (RBAC) files (skip this if there were no errors):

[root@master-1 kube-state-metrics]# cat kube-state-metrics-rabc.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kube-state-metrics-rbac
subjects:
  - kind: ServiceAccount
    name: kube-state-metrics
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

grafana

[root@master-1 grafana]# cat grafana-rabc.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: grafana-rbac
subjects:
  - kind: ServiceAccount
    name: grafana
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

prometheus

[root@master-1 grafana]# cat prometheus-rabc.yaml 
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus-rbac
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

15.5.1 Get the Grafana Pod

[root@master-1 prometheus]# kubectl get pod -A -o wide| grep grafana
monitoring             grafana-5dc77ff8cb-8h5hj                     1/1     Running   0          15m     172.17.34.8    192.168.31.8   <none>           <none>

15.5.2 Get the Grafana SVC

[root@master-1 prometheus]# kubectl get svc -A | grep grafana
monitoring             grafana                     NodePort    10.0.0.154   <none>        3000:49193/TCP               15m
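
Grafana is then reachable on the NodePort shown above, e.g. http://192.168.31.8:49193 (any node IP works; the port is assigned at random by the cluster).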

15.5.3 Log in to the Grafana Dashboard

Default username and password: admin/admin


15.5.4 Select the resources. (Screenshot omitted.)

Get the prometheus address:

[root@master-1 prometheus]# kubectl get svc -A| grep prometheus-k8s
monitoring             prometheus-k8s              NodePort    10.0.0.109   <none>        9090:34271/TCP               18m

Check the servicemonitors:

[root@master-1 prometheus]# kubectl get servicemonitor -A
NAMESPACE    NAME                      AGE
monitoring   alertmanager              21m
monitoring   coredns                   21m
monitoring   grafana                   21m
monitoring   kube-apiserver            21m
monitoring   kube-controller-manager   21m
monitoring   kube-etcd                 21m
monitoring   kube-proxy                21m
monitoring   kube-scheduler            21m
monitoring   kube-state-metrics        21m
monitoring   kubelet                   21m
monitoring   node-exporter             21m
monitoring   prometheus                21m
monitoring   prometheus-operator       21m

15.6 Prometheus

Prometheus web UI (screenshots omitted), e.g. the targets page:

http://192.168.91.21:31626/targets

15.7 Add Ingress to the monitoring system

Modify the Ingress Service configuration file:

[root@master-1 ingress]# cat traefik-deploy.yaml 
apiVersion: v1
kind: Service
metadata:
  name: traefik
  labels:                       
    app: traefik-metrics
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
.........

Note the namespace:

[root@master-1 ingress]# kubectl apply -f traefik-deploy.yaml -n kube-system	

Check the service details:

[root@master-1 ingress]# kubectl describe svc traefik
Name:              traefik
Namespace:         kube-system
Labels:            app=traefik-metrics
Annotations:       kubectl.kubernetes.io/last-applied-configuration:
                     {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"traefik-metrics"},"name":"traefik","namespace":"default"...
Selector:          app=traefik
Type:              ClusterIP
IP:                10.0.0.3
Port:              web 80/TCP
TargetPort:        80/TCP
Endpoints:         172.17.90.14:80,172.17.98.5:80
Port:              websecure 443/TCP
TargetPort:        443/TCP
Endpoints:         172.17.90.14:443,172.17.98.5:443
Port:              admin 8080/TCP
TargetPort:        8080/TCP
Endpoints:         172.17.90.14:8080,172.17.98.5:8080
Session Affinity:  None
Events:            <none>

Add the ServiceMonitor:

[root@master-1 ingress]# cat traefik-serviceMonitor.yaml 
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: traefik
  namespace: monitoring
  labels:
    app: traefik-metrics
spec:
  jobLabel: app
  endpoints:
  - port: admin
    interval: 10s
    path: '/metrics'
  selector:
    matchLabels:
      app: traefik-metrics
  namespaceSelector:
    matchNames:
    - kube-system

# create the monitor

[root@master-1 ingress]# kubectl apply -f traefik-serviceMonitor.yaml	

Check prometheus (wait about 1 minute for the target to appear; screenshot omitted).

Query the data. (Screenshot omitted.)

Add a dashboard page to Grafana.

Import the template file Traefik 2-1587191399741.json.

Install the grafana-piechart-panel plugin:

[root@master-1 ingress]# kubectl exec -ti -n monitoring grafana-5dc77ff8cb-srd9h /bin/bash 
bash-5.0$ grafana-cli plugins install grafana-piechart-panel

# delete the pod so it restarts and picks up the plugin
[root@master-1 ingress]# kubectl delete pods grafana-5dc77ff8cb-srd9h -n monitoring

Display the data. (Screenshot omitted.)

16 Container log collection options

  • Package a log agent into the business image
  • Write logs to the physical node
  • Run a log-collecting container on every physical node

In this example one log-collecting pod is deployed on each node node (a generic sketch of this pattern follows).
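
The per-node collector is simply a DaemonSet that mounts the node's log directories; a minimal, hypothetical sketch of the pattern (the image name and mounts here are illustrative only; the actual manifests used below are the provided log-pilot/ELK files):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-agent
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: log-agent
  template:
    metadata:
      labels:
        app: log-agent
    spec:
      containers:
      - name: log-agent
        image: log-agent:example          # hypothetical log collector image
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: dockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: dockercontainers
        hostPath:
          path: /var/lib/docker/containers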


17 Install the logging components

Create a directory:

[root@master-1 ~]# mkdir /root/logs
[root@master-1 ~]# cd logs

Set up the serviceAccount:

[root@master-1 logs]# kubectl create serviceaccount admin -n kube-system
serviceaccount/admin created

17.1 Configure permissions

[root@master-1 logs]# cat es-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: es-rbac
subjects:
  - kind: ServiceAccount
    name: admin 
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

Create the binding:

[root@master-1 logs]# kubectl apply -f es-rbac.yaml 
clusterrolebinding.rbac.authorization.k8s.io/es-rbac created

17.2 Install Elasticsearch

Pull the images on the node nodes:

[root@node-1 logs]# docker pull  busybox:1.27
[root@node-1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/cqz/elasticsearch:5.5.1

If the download is too slow, the images can be loaded from tarballs instead (on all node nodes):

[root@node-1 logs]# docker load < es_5.5.1.tar 
[root@node-1 logs]# docker load < kibana_5.5.1.tar 
[root@node-1 logs]# docker load < log-pilot.tar.gz
[root@node-1 logs]# docker tag repo.hostscc.com/elk/elasticsearch:5.5.1 registry.cn-hangzhou.aliyuncs.com/cqz/elasticsearch:5.5.1 

(For production, increase the memory size; 1 GB is too small.)

[root@master-1 log]# wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/elasticsearch.yml


[root@master-1 logs]# kubectl apply -f elasticsearch.yml
service/elasticsearch-api created
service/elasticsearch-discovery created
statefulset.apps/elasticsearch created

# check the StatefulSet status
[root@master-1 logs]# kubectl describe StatefulSet -A
Name:               elasticsearch
Namespace:          kube-system
CreationTimestamp:  Fri, 07 Aug 2020 14:23:12 +0800
Selector:           app=es
Labels:             kubernetes.io/cluster-service=true
Annotations:        kubectl.kubernetes.io/last-applied-configuration:
                      {"apiVersion":"apps/v1beta1","kind":"StatefulSet","metadata":{"annotations":{},"labels":{"kubernetes.io/cluster-service":"true"},"name":"e...
Replicas:           2 desired | 2 total
Update Strategy:    OnDelete
Pods Status:        2 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           app=es
  Service Account:  admin
  Init Containers:
   init-sysctl:
    Image:      busybox:1.27
    Port:       <none>
    Host Port:  <none>
    Command:
      sysctl
      -w
      vm.max_map_count=262144
    Environment:  <none>
    Mounts:       <none>
  Containers:
   elasticsearch:
    Image:       registry.cn-hangzhou.aliyuncs.com/cqz/elasticsearch:5.5.1
    Ports:       9200/TCP, 9300/TCP
    Host Ports:  0/TCP, 0/TCP
    Limits:
      memory:  1500Mi
    Requests:
      cpu:     100m
      memory:  1000Mi
    Liveness:  tcp-socket :transport delay=20s timeout=1s period=10s #success=1 #failure=3
    Environment:
      http.host:                                         0.0.0.0
      network.host:                                      _eth0_
      cluster.name:                                      docker-cluster
      bootstrap.memory_lock:                             false
      discovery.zen.ping.unicast.hosts:                  elasticsearch-discovery
      discovery.zen.ping.unicast.hosts.resolve_timeout:  10s
      discovery.zen.ping_timeout:                        6s
      discovery.zen.minimum_master_nodes:                2
      discovery.zen.fd.ping_interval:                    2s
      discovery.zen.no_master_block:                     write
      gateway.expected_nodes:                            2
      gateway.expected_master_nodes:                     1
      transport.tcp.connect_timeout:                     60s
      ES_JAVA_OPTS:                                      -Xms1g -Xmx1g
    Mounts:
      /data from es-data (rw)
  Volumes:
   es-data:
    Type:          HostPath (bare host directory volume)
    Path:          /es-data
    HostPathType:  
Volume Claims:     <none>
Events:
  Type    Reason            Age    From                    Message
  ----    ------            ----   ----                    -------
  Normal  SuccessfulCreate  2m39s  statefulset-controller  create Pod elasticsearch-0 in StatefulSet elasticsearch successful
  Normal  SuccessfulCreate  2m37s  statefulset-controller  create Pod elasticsearch-1 in StatefulSet elasticsearch successful


Name:               alertmanager-main
Namespace:          monitoring
CreationTimestamp:  Fri, 07 Aug 2020 11:06:11 +0800
Selector:           alertmanager=main,app=alertmanager
Labels:             alertmanager=main
Annotations:        kubectl.kubernetes.io/last-applied-configuration:
                      {"apiVersion":"monitoring.coreos.com/v1","kind":"Alertmanager","metadata":{"annotations":{},"labels":{"alertmanager":"main"},"name":"main"...
Replicas:           1 desired | 1 total
Update Strategy:    RollingUpdate
Pods Status:        1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           alertmanager=main
                    app=alertmanager
  Service Account:  alertmanager-main
  Containers:
   alertmanager:
    Image:       prom/alertmanager:v0.18.0
    Ports:       9093/TCP, 9094/TCP, 9094/UDP
    Host Ports:  0/TCP, 0/TCP, 0/UDP
    Args:
      --config.file=/etc/alertmanager/config/alertmanager.yaml
      --cluster.listen-address=[$(POD_IP)]:9094
      --storage.path=/alertmanager
      --data.retention=120h
      --web.listen-address=:9093
      --web.route-prefix=/
      --cluster.peer=alertmanager-main-0.alertmanager-operated.monitoring.svc:9094
    Requests:
      memory:   200Mi
    Liveness:   http-get http://:web/-/healthy delay=0s timeout=3s period=10s #success=1 #failure=10
    Readiness:  http-get http://:web/-/ready delay=3s timeout=3s period=5s #success=1 #failure=10
    Environment:
      POD_IP:   (v1:status.podIP)
    Mounts:
      /alertmanager from alertmanager-main-db (rw)
      /etc/alertmanager/config from config-volume (rw)
   config-reloader:
    Image:      quay.io/coreos/configmap-reload:v0.0.1
    Port:       <none>
    Host Port:  <none>
    Args:
      -webhook-url=http://localhost:9093/-/reload
      -volume-dir=/etc/alertmanager/config
    Limits:
      cpu:        100m
      memory:     25Mi
    Environment:  <none>
    Mounts:
      /etc/alertmanager/config from config-volume (ro)
  Volumes:
   config-volume:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  alertmanager-main
    Optional:    false
   alertmanager-main-db:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
Volume Claims:  <none>
Events:         <none>


Name:               prometheus-k8s
Namespace:          monitoring
CreationTimestamp:  Fri, 07 Aug 2020 11:39:59 +0800
Selector:           app=prometheus,prometheus=k8s
Labels:             prometheus=k8s
Annotations:        kubectl.kubernetes.io/last-applied-configuration:
                      {"apiVersion":"monitoring.coreos.com/v1","kind":"Prometheus","metadata":{"annotations":{},"labels":{"prometheus":"k8s"},"name":"k8s","name...
                    prometheus-operator-input-hash: 17654339208259950180
Replicas:           2 desired | 2 total
Update Strategy:    RollingUpdate
Pods Status:        2 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           app=prometheus
                    prometheus=k8s
  Service Account:  prometheus-k8s
  Containers:
   prometheus:
    Image:      prom/prometheus:v2.11.0
    Port:       9090/TCP
    Host Port:  0/TCP
    Args:
      --web.console.templates=/etc/prometheus/consoles
      --web.console.libraries=/etc/prometheus/console_libraries
      --config.file=/etc/prometheus/config_out/prometheus.env.yaml
      --storage.tsdb.path=/prometheus
      --storage.tsdb.retention.time=24h
      --web.enable-lifecycle
      --storage.tsdb.no-lockfile
      --web.route-prefix=/
    Requests:
      memory:     400Mi
    Liveness:     http-get http://:web/-/healthy delay=0s timeout=3s period=5s #success=1 #failure=6
    Readiness:    http-get http://:web/-/ready delay=0s timeout=3s period=5s #success=1 #failure=120
    Environment:  <none>
    Mounts:
      /etc/prometheus/certs from tls-assets (ro)
      /etc/prometheus/config_out from config-out (ro)
      /etc/prometheus/rules/prometheus-k8s-rulefiles-0 from prometheus-k8s-rulefiles-0 (rw)
      /prometheus from prometheus-k8s-db (rw)
   prometheus-config-reloader:
    Image:      quay.io/coreos/prometheus-config-reloader:v0.34.0
    Port:       <none>
    Host Port:  <none>
    Command:
      /bin/prometheus-config-reloader
    Args:
      --log-format=logfmt
      --reload-url=http://localhost:9090/-/reload
      --config-file=/etc/prometheus/config/prometheus.yaml.gz
      --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml
    Limits:
      cpu:     100m
      memory:  25Mi
    Environment:
      POD_NAME:   (v1:metadata.name)
    Mounts:
      /etc/prometheus/config from config (rw)
      /etc/prometheus/config_out from config-out (rw)
   rules-configmap-reloader:
    Image:      quay.io/coreos/configmap-reload:v0.0.1
    Port:       <none>
    Host Port:  <none>
    Args:
      --webhook-url=http://localhost:9090/-/reload
      --volume-dir=/etc/prometheus/rules/prometheus-k8s-rulefiles-0
    Limits:
      cpu:        100m
      memory:     25Mi
    Environment:  <none>
    Mounts:
      /etc/prometheus/rules/prometheus-k8s-rulefiles-0 from prometheus-k8s-rulefiles-0 (rw)
  Volumes:
   config:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  prometheus-k8s
    Optional:    false
   tls-assets:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  prometheus-k8s-tls-assets
    Optional:    false
   config-out:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   prometheus-k8s-rulefiles-0:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      prometheus-k8s-rulefiles-0
    Optional:  false
   prometheus-k8s-db:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
   prometheus-data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  prometheus-data
    ReadOnly:   false
Volume Claims:  <none>
Events:         <none>

17.3 Check the Elasticsearch StatefulSet status in Kubernetes

Ideally the Elasticsearch cluster should run three nodes; this demo runs two.

[root@master-1 logs]# kubectl get StatefulSet -n kube-system
NAME            READY   AGE
elasticsearch   2/2     3m54s
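The StatefulSet above runs only two replicas. If you want the recommended three nodes, one option (a sketch only, assuming the elasticsearch manifest's discovery settings allow a third member to join the cluster) is to scale the StatefulSet and watch the new pod come up:

[root@master-1 logs]# kubectl scale statefulset elasticsearch --replicas=3 -n kube-system
[root@master-1 logs]# kubectl get pods -n kube-system | grep elasticsearch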

17.4 Check ES cluster health

# Exec into the elasticsearch-0 pod and run the health check:
[root@master-1 logs]# kubectl exec -it elasticsearch-0 bash -n kube-system
elasticsearch@elasticsearch-0:/usr/share/elasticsearch$ curl http://localhost:9200/_cat/health?v
epoch      timestamp cluster        status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1596781651 06:27:31  docker-cluster green           2         2      0   0    0    0        0             0                  -                100.0%

If the exec command fails with the following error:

error: unable to upgrade connection: Forbidden (user=system:anonymous, verb=create, resource=nodes, subresource=proxy)

Fix:

[root@master-200 log]# kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
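Confirm the binding exists (note that binding cluster-admin to system:anonymous is only acceptable in a lab environment):

[root@master-200 log]# kubectl get clusterrolebinding system:anonymous -o wide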

17.5 Install log-pilot

[root@master-200 log]# wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/log-pilot.yml
[root@master-200 log]# docker pull registry.cn-hangzhou.aliyuncs.com/acs-sample/log-pilot:0.9-filebeat

# On every Node
[root@node-1 opt]# docker tag log-pilot:latest registry.cn-hangzhou.aliyuncs.com/acs-sample/log-pilot:0.9-filebeat 

# Deploy
[root@master-1 logs]# kubectl apply -f log-pilot-2.0.yml 
daemonset.extensions/log-pilot created
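log-pilot runs as a DaemonSet, so there should be one pod per node. A quick check (assuming the manifest deploys into the kube-system namespace):

[root@master-1 logs]# kubectl get daemonset -n kube-system | grep log-pilot
[root@master-1 logs]# kubectl get pods -n kube-system -o wide | grep log-pilot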


17.6 Install Kibana

# Note: adjust the namespace in the manifest if needed
[root@master-200 log]# wget https://acs-logging.oss-cn-hangzhou.aliyuncs.com/kibana.yml

On all nodes

[root@node-1 ~]# docker tag repo.hostscc.com/elk/kibana:5.5.1 registry.cn-hangzhou.aliyuncs.com/acs-sample/kibana:5.5.1 

Deploy

[root@master-1 logs]# kubectl apply -f kibana.yml
service/kibana created
deployment.apps/kibana created
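Wait for the Deployment to become ready:

[root@master-1 logs]# kubectl rollout status deployment/kibana -n kube-system
[root@master-1 logs]# kubectl get pods -n kube-system | grep kibana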


17.7 Access the Kibana UI

17.7.1 Find the node running Kibana

[root@master-1 logs]# kubectl get pods -o wide --all-namespaces | grep kibana
kube-system            kibana-777bb4dfb-sttvt                       1/1     Running   0          60s     172.17.34.12   192.168.31.8   <none>           <none>

17.7.2 Get the Kibana NodePort

[root@master-1 logs]# kubectl get svc --all-namespaces | grep kibana
kube-system            kibana                      NodePort    10.0.0.155   <none>        80:35097/TCP                 76s

17.7.3 Open the web UI:

http://192.168.31.8:35097
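Before opening a browser you can confirm the NodePort answers (any node IP should work, since a NodePort service listens on every node):

[root@master-1 logs]# curl -I http://192.168.31.8:35097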


17.8 Example 1: collecting logs from a running container

17.8.1. Create the nginx YAML file

[root@master-1 ~]# mkdir /root/nginx && cd /root/nginx 
[root@master-1 nginx]# cat nginx-demo.yaml
apiVersion: apps/v1beta2
kind: Deployment 
metadata:
  name: nginx-demo 
spec:
  selector:
    matchLabels:
      app: nginx-demo 
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-demo 
    spec:
      containers:
        - name: nginx 
          image: nginx
          imagePullPolicy: IfNotPresent 
          env:
            - name: aliyun_logs_nginx
              value: "stdout"
---
apiVersion: v1 
kind: Service 
metadata:
  name: nginx-demo-svc 
spec:
  selector:
    app: nginx-demo 
  ports:
    - port: 80
      targetPort: 80
  • aliyun_logs_catalina=stdout means collect the container's stdout log (the demo above uses aliyun_logs_nginx=stdout for the same purpose).
  • aliyun_logs_access=/usr/local/tomcat/logs/catalina.*.log means collect file logs matching catalina.*.log under /usr/local/tomcat/logs/ inside the container.
  • In general, Log-Pilot dynamically generates its log-collection configuration from environment variables of the form aliyun_logs_$name=$path; a file-collection sketch follows below.
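For file-based logs, the log directory typically also has to be declared as a volume so Log-Pilot can reach the files. A sketch of such a container spec (illustrative only, modeled on the Tomcat example from the Log-Pilot documentation, not part of this demo) might look like:

      containers:
        - name: tomcat
          image: tomcat
          env:
            - name: aliyun_logs_catalina
              value: "stdout"
            - name: aliyun_logs_access
              value: "/usr/local/tomcat/logs/catalina.*.log"
          volumeMounts:
            - name: tomcat-log
              mountPath: /usr/local/tomcat/logs
      volumes:
        - name: tomcat-log
          emptyDir: {}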
[root@master-1 nginx]# kubectl apply -f nginx-demo.yaml 
deployment.apps/nginx-demo created
service/nginx-demo-svc created

Check the demo status

[root@master-1 nginx]# kubectl get svc,pods
NAME                     TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/kubernetes       ClusterIP   10.0.0.1     <none>        443/TCP        23h
service/nginx            NodePort    10.0.0.65    <none>        88:42671/TCP   21h
service/nginx-demo-svc   ClusterIP   10.0.0.136   <none>        80/TCP         20s

NAME                                          READY   STATUS    RESTARTS   AGE
pod/nfs-client-provisioner-6d6f7ff69f-bk95b   1/1     Running   0          4h8m
pod/nginx-7bb7cd8db5-kfrsk                    1/1     Running   3          21h
pod/nginx-7bb7cd8db5-rwvgg                    1/1     Running   3          21h
pod/nginx-demo-68749b58dc-xzk5c               1/1     Running   0          20s

17.8.2. Create the nginx IngressRoute (Traefik)

[root@master-1 java]# cat nginx-route.yaml 
apiVersion: traefik.containo.us/v1alpha1 
kind: IngressRoute
metadata:
  name: nginx-demo-route 
spec:
  entryPoints:
    - web 
  routes:
    - match: Host(`nginx.cc.com`) 
      kind: Rule
      services:
        - name: nginx-demo-svc 
          port: 80

Create the route

[root@master-1 nginx]# kubectl apply -f nginx-route.yaml
ingressroute.traefik.containo.us/nginx-demo-route created
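You can exercise the route directly with a Host header (a quick check, assuming Traefik's web entrypoint is exposed on port 80 of the node, as implied by the hosts entry in 17.8.4):

[root@master-1 nginx]# curl -H "Host: nginx.cc.com" http://192.168.31.8/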

17.8.3. Access via the Service

[root@master-2 ~]# kubectl get pod,svc
NAME                                          READY   STATUS    RESTARTS   AGE
pod/dnstools                                  1/1     Running   0          99s
pod/nfs-client-provisioner-6d6f7ff69f-bk95b   1/1     Running   0          4h14m
pod/nginx-7bb7cd8db5-kfrsk                    1/1     Running   3          21h
pod/nginx-7bb7cd8db5-rwvgg                    1/1     Running   3          21h
pod/nginx-demo-68749b58dc-xzk5c               1/1     Running   0          5m43s

NAME                     TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/kubernetes       ClusterIP   10.0.0.1     <none>        443/TCP        23h
service/nginx            NodePort    10.0.0.65    <none>        88:42671/TCP   21h
service/nginx-demo-svc   ClusterIP   10.0.0.136   <none>        80/TCP         5m43s
[root@master-1 nginx]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
If you don't see a command prompt, try pressing enter.
dnstools# curl  10.0.0.136
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

17.8.4. Add a hosts entry

192.168.31.8 nginx.cc.com

17.8.5. Open http://nginx.cc.com in a browser


17.8.6. View the container logs

[root@master-2 ~]# kubectl logs -f nginx-demo-68749b58dc-xzk5c 
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
172.17.34.0 - - [07/Aug/2020:07:03:40 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"
172.17.34.0 - - [07/Aug/2020:07:07:06 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"
172.17.34.0 - - [07/Aug/2020:07:07:07 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"
172.17.34.0 - - [07/Aug/2020:07:07:08 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"
172.17.34.0 - - [07/Aug/2020:07:07:08 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.60.0" "-"					

17.8.7. Check whether the index was created

[root@master-2 ~]# kubectl get pods -n=kube-system | grep elasticsearch
elasticsearch-0                       1/1     Running   0          45m
elasticsearch-1                       1/1     Running   0          45m
[root@master-2 ~]# kubectl exec -it elasticsearch-0 /bin/bash -n kube-system
elasticsearch@elasticsearch-0:/usr/share/elasticsearch$ curl 'localhost:9200/_cat/indices?v'
health status index            uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   .kibana          g_3YT_5jRgOVylQB22Uupw   1   1          1            0      6.4kb          3.2kb
green  open   nginx-2020.08.07 pUOf3W9_Rj6B0boIcy2ICg   5   1         13            0    205.1kb         96.4kb
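To look at a sample document in the nginx index (still inside the elasticsearch-0 pod):

elasticsearch@elasticsearch-0:/usr/share/elasticsearch$ curl 'localhost:9200/nginx-2020.08.07/_search?size=1&pretty'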

17.8.8. Create the index pattern in Kibana


View the access logs


17.8.9. Note on multi-line log collection (Java)

Reference: https://www.iyunw.cn/archives/k8s-tong-guo-log-pilot-cai-ji-ying-yong-ri-zhi-ding-zhi-hua-tomcat-duo-xing/
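Java stack traces span multiple lines, so without extra configuration each line becomes a separate document. The referenced article covers how to customize this for Log-Pilot; the underlying mechanism is Filebeat's multiline settings, roughly as follows (a generic Filebeat snippet, not Log-Pilot-specific syntax):

multiline.pattern: '^\d{4}-\d{2}-\d{2}'   # a line starting with a date begins a new event
multiline.negate: true
multiline.match: after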
