1、coredns.yaml
[root@k8s-master01 yaml]# cat coredns.yaml
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      containers:
      - name: coredns
        image: lizhenliang/coredns:1.2.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
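Two notes on this manifest. First, the Service clusterIP (10.0.0.2 here) must match the --cluster-dns address configured on every kubelet, otherwise pods will not be pointed at CoreDNS. Second, the Corefile above targets CoreDNS 1.2.x; in later releases the proxy plugin was replaced by forward and the upstream option was removed from the kubernetes plugin. If you swap in a newer image, a roughly equivalent Corefile would look like the sketch below (illustrative only, adjust to your image version):

    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf   # replaces the older proxy plugin
        cache 30
        loop
        reload
        loadbalance
    }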
2、Deploy
[root@k8s-master01 yaml]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
[root@k8s-master01 yaml]# kubectl get pods -n kube-system
NAME                       READY   STATUS    RESTARTS   AGE
coredns-6d8cfdd59d-vk724   1/1     Running   0          28s
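Before moving on to the DNS test, it is worth confirming that the kube-dns Service got the expected clusterIP and that CoreDNS started cleanly. The commands below are illustrative checks, not part of the transcript above:

    # Service should show CLUSTER-IP 10.0.0.2 with 53/UDP and 53/TCP ports
    kubectl get svc -n kube-system kube-dns

    # Endpoints should list the coredns pod IP once the pod is Ready
    kubectl get endpoints kube-dns -n kube-system

    # CoreDNS logs; Corefile plugin errors or a crash loop will show up here
    kubectl logs -n kube-system -l k8s-app=kube-dns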
3、Test DNS
Pull a busybox image and run it as a test pod.
[root@k8s-master01 yaml]# cat busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox:1.28.4
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
[root@k8s-master01 yaml]# kubectl apply -f busybox.yaml
pod/busybox created
[root@k8s-master01 yaml]# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
busybox   1/1     Running   0          6s
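busybox:1.28.4 is pinned on purpose: nslookup in newer busybox builds behaves differently against CoreDNS and often reports spurious failures. A quick way to confirm the pod was wired to the cluster DNS is to inspect its resolv.conf; you would expect a nameserver line pointing at 10.0.0.2 plus cluster.local search domains (illustrative check, not part of the transcript):

    # Should print "nameserver 10.0.0.2" and search entries such as default.svc.cluster.local
    kubectl exec busybox -- cat /etc/resolv.conf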
Create a deployment named web from the nginx image and expose it as a NodePort service.
[root@k8s-master01 yaml]# kubectl create deployment web --image=nginx
deployment.apps/web created
[root@k8s-master01 yaml]# kubectl expose deployment web --port=80 --type=NodePort
service/web exposed
[root@k8s-master01 yaml]# kubectl get pods
NAME                  READY   STATUS    RESTARTS   AGE
busybox               1/1     Running   0          6m13s
web-d86c95cc9-9zxm8   1/1     Running   0          93s
[root@k8s-master01 yaml]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        23h
web          NodePort    10.0.0.250   <none>        80:32241/TCP   5m18s
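Since the point of the exercise is name resolution, the service can also be fetched by name from inside the cluster, or via the NodePort from outside. Both commands below are illustrative; <node-ip> is a placeholder for the address of any node:

    # From inside the busybox pod: resolve the service name and fetch the nginx welcome page
    kubectl exec busybox -- wget -qO- http://web

    # From outside the cluster, through the NodePort shown by kubectl get svc (32241 here)
    curl http://<node-ip>:32241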
Exec into the busybox pod and test resolution.
[root@k8s-master01 yaml]# kubectl exec -it busybox sh
/ # ping web
PING web (10.0.0.250): 56 data bytes
64 bytes from 10.0.0.250: seq=0 ttl=64 time=0.102 ms
64 bytes from 10.0.0.250: seq=1 ttl=64 time=0.158 ms
64 bytes from 10.0.0.250: seq=2 ttl=64 time=0.181 ms
64 bytes from 10.0.0.250: seq=3 ttl=64 time=0.170 ms
/ # nslookup web
Server:    10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local

Name:      web
Address 1: 10.0.0.250 web.default.svc.cluster.local
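The short name web only resolves because busybox and web live in the same namespace (default). To check cross-namespace and fully qualified lookups as well, a few more queries from the same busybox shell are useful (illustrative, not part of the transcript above):

    / # nslookup web.default.svc.cluster.local   # fully qualified service name
    / # nslookup kubernetes                      # the API server service, also in default
    / # nslookup kube-dns.kube-system            # cross-namespace lookup: <service>.<namespace>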