Fluentd is used to collect the logs of the Kubernetes containers.
The collected logs are written to Elasticsearch. My Elasticsearch instance runs directly on a server; whether to move it into Kubernetes will be decided after further testing.
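Before deploying anything, it is worth confirming that the Elasticsearch node is reachable from the machines that will run Fluentd. A minimal sketch, assuming the 10.10.5.78:9200 address that appears in output.conf below (adjust to your own environment):

# Quick reachability check against the external Elasticsearch node.
import json
import urllib.request

ES_URL = "http://10.10.5.78:9200/_cluster/health"   # host/port taken from output.conf below

with urllib.request.urlopen(ES_URL, timeout=5) as resp:
    health = json.load(resp)

# A "green" or "yellow" status means the cluster can accept writes from Fluentd.
print(health["cluster_name"], health["status"])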
fluentd-es-configmap.yaml: this file is the Fluentd configuration.
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-es-config-v0.2.0
  namespace: efk-log
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  system.conf: |-
    <system>
      root_dir /tmp/fluentd-buffers/
    </system>

  containers.input.conf: |-
    # This configuration file for Fluentd / td-agent is used
    # to watch changes to Docker log files. The kubelet creates symlinks that
    # capture the pod name, namespace, container name & Docker container ID
    # to the docker logs for pods in the /var/log/containers directory on the host.
    # If running this fluentd configuration in a Docker container, the /var/log
    # directory should be mounted in the container.
    #
    # These logs are then submitted to Elasticsearch which assumes the
    # installation of the fluent-plugin-elasticsearch & the
    # fluent-plugin-kubernetes_metadata_filter plugins.
    # See https://github.com/uken/fluent-plugin-elasticsearch &
    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
    # more information about the plugins.
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #  "stream":"stderr",
    #  "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The time_format specification below makes sure we properly
    # parse the time format produced by Docker. This will be
    # submitted to Elasticsearch and should appear like:
    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
    # ...
    # {
    #     "_index" : "logstash-2014.09.25",
    #     "_type" : "fluentd",
    #     "_id" : "VBrbor2QTuGpsQyTCdfzqA",
    #     "_score" : 1.0,
    #     "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
    #                "stream":"stderr","tag":"docker.container.all",
    #                "@timestamp":"2014-09-25T22:45:50+00:00"}
    # },
    # ...
    #
    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
    # record & add labels to the log record if properly configured. This enables users
    # to filter & search logs on any metadata.
    # For example a Docker container's logs might be in the directory:
    #
    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    #
    # and in the file:
    #
    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
    # in the /var/log/containers directory which includes the pod name and the Kubernetes
    # container name:
    #
    #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #    ->
    #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    #
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #
    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # This results in the tag:
    #
    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
    # which are added to the log message as a kubernetes field object & the Docker container ID
    # is also added under the docker field object.
    # The final tag is:
    #
    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #
    # And the final log record looks like:
    #
    #   {
    #     "log":"2014/09/25 21:15:03 Got request with path wombat\n",
    #     "stream":"stderr",
    #     "time":"2014-09-25T21:15:03.499185026Z",
    #     "kubernetes": {
    #       "namespace": "default",
    #       "pod_name": "synthetic-logger-0.25lps-pod",
    #       "container_name": "synth-lgr"
    #     },
    #     "docker": {
    #       "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
    #     }
    #   }
    #
    # This makes it easier for users to search for logs by pod name or by
    # the name of the Kubernetes container regardless of how many times the
    # Kubernetes pod has been restarted (resulting in several Docker container IDs).

    # Json Log Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    # CRI Log Example:
    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
    <source>
      @id fluentd-containers.log
      @type tail
      path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
      tag raw.kubernetes.*
      read_from_head true
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>

    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
      @id raw.kubernetes
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>

    # Concatenate multi-line logs
    <filter **>
      @id filter_concat
      @type concat
      key message
      multiline_end_regexp /\n$/
      separator ""
    </filter>

    # Enriches records with Kubernetes metadata
    <filter kubernetes.**>
      @id filter_kubernetes_metadata
      @type kubernetes_metadata
    </filter>

    # Fixes json fields in Elasticsearch
    <filter kubernetes.**>
      @id filter_parser
      @type parser
      key_name log
      reserve_data true
      remove_key_name_field true
      <parse>
        @type multi_format
        <pattern>
          format json
        </pattern>
        <pattern>
          format none
        </pattern>
      </parse>
    </filter>

  system.input.conf: |-
    # Logs from systemd-journal for interesting services.
    # TODO(random-liu): Remove this after cri container runtime rolls out.
    <source>
      @id journald-docker
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "docker.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-docker.pos
      </storage>
      read_from_head true
      tag docker
    </source>

    <source>
      @id journald-container-runtime
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-container-runtime.pos
      </storage>
      read_from_head true
      tag container-runtime
    </source>

    <source>
      @id journald-etcd
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "etcd.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-etcd.pos
      </storage>
      read_from_head true
      tag etcd
    </source>

    <source>
      @id journald-kube-apiserver
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kube-apiserver.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kube-apiserver.pos
      </storage>
      read_from_head true
      tag kube-apiserver
    </source>

    <source>
      @id journald-kube-controller-manager
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kube-controller-manager.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kube-controller-manager.pos
      </storage>
      read_from_head true
      tag kube-controller-manager
    </source>

    <source>
      @id journald-kube-scheduler
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kube-scheduler.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kube-scheduler.pos
      </storage>
      read_from_head true
      tag kube-scheduler
    </source>

    <source>
      @id journald-kubelet
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kubelet.pos
      </storage>
      read_from_head true
      tag kubelet
    </source>

    <source>
      @id journald-kube-proxy
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "kube-proxy.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-kube-proxy.pos
      </storage>
      read_from_head true
      tag kube-proxy
    </source>

    <source>
      @id journald-node-problem-detector
      @type systemd
      matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      <storage>
        @type local
        persistent true
        path /var/log/journald-node-problem-detector.pos
      </storage>
      read_from_head true
      tag node-problem-detector
    </source>

    <source>
      @id kernel
      @type systemd
      matches [{ "_TRANSPORT": "kernel" }]
      <storage>
        @type local
        persistent true
        path /var/log/kernel.pos
      </storage>
      <entry>
        fields_strip_underscores true
        fields_lowercase true
      </entry>
      read_from_head true
      tag kernel
    </source>

  forward.input.conf: |-
    # Takes the messages sent over TCP
    <source>
      @id forward
      @type forward
    </source>

  monitoring.conf: |-
    # Prometheus Exporter Plugin
    # input plugin that exports metrics
    <source>
      @id prometheus
      @type prometheus
    </source>

    <source>
      @id monitor_agent
      @type monitor_agent
    </source>

    # input plugin that collects metrics from MonitorAgent
    <source>
      @id prometheus_monitor
      @type prometheus_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for output plugin
    <source>
      @id prometheus_output_monitor
      @type prometheus_output_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for in_tail plugin
    <source>
      @id prometheus_tail_monitor
      @type prometheus_tail_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

  output.conf: |-
    <match **>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      type_name _doc
      include_tag_key true
      host 10.10.5.78
      port 9200
      logstash_format true
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size 2M
        queue_limit_length 8
        overflow_action block
      </buffer>
    </match>
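To make the multi_format <parse> section of containers.input.conf more concrete, the sketch below mimics its two patterns in plain Python: a Docker json-file line is parsed as JSON, and a CRI runtime line falls back to the regex pattern. This is only an illustration of the parsing logic (the example lines come from the comments above), not part of the deployment:

# Mimic the two <pattern> blocks: try JSON first, then the CRI regex as a fallback.
import json
import re

# Same expression as the second <pattern>, rewritten with Python named groups.
CRI_RE = re.compile(r"^(?P<time>.+) (?P<stream>stdout|stderr) [^ ]* (?P<log>.*)$")

def parse_container_line(line: str) -> dict:
    try:
        return json.loads(line)        # Docker json-file format
    except json.JSONDecodeError:
        m = CRI_RE.match(line)         # CRI format fallback
        return m.groupdict() if m else {"log": line}

docker_line = '{"log":"[info] Some log text here\\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}'
cri_line = "2016-02-17T00:04:05.931087621Z stdout F [info] Some log text here"

print(parse_container_line(docker_line)["stream"])  # stdout, parsed as JSON
print(parse_container_line(cri_line)["stream"])     # stdout, parsed by the regex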
fluentd-es-ds.yaml: this file deploys Fluentd as a DaemonSet.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: efk-log
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "namespaces"
  - "pods"
  verbs:
  - "get"
  - "watch"
  - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: fluentd-es
  namespace: efk-log
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-es-v2.4.0
  namespace: efk-log
  labels:
    k8s-app: fluentd-es
    version: v2.4.0
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
      version: v2.4.0
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
        kubernetes.io/cluster-service: "true"
        version: v2.4.0
      # This annotation ensures that fluentd does not get evicted if the node
      # supports critical pod annotation based priority scheme.
      # Note that this does not guarantee admission on the nodes (#40573).
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-node-critical
      serviceAccountName: fluentd-es
      containers:
      - name: fluentd-es
        #image: k8s.gcr.io/fluentd-elasticsearch:v2.4.0
        image: mirrorgooglecontainers/fluentd-elasticsearch:v2.4.0
        env:
        - name: FLUENTD_ARGS
          value: --no-supervisor -q
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: config-volume
          mountPath: /etc/fluent/config.d
      #nodeSelector:
      #  beta.kubernetes.io/fluentd-ds-ready: "true"
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: config-volume
        configMap:
          name: fluentd-es-config-v0.2.0
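Once both files are applied and the DaemonSet pods are running on every node, logstash_format true in output.conf should create daily logstash-YYYY.MM.DD indices in Elasticsearch. A minimal sketch to confirm that logs are arriving (again assuming the 10.10.5.78:9200 address from output.conf):

# List the logstash-* indices that Fluentd has created so far.
import json
import urllib.request

ES = "http://10.10.5.78:9200"   # same host/port as output.conf

url = ES + "/_cat/indices/logstash-*?format=json"
with urllib.request.urlopen(url, timeout=10) as resp:
    for idx in json.load(resp):
        print(idx["index"], idx["docs.count"])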