1. Links
Documentation: https://prometheus.io/docs/guides/node-exporter/
GitHub: https://github.com/prometheus/node_exporter
Exporter list: https://prometheus.io/docs/instrumenting/exporters/
2. Download
wget https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz
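The tarball still has to be unpacked before the move in step 3; a minimal sketch, assuming it was downloaded into the current directory:

tar -xzf node_exporter-0.18.1.linux-amd64.tar.gz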
3. Install and start
mv node_exporter-0.18.1.linux-amd64 /usr/local/node_exporter

[root@zabbix-server system]# cat node_exporter.service
[Unit]
Description=node_exporter
Documentation=https://prometheus.io/
After=network.target

[Service]
Type=simple
User=prometheus
ExecStart=/usr/local/node_exporter/node_exporter
Restart=on-failure

[Install]
WantedBy=multi-user.target

[root@zabbix-server system]# pwd
/usr/lib/systemd/system
[root@zabbix-server system]# systemctl daemon-reload
[root@zabbix-server system]# systemctl start node_exporter
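The unit file runs the exporter as User=prometheus. If that account does not exist yet, something like the following creates it and enables the service at boot; this is a sketch, not part of the original steps, with the user name and install path taken from the unit file above:

useradd --no-create-home --shell /sbin/nologin prometheus
chown -R prometheus:prometheus /usr/local/node_exporter
systemctl enable node_exporter
systemctl status node_exporter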
4. View the metrics
http://192.168.249.10:9100/metrics
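A quick way to confirm the endpoint is answering from the command line (IP and port taken from the URL above):

curl -s http://192.168.249.10:9100/metrics | head -n 20
# node_* series such as node_cpu_seconds_total should appear in the output
curl -s http://192.168.249.10:9100/metrics | grep -c '^node_'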
5. Add node_exporter as a Prometheus target
[root@zabbix-proxy sd_config]# cat prometheus-server.yml
- labels:
    service: prometheus
    idc: su
    project: monitor
  targets:
    - 192.168.249.11:9090
    - 192.168.249.10:9100

[root@zabbix-proxy sd_config]# cat ../prometheus.yml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    file_sd_configs:
      - files: ['/usr/local/prometheus/sd_config/*.yml']
        refresh_interval: 5s
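Targets listed in the sd_config files are picked up automatically (every 5 seconds per refresh_interval above); a reload is only needed when prometheus.yml itself changes. A sketch of the usual check-and-reload sequence, assuming Prometheus is installed under /usr/local/prometheus and runs as a systemd service named prometheus (only the file paths above come from the original):

# syntax check with promtool (ships with Prometheus)
/usr/local/prometheus/promtool check config /usr/local/prometheus/prometheus.yml

# reload via the HTTP API (needs --web.enable-lifecycle) or restart the service
curl -X POST http://192.168.249.11:9090/-/reload
systemctl restart prometheus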
6. Verify
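In the Prometheus web UI (the server address comes from the sd_config above), Status -> Targets should show both endpoints as UP; the same can be checked with a query in the expression browser, for example:

http://192.168.249.11:9090/targets

# the up series covers both targets under the 'prometheus' job, and node metrics should be queryable
up{job="prometheus"}
node_load1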