1.配置文件
在 Linux /tmp目录下创建prometheus.yml配置文件
# Global configuration section
global:
  # How often to scrape targets. Default is 1 minute.
  scrape_interval: 15s
  # How often to evaluate alerting/recording rules. Default is 1 minute.
  evaluation_interval: 15s
  # Per-scrape timeout; must be <= scrape_interval.
  scrape_timeout: 10s
  # Query log, including per-phase timing statistics.
  query_log_file: /opt/logs/prometheus_query_log
  # Global label set: attached to every sample collected by this instance
  # (and to alerts/remote-write data).
  external_labels:
    account: 'huawei-main'
    # NOTE: fixed typo 'beijng-01' -> 'beijing-01'; confirm no downstream
    # dashboards/rules match on the old misspelled value.
    region: 'beijing-01'

# Alertmanager section: where Prometheus pushes fired alerts.
alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets:
            - "localhost:9093"

# Alerting / recording (pre-aggregation) rule files.
rule_files:
  - /etc/prometheus/rules/record.yml
  - /etc/prometheus/rules/alert.yml

# Scrape configuration section.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries
  # scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'; scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

# Remote read section.
remote_read:
  # Another Prometheus instance.
  - url: http://prometheus/v1/read
    read_recent: true
  # M3DB coordinator.
  - url: "http://m3coordinator-read:7201/api/v1/prom/remote/read"
    read_recent: true

# Remote write section.
remote_write:
  - url: "http://m3coordinator-write:7201/api/v1/prom/remote/write"
    queue_config:
      capacity: 10000
      max_samples_per_send: 60000
    write_relabel_configs:
      # Drop series whose metric name starts with any of these prefixes.
      # (`replacement` removed: it is ignored by the `drop` action.)
      - source_labels: [__name__]
        separator: ';'
        regex: '(kubelet_|apiserver_|container_fs_).*'
        action: drop
2.安装并运行prometheus
# Run Prometheus in Docker, mounting the config created in step 1.
# Fixes: added the required space before each line-continuation backslash,
# and corrected the mount source typo 'promethues.yml' -> 'prometheus.yml'
# (it must match the file created in /tmp in step 1, otherwise Docker mounts
# a nonexistent path and the container uses the image's default config).
docker run -d \
  --name=prometheus \
  -p 9090:9090 \
  -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus
3.访问localhost:9090查看prometheus运行情况