Spring Boot Prometheus Monitoring

Spring Boot application configuration

  1. Add the dependency

    <dependency>
        <groupId>io.micrometer</groupId>
        <artifactId>micrometer-registry-prometheus</artifactId>
        <version>1.7.3</version>
    </dependency>
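
    Note: the /actuator/prometheus endpoint is served by Spring Boot Actuator, so
    spring-boot-starter-actuator must also be on the classpath (the version can be
    omitted when the Spring Boot parent POM manages it):

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
    </dependency>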
    
  2. Configuration file (application.properties)

    spring.application.name=mrds-md-
    management.endpoints.web.exposure.include=*
    management.metrics.tags.application=${spring.application.name}
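
    Exposing `*` publishes every actuator endpoint over HTTP; if that is too broad
    for production, a tighter exposure list (a minimal sketch) works as well:

    management.endpoints.web.exposure.include=health,info,prometheus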
    
  3. Add a bean configuration to the application

    // Tags every meter with the application name (same effect as the
    // management.metrics.tags.application property above)
    @Bean
    MeterRegistryCustomizer<MeterRegistry> configurer(
            @Value("${spring.application.name}") String applicationName) {
        return (registry) -> registry.config().commonTags("application", applicationName);
    }
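
    Beyond the built-in JVM metrics, custom business metrics can be registered
    through the same MeterRegistry. A minimal sketch (the service and the metric
    name orders.created are hypothetical):

    import io.micrometer.core.instrument.Counter;
    import io.micrometer.core.instrument.MeterRegistry;
    import org.springframework.stereotype.Service;

    @Service
    public class OrderService {

        private final Counter ordersCreated;

        public OrderService(MeterRegistry registry) {
            // Shows up on /actuator/prometheus as orders_created_total
            this.ordersCreated = Counter.builder("orders.created")
                    .description("Number of orders created")
                    .register(registry);
        }

        public void createOrder() {
            ordersCreated.increment();
        }
    }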
    
  4. Start the project and verify

    Start the application and visit the URL: http://localhost:8080/actuator/prometheus (Spring Boot's default port 8080, matching the scrape target configured below).

You should see a set of metrics like the following:

# HELP jvm_threads_states_threads The current number of threads having NEW state
# TYPE jvm_threads_states_threads gauge
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="terminated",} 0.0
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="runnable",} 33.0
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="new",} 0.0
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="waiting",} 50.0
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="timed-waiting",} 15.0
jvm_threads_states_threads{application="huice-md-mrds-reciever",state="blocked",} 0.0
# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool
# TYPE jvm_buffer_total_capacity_bytes gauge
jvm_buffer_total_capacity_bytes{application="huice-md-mrds-reciever",id="direct",} 8192.0
jvm_buffer_total_capacity_bytes{application="huice-md-mrds-reciever",id="mapped",} 0.0
# HELP logback_events_total Number of error level events that made it to the logs
# TYPE logback_events_total counter
logback_events_total{application="huice-md-mrds-reciever",level="info",} 31.0
logback_events_total{application="huice-md-mrds-reciever",level="error",} 0.0
logback_events_total{application="huice-md-mrds-reciever",level="warn",} 1.0
logback_events_total{application="huice-md-mrds-reciever",level="trace",} 0.0
logback_events_total{application="huice-md-mrds-reciever",level="debug",} 72.0
# HELP jvm_gc_pause_seconds Time spent in GC pause
# TYPE jvm_gc_pause_seconds summary
jvm_gc_pause_seconds_count{action="end of minor GC",application="huice-md-mrds-reciever",cause="Allocation Failure",} 28.0
jvm_gc_pause_seconds_sum{action="end of minor GC",application="huice-md-mrds-reciever",cause="Allocation Failure",} 0.426
jvm_gc_pause_seconds_count{action="end of minor GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 2.0
jvm_gc_pause_seconds_sum{action="end of minor GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 0.024
jvm_gc_pause_seconds_count{action="end of major GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 2.0
jvm_gc_pause_seconds_sum{action="end of major GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 0.174
# HELP jvm_gc_pause_seconds_max Time spent in GC pause
# TYPE jvm_gc_pause_seconds_max gauge
jvm_gc_pause_seconds_max{action="end of minor GC",application="huice-md-mrds-reciever",cause="Allocation Failure",} 0.023
jvm_gc_pause_seconds_max{action="end of minor GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 0.015
jvm_gc_pause_seconds_max{action="end of major GC",application="huice-md-mrds-reciever",cause="Metadata GC Threshold",} 0.101

Prometheus configuration

  1. Start Prometheus in Docker

     
    # Pull the image
    docker pull prom/prometheus
    # Start the container
    docker run -p 9090:9090 prom/prometheus
    # Copy the config file out of the container (445818f4494f is the container ID; find yours with `docker ps`)
    docker cp 445818f4494f:/etc/prometheus/prometheus.yml d:/
    
    
  2. Edit the configuration file: prometheus.yml

    # my global config
    global:
      scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
      evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
      # scrape_timeout is set to the global default (10s).
    
    # Alertmanager configuration
    alerting:
      alertmanagers:
      - static_configs:
        - targets:
          # - alertmanager:9093
    
    # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
    rule_files:
      # - "first_rules.yml"
      # - "second_rules.yml"
    
    # A scrape configuration containing exactly one endpoint to scrape:
    # Here it's Prometheus itself.
    scrape_configs:
      - job_name: 'prometheus'
        static_configs:
        - targets: ['192.168.1.180:9090']
      ### The following job scrapes the Spring Boot application
      - job_name: 'springboot_prometheus'
        scrape_interval: 5s
        metrics_path: '/actuator/prometheus'
        static_configs:
          - targets: ['192.168.1.180:8080']
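
    Note: the target uses the host's LAN IP (192.168.1.180) rather than localhost,
    because Prometheus runs inside a container, where localhost refers to the
    container itself. A quick reachability check from the Docker host (sketch):

    curl -s http://192.168.1.180:8080/actuator/prometheus | head -n 5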
    
    

  3. Restart Prometheus

    # Remove the old container
    docker rm -f 445818f4494f
    # Start again with the edited config mounted over the default one
    docker run -d -p 9090:9090 -v D:/Enviroments/monitor/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus

    # Alternative: keep the config on a data volume
    docker run -p 9090:9090 -v /prometheus-data prom/prometheus --config.file=/prometheus-data/prometheus.yml
  
  4. Verify

    Open http://192.168.1.180:9090 in a browser and check Status -> Targets:
    both scrape jobs should show state UP.
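
    Once the targets are UP, the scraped series can be queried in the expression
    browser on the same page; two example queries based on the metrics shown earlier:

    # Thread counts per state for the application
    jvm_threads_states_threads{application="huice-md-mrds-reciever"}
    # Per-second rate of logged error events over the last 5 minutes
    rate(logback_events_total{level="error"}[5m])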

Install Grafana

  1. Install and run

    # https://grafana.com/grafana/download?platform=docker
    docker run -d --name=grafana -p 3000:3000 grafana/grafana
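
    If the container is ever recreated, dashboards and settings can be preserved by
    mounting a named volume at Grafana's data directory (a sketch following the
    Grafana Docker docs):

    docker run -d --name=grafana -p 3000:3000 -v grafana-storage:/var/lib/grafana grafana/grafana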

  2. Verify

    Log in at http://192.168.1.180:3000/login
    with the default credentials admin / admin

  3. Configure

    After logging in to Grafana, first add a data source:
    click Add data source, select Prometheus, and set the HTTP URL
    to the Prometheus address: http://192.168.1.180:9090

  4. Verify
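
    To confirm the data source works end to end, build a panel that queries one of
    the JVM metrics from above (for example jvm_threads_states_threads), or import
    a community JVM dashboard from grafana.com (dashboard ID 4701, "JVM (Micrometer)",
    is a commonly used one for Micrometer-based applications).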
