Prometheus monitoring system learning notes - 1

Notes on integrating Prometheus with Spring Boot (via Maven)

		<!-- Prometheus support -->
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-starter-actuator</artifactId>
		</dependency>
		<dependency>
			<groupId>io.micrometer</groupId>
			<artifactId>micrometer-core</artifactId>
		</dependency>
		<dependency>
			<groupId>io.micrometer</groupId>
			<artifactId>micrometer-registry-prometheus</artifactId>
		</dependency>
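
With spring-boot-starter-actuator and micrometer-registry-prometheus on the classpath, Spring Boot auto-configures a Micrometer MeterRegistry whose metrics are exposed at /actuator/prometheus. As a minimal sketch (the OrderMetrics class and the orders_created_total metric name are made up for illustration), a custom counter can also be registered through Micrometer rather than the Prometheus simpleclient used in the interceptor further below:

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.stereotype.Component;

@Component
public class OrderMetrics {

	private final Counter orderCounter;

	// The auto-configured MeterRegistry is injected by Spring Boot;
	// metrics registered on it show up at /actuator/prometheus.
	public OrderMetrics(MeterRegistry registry) {
		this.orderCounter = Counter.builder("orders_created_total")
				.description("Number of orders created")
				.tag("source", "web")
				.register(registry);
	}

	public void recordOrder() {
		orderCounter.increment();
	}
}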

Prometheus monitoring configuration (application.properties)

spring.application.name=gold
management.endpoints.web.exposure.include=*
management.metrics.tags.application=${spring.application.name}
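
management.endpoints.web.exposure.include=* exposes all actuator endpoints, including /actuator/prometheus, and management.metrics.tags.application attaches an application tag to every metric. If you prefer to set common tags in code instead of properties, a hedged sketch using Spring Boot's MeterRegistryCustomizer (the MetricsConfig class name is made up here) could look like this:

import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MetricsConfig {

	// Adds an "application" tag to every metric; the programmatic equivalent of
	// management.metrics.tags.application=${spring.application.name}.
	// The value "gold" is hard-coded here to match spring.application.name above.
	@Bean
	public MeterRegistryCustomizer<MeterRegistry> commonTags() {
		return registry -> registry.config().commonTags("application", "gold");
	}
}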

Usage snippet (a Spring MVC interceptor that counts requests)

import java.lang.reflect.Method;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.springframework.web.method.HandlerMethod;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

import io.prometheus.client.Counter;

public class PrometheusInterceptor extends HandlerInterceptorAdapter {

	// Counter from the Prometheus simpleclient, registered with the default
	// CollectorRegistry. Labels: request path, handler "Class.method" name,
	// and the response status code at the time preHandle runs.
	private static final Counter COUNTER = Counter.build()
			.name("http_requests_total")
			.labelNames("path", "method", "code")
			.help("Total number of HTTP requests")
			.register();

	@Override
	public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler)
			throws Exception {
		String handlerLabel = "";
		if (handler instanceof HandlerMethod) {
			HandlerMethod handlerMethod = (HandlerMethod) handler;
			Method method = handlerMethod.getMethod();
			handlerLabel = method.getDeclaringClass().getSimpleName() + "." + method.getName();
		}
		COUNTER.labels(request.getRequestURI(), handlerLabel,
				String.valueOf(response.getStatus())).inc();
		return true;
	}

}
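
The post does not show how the interceptor is wired in. As a minimal sketch, assuming a standard Spring MVC setup (the WebConfig class name is made up), it can be registered like this:

import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class WebConfig implements WebMvcConfigurer {

	// Apply the interceptor to every request path so all handlers are counted.
	@Override
	public void addInterceptors(InterceptorRegistry registry) {
		registry.addInterceptor(new PrometheusInterceptor()).addPathPatterns("/**");
	}
}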

The prometheus.yml file is as follows (the gold and eteniot jobs scrape each Spring Boot application's /actuator/prometheus endpoint):

# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    #metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
    - targets: ['localhost:9090']
  - job_name: 'gold'
    metrics_path: '/actuator/prometheus'
    #metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
    - targets: ['localhost:8099']
  - job_name: 'eteniot'
    metrics_path: '/actuator/prometheus'
    #metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
    - targets: ['localhost:8080']

Just a beginner's record for future reference.
