Prometheus mysql_exporter centralized monitoring
Advantages of the centralized approach
- No need to install mysql_exporter on every MySQL host
- Better suited to cloud databases, where mysql_exporter cannot be installed on the instance
Download mysql_exporter
Monitoring MySQL revolves around the dsn variable:
dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", "root", "123456", target, 3306)
Here, for testing, the account and password of the MySQL user used for monitoring are hardcoded; the credentials could instead be fetched from an API or obtained in whatever other way fits your environment, as in the sketch below.
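A minimal sketch of the API approach, replacing the hardcoded fmt.Sprintf line with a per-target lookup. Everything here is illustrative: the cmdb.example.com endpoint, its JSON fields and the getDSN helper are assumptions, not part of mysql_exporter; the extra imports ("encoding/json", "net/http", "net/url") would need to be added to mysql_exporter.go.

```go
import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// getDSN builds the DSN for one scrape target, pulling the monitoring
// account from a hypothetical credentials API instead of hardcoding it.
func getDSN(target string) (string, error) {
	// Assumed endpoint that returns {"user":"...","password":"..."} for the host.
	resp, err := http.Get("http://cmdb.example.com/api/mysql-credentials?host=" + url.QueryEscape(target))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var cred struct {
		User     string `json:"user"`
		Password string `json:"password"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&cred); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s:%s@tcp(%s:%d)/", cred.User, cred.Password, target, 3306), nil
}
```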
Modify mysql_exporter.go
- Changes to the newHandler function
func newHandler(metrics collector.Metrics, scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		filteredScrapers := scrapers
		// -------------- added section ---------------- //
		v := r.URL.Query()
		params := v["collect[]"]
		target := v.Get("target")
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/", "root", "123456", target, 3306)
		level.Info(logger).Log("dsn", dsn)
		// -------------- added section ---------------- //
		// Use request context for cancellation when connection gets closed.
		ctx := r.Context()
		// If a timeout is configured via the Prometheus header, add it to the context.
		if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
			timeoutSeconds, err := strconv.ParseFloat(v, 64)
			if err != nil {
				level.Error(logger).Log("msg", "Failed to parse timeout from Prometheus header", "err", err)
			} else {
				if *timeoutOffset >= timeoutSeconds {
					// Ignore timeout offset if it doesn't leave time to scrape.
					level.Error(logger).Log("msg", "Timeout offset should be lower than prometheus scrape timeout", "offset", *timeoutOffset, "prometheus_scrape_timeout", timeoutSeconds)
				} else {
					// Subtract timeout offset from timeout.
					timeoutSeconds -= *timeoutOffset
				}
				// Create new timeout context with request context as parent.
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
				defer cancel()
				// Overwrite request with timeout context.
				r = r.WithContext(ctx)
			}
		}
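With this change the exporter builds the DSN per request, so the MySQL host to scrape is chosen through the target URL parameter. A quick manual check is sketched below; it assumes the exporter is running locally on its default port 9104 with the metrics path left at /metrics (both assumptions, adjust to your setup):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Ask the centralized exporter to scrape the MySQL instance at 10.0.0.5
	// by passing it as the target query parameter.
	resp, err := http.Get("http://127.0.0.1:9104/metrics?target=10.0.0.5")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // prints the mysql_* metrics collected from 10.0.0.5
}
```

In Prometheus itself this parameter is normally injected through relabel_configs, the same multi-target pattern used by exporters such as blackbox_exporter.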
- Changes to the main function