
Integrating a JVM application with Prometheus monitoring

  1. Create the following two configuration classes:
package com.haoze.doctor.config;
import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.function.ToDoubleFunction;
@Component
public class DruidCollector {
    @Value("${spring.application.name}")
    private String applicationName;
    private static final String LABEL_NAME = "druid_pool";
    private final MeterRegistry registry;
    DruidCollector(MeterRegistry registry) {
        this.registry = registry;
    }
    void register(List<DruidDataSource> dataSources) {
        dataSources.forEach(druidDataSource -> {
            // basic configuration
            createGauge(druidDataSource, "druid_initial_size", "Initial size", datasource -> (double) datasource.getInitialSize());
            createGauge(druidDataSource, "druid_min_idle", "Min idle", datasource -> (double) datasource.getMinIdle());
            createGauge(druidDataSource, "druid_max_active", "Max active", datasource -> (double) datasource.getMaxActive());
            // connection pool core metrics
            createGauge(druidDataSource, "druid_active_count", "Active count", datasource -> (double) datasource.getActiveCount());
            createGauge(druidDataSource, "druid_active_peak", "Active peak", datasource -> (double) datasource.getActivePeak());
            createGauge(druidDataSource, "druid_pooling_peak", "Pooling peak", datasource -> (double) datasource.getPoolingPeak());
            createGauge(druidDataSource, "druid_pooling_count", "Pooling count", datasource -> (double) datasource.getPoolingCount());
            createGauge(druidDataSource, "druid_wait_thread_count", "Wait thread count", datasource -> (double) datasource.getWaitThreadCount());
            // connection pool detail metrics
            createGauge(druidDataSource, "druid_not_empty_wait_count", "Not empty wait count", datasource -> (double) datasource.getNotEmptyWaitCount());
            createGauge(druidDataSource, "druid_not_empty_wait_millis", "Not empty wait millis", datasource -> (double) datasource.getNotEmptyWaitMillis());
            createGauge(druidDataSource, "druid_not_empty_thread_count", "Not empty thread count", datasource -> (double) datasource.getNotEmptyWaitThreadCount());
            createGauge(druidDataSource, "druid_logic_connect_count", "Logic connect count", datasource -> (double) datasource.getConnectCount());
            createGauge(druidDataSource, "druid_logic_close_count", "Logic close count", datasource -> (double) datasource.getCloseCount());
            createGauge(druidDataSource, "druid_logic_connect_error_count", "Logic connect error count", datasource -> (double) datasource.getConnectErrorCount());
            createGauge(druidDataSource, "druid_physical_connect_count", "Physical connect count", datasource -> (double) datasource.getCreateCount());
            createGauge(druidDataSource, "druid_physical_close_count", "Physical close count", datasource -> (double) datasource.getDestroyCount());
            createGauge(druidDataSource, "druid_physical_connect_error_count", "Physical connect error count", datasource -> (double) datasource.getCreateErrorCount());
            // SQL execution core metrics
            createGauge(druidDataSource, "druid_error_count", "Error count", datasource -> (double) datasource.getErrorCount());
            createGauge(druidDataSource, "druid_execute_count", "Execute count", datasource -> (double) datasource.getExecuteCount());
            // transaction metrics
            createGauge(druidDataSource, "druid_start_transaction_count", "Start transaction count", datasource -> (double) datasource.getStartTransactionCount());
            createGauge(druidDataSource, "druid_commit_count", "Commit count", datasource -> (double) datasource.getCommitCount());
            createGauge(druidDataSource, "druid_rollback_count", "Rollback count", datasource -> (double) datasource.getRollbackCount());
            // SQL execution detail metrics
            createGauge(druidDataSource, "druid_prepared_statement_open_count", "Prepared statement open count", datasource -> (double) datasource.getPreparedStatementCount());
            createGauge(druidDataSource, "druid_prepared_statement_closed_count", "Prepared statement closed count", datasource -> (double) datasource.getClosedPreparedStatementCount());
            createGauge(druidDataSource, "druid_ps_cache_access_count", "PS cache access count", datasource -> (double) datasource.getCachedPreparedStatementAccessCount());
            createGauge(druidDataSource, "druid_ps_cache_hit_count", "PS cache hit count", datasource -> (double) datasource.getCachedPreparedStatementHitCount());
            createGauge(druidDataSource, "druid_ps_cache_miss_count", "PS cache miss count", datasource -> (double) datasource.getCachedPreparedStatementMissCount());
            createGauge(druidDataSource, "druid_execute_query_count", "Execute query count", datasource -> (double) datasource.getExecuteQueryCount());
            createGauge(druidDataSource, "druid_execute_update_count", "Execute update count", datasource -> (double) datasource.getExecuteUpdateCount());
            createGauge(druidDataSource, "druid_execute_batch_count", "Execute batch count", datasource -> (double) datasource.getExecuteBatchCount());
            // non-core metrics, mostly static configuration values
            createGauge(druidDataSource, "druid_max_wait", "Max wait", datasource -> (double) datasource.getMaxWait());
            createGauge(druidDataSource, "druid_max_wait_thread_count", "Max wait thread count", datasource -> (double) datasource.getMaxWaitThreadCount());
            createGauge(druidDataSource, "druid_login_timeout", "Login timeout", datasource -> (double) datasource.getLoginTimeout());
            createGauge(druidDataSource, "druid_query_timeout", "Query timeout", datasource -> (double) datasource.getQueryTimeout());
            createGauge(druidDataSource, "druid_transaction_query_timeout", "Transaction query timeout", datasource -> (double) datasource.getTransactionQueryTimeout());
        });
    }
    private void createGauge(DruidDataSource weakRef, String metric, String help, ToDoubleFunction<DruidDataSource> measure) {
        Gauge.builder(metric, weakRef, measure)
                .description(help)
                .tag(LABEL_NAME, weakRef.getUsername() + "-" + weakRef.getUrl())
                .tag("application", applicationName)
                .register(this.registry);
    }
}
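A note on the measure lambdas above: Micrometer holds the state object passed to Gauge.builder(name, obj, fn) only weakly, so each lambda must read from its datasource parameter rather than capture the enclosing druidDataSource; a capturing lambda would pin a closed pool in memory and defeat the weak reference. Each gauge is exported with a druid_pool tag of the form <username>-<jdbc-url>, so you get one Prometheus series per pool per metric.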

package com.haoze.doctor.config;
import com.alibaba.druid.pool.DruidDataSource;
import io.micrometer.core.instrument.MeterRegistry;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@Configuration
@ConditionalOnClass({DruidDataSource.class, MeterRegistry.class})
@Slf4j
public class MetricsConfiguration {
    @Value("${spring.application.name}")
    private String applicationName;
    @Autowired
    private DruidCollector druidCollector;
    @Bean
    MeterRegistryCustomizer<MeterRegistry> configurer() {
        return (registry) -> registry.config().commonTags("application", applicationName);
    }
    @Autowired
    public void bindMetricsRegistryToDruidDataSources(Collection<DataSource> dataSources) throws SQLException {
        List<DruidDataSource> druidDataSources = new ArrayList<>(dataSources.size());
        for (DataSource dataSource : dataSources) {
            // unwrap() throws SQLException for non-Druid pools, so check with isWrapperFor() first
            if (dataSource.isWrapperFor(DruidDataSource.class)) {
                druidDataSources.add(dataSource.unwrap(DruidDataSource.class));
            }
        }
        druidCollector.register(druidDataSources);
        log.info("finished registering Druid metrics with Micrometer");
    }
}
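The @Autowired bindMetricsRegistryToDruidDataSources method receives every DataSource bean in the application context at startup. With druid-spring-boot-starter the pool bean is created for you; if you wire it by hand, a minimal sketch is below (the spring.datasource.* property names are assumptions, adjust them to your configuration):

package com.haoze.doctor.config;
import com.alibaba.druid.pool.DruidDataSource;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import javax.sql.DataSource;
@Configuration
public class DataSourceConfiguration {
    // exposed as a DataSource bean, so it is collected by
    // bindMetricsRegistryToDruidDataSources(Collection<DataSource>) above
    @Bean(initMethod = "init", destroyMethod = "close")
    public DataSource dataSource(@Value("${spring.datasource.url}") String url,
                                 @Value("${spring.datasource.username}") String username,
                                 @Value("${spring.datasource.password}") String password) {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl(url);
        ds.setUsername(username);
        ds.setPassword(password);
        ds.setInitialSize(5);
        ds.setMinIdle(5);
        ds.setMaxActive(20);
        return ds;
    }
}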

  2. The druid version in the pom must be this or newer:
     <dependency>
         <groupId>com.alibaba</groupId>
         <artifactId>druid</artifactId>
         <version>1.1.14</version>
     </dependency>

Other Maven dependencies to add:

     <dependency>
         <groupId>org.springframework.boot</groupId>
         <artifactId>spring-boot-starter-actuator</artifactId>
     </dependency>
     <dependency>
         <groupId>io.micrometer</groupId>
         <artifactId>micrometer-registry-prometheus</artifactId>
     </dependency>
     <!-- lets Micrometer collect additional JVM process information for display in Grafana -->
     <dependency>
         <groupId>io.github.mweirauch</groupId>
         <artifactId>micrometer-jvm-extras</artifactId>
         <version>0.2.2</version>
     </dependency>
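Note that micrometer-jvm-extras only provides extra meter binders; they are not auto-configured. Following the project's README, register them as beans (a minimal sketch; Spring Boot binds any MeterBinder bean to the meter registry automatically):

package com.haoze.doctor.config;
import io.github.mweirauch.micrometer.jvm.extras.ProcessMemoryMetrics;
import io.github.mweirauch.micrometer.jvm.extras.ProcessThreadMetrics;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
public class JvmExtrasConfiguration {
    // process.memory.* gauges (resident/virtual/swap size, read from /proc)
    @Bean
    public MeterBinder processMemoryMetrics() {
        return new ProcessMemoryMetrics();
    }
    // process.threads.* gauges
    @Bean
    public MeterBinder processThreadMetrics() {
        return new ProcessThreadMetrics();
    }
}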
  3. The actuator configuration must expose the prometheus endpoint:
management:
  endpoints:
    web:
      exposure:
        include: "httptrace,health,shutdown,prometheus"
  # note: per-endpoint settings live under management.endpoint (singular)
  endpoint:
    shutdown:
      enabled: true
    health:
      show-details: ALWAYS
  4. Local verification (screenshot from the original post omitted).
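With the application running, request the endpoint, e.g. curl http://localhost:8203/actuator/prometheus (8203 matches the scrape target below; substitute the port your service listens on). The response should contain the standard Micrometer JVM series such as jvm_memory_used_bytes, along with the druid_* gauges registered above.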
  5. For an ordinary deployment, that is all the application-side work: point Prometheus at the service's IP and port by adding a job like the following to prometheus.yml, then restart the Prometheus service.
  # new JVM scrape job (goes under scrape_configs)
  - job_name: "jvm"
    # how often to scrape
    scrape_interval: 5s
    # per-scrape timeout
    scrape_timeout: 5s
    # path to pull metrics from
    metrics_path: '/actuator/prometheus'
    # address of the application service
    static_configs:
      - targets: ['169.169.169.98:8203']
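After reloading or restarting Prometheus, the new target should show as UP on the Status > Targets page, and the series become queryable, e.g. jvm_memory_used_bytes{application="<your spring.application.name>"} in the expression browser or in Grafana.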
  6. For containerized deployments, where instance IPs change, we instead rely on Prometheus service discovery; here we use Nacos. Edit Nacos's application.properties as follows and restart the Nacos service:

## set to true to enable the Prometheus service-discovery endpoint
nacos.prometheus.metrics.enabled=true

You can then open http://ip:8848/nacos/prometheus/ to inspect the registered instances.
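This endpoint serves Prometheus's HTTP service-discovery format: a JSON array whose elements each carry a targets list and a labels map describing the instances currently registered in Nacos, which is exactly what the http_sd_configs block below consumes. (Availability of this endpoint depends on your Nacos version; check its documentation.)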
  7. Configure Prometheus auto-discovery. Add the following to prometheus.yml:

   - job_name: 'prod-jvm'
     metrics_path: /actuator/prometheus
     scheme: http
     http_sd_configs:
       ## namespaceId/prod limits discovery to the prod namespace
       ## if you do not need a namespace, drop that part: http://ip:8848/nacos/prometheus/
       - url: http://ip:8848/nacos/prometheus/namespaceId/prod
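Reload Prometheus and every instance registered in the prod namespace is discovered and scraped automatically; when containers restart with new IPs, the target list follows the Nacos registry, so prometheus.yml no longer needs editing for each change.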
