栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Java

数据源监控

Java 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

数据源监控

背景:
针对线上数据库数据源的配置,怎么合理的设置参数,更加清楚的了解线上服务的运行状态,下面将说下常用数据源的监控。

1.Druid监控

问题:
众所周知,alibaba druid提供了比较完善的数据库监控,但是也是有比较明显的劣势(比如:数据源的连接数等在监控页面只能看到那瞬间的值等),不能持久化监控以及和公司内部监控告警集成
解决:
通过内部druid监控方法

/**
 * Daemon thread that periodically snapshots every registered Druid data source
 * (via {@code DruidDataSourceStatManager}) and either ships the pool metrics to
 * StatsD (when {@code statsDClient} is configured) or falls back to Druid's own
 * {@code logStats()} output.
 */
private class DruidStatsThread extends Thread {

    public DruidStatsThread(String name) {
        super(name);
        // Daemon so this monitoring thread never blocks JVM shutdown.
        this.setDaemon(true);
    }

    @Override
    public void run() {
        // Optional start-up delay; getInitialDelay() is in seconds.
        long initialDelay = metricDruidProperties.getInitialDelay() * 1000;
        if (initialDelay > 0) {
            MwThreadUtil.sleep(initialDelay);
        }
        while (!this.isInterrupted()) {
            try {
                try {
                    // FIX: declare the parameterized Set. With the raw type the
                    // forEach lambda parameter is Object, and the member calls
                    // below do not compile. (May require importing
                    // com.alibaba.druid.pool.DruidDataSource — verify imports.)
                    Set<DruidDataSource> druidDataSources =
                            DruidDataSourceStatManager.getDruidDataSourceInstances();
                    Optional.ofNullable(druidDataSources).ifPresent(sources -> sources.forEach(druidDataSource -> {
                        DruidDataSourceStatValue statValue = druidDataSource.getStatValueAndReset();
                        if (Objects.isNull(statValue)) {
                            // FIX: getStatValueAndReset() can return null before the
                            // pool is fully initialized; skip instead of throwing NPE.
                            return;
                        }
                        long maxWaitMillis = druidDataSource.getMaxWait();           // max wait time for a connection
                        long waitThreadCount = statValue.getWaitThreadCount();       // threads currently waiting for a connection
                        long notEmptyWaitMillis = statValue.getNotEmptyWaitMillis(); // cumulative wait time acquiring connections
                        long notEmptyWaitCount = statValue.getNotEmptyWaitCount();   // cumulative wait count acquiring connections

                        int maxActive = druidDataSource.getMaxActive();              // max active connections
                        int poolingCount = statValue.getPoolingCount();              // current pooled connections
                        int poolingPeak = statValue.getPoolingPeak();                // pooled connections peak
                        int activeCount = statValue.getActiveCount();                // current active connections
                        int activePeak = statValue.getActivePeak();                  // active connections peak

                        if (Objects.nonNull(statsDClient)) {
                            URI jdbcUri = parseJdbcUrl(druidDataSource.getUrl());
                            Optional.ofNullable(jdbcUri).ifPresent(uri -> {
                                // Metric key: prefix + host (dots escaped for the metric path) + port.
                                String host = StringUtils.replaceChars(uri.getHost(), '.', '_');
                                String prefix = METRIC_DRUID_PREFIX + host + '.' + uri.getPort() + '.';
                                statsDClient.recordExecutionTime(prefix + "maxWaitMillis", maxWaitMillis);
                                statsDClient.recordExecutionTime(prefix + "waitThreadCount", waitThreadCount);
                                statsDClient.recordExecutionTime(prefix + "notEmptyWaitMillis", notEmptyWaitMillis);
                                statsDClient.recordExecutionTime(prefix + "notEmptyWaitCount", notEmptyWaitCount);
                                statsDClient.recordExecutionTime(prefix + "maxActive", maxActive);
                                statsDClient.recordExecutionTime(prefix + "poolingCount", poolingCount);
                                statsDClient.recordExecutionTime(prefix + "poolingPeak", poolingPeak);
                                statsDClient.recordExecutionTime(prefix + "activeCount", activeCount);
                                statsDClient.recordExecutionTime(prefix + "activePeak", activePeak);
                            });
                        } else {
                            // No StatsD client configured: fall back to Druid's own log output.
                            druidDataSource.logStats();
                        }
                    }));
                } catch (Exception e) {
                    // Keep the collection loop alive across per-cycle failures.
                    logger.error("druid stats exception", e);
                }
                TimeUnit.SECONDS.sleep(metricDruidProperties.getStatsInterval());
            } catch (InterruptedException e) {
                // Restore the interrupt flag so the while-condition exits the loop.
                Thread.currentThread().interrupt();
                logger.info("metric druid interrupt exit...");
            } catch (Exception e) {
                logger.error("metric druid exception...", e);
            }
        }
    }
}

/**
 * Converts a JDBC url ("jdbc:&lt;scheme&gt;://host:port/...") into a {@link URI}
 * so the host and port can be extracted for metric key prefixes.
 *
 * @param url the JDBC url; may be null or blank
 * @return the parsed URI, or {@code null} when the url is blank, does not start
 *         with "jdbc:", or the remainder is not a valid URI
 */
private URI parseJdbcUrl(String url) {
    if (StringUtils.isBlank(url) || !StringUtils.startsWith(url, "jdbc:")) {
        return null;
    }
    // Strip the leading "jdbc:" so the remainder is a standard scheme://host:port URI.
    String cleanURI = url.substring("jdbc:".length());
    try {
        return URI.create(cleanURI);
    } catch (IllegalArgumentException e) {
        // FIX: URI.create throws on non-hierarchical jdbc urls (e.g. "jdbc:oracle:thin:@...");
        // return null so callers treat it like any other unparseable url.
        return null;
    }
}
2.Hikari监控

问题:
针对Hikari数据源,官方没有提供统一的监控页面,但是提供了JMX入口;同理,可以通过JMX定期采集连接池指标,并持久化到监控服务上
解决:

/**
 * Daemon thread that periodically reads the JMX beans of every known Hikari
 * data source and forwards the pool statistics to {@code statsPool}.
 */
private class HikariStatsThread extends Thread {

    public HikariStatsThread(String name) {
        super(name);
        // Daemon so this monitoring thread never blocks JVM shutdown.
        this.setDaemon(true);
    }

    @Override
    public void run() {
        // Optional start-up delay; getInitialDelay() is in seconds.
        long initialDelay = metricHikariProperties.getInitialDelay() * 1000;
        if (initialDelay > 0) {
            MwThreadUtil.sleep(initialDelay);
        }
        while (!this.isInterrupted()) {
            try {
                Optional.ofNullable(hikariDataSources).ifPresent(sources -> sources.forEach(dataSource -> {
                    URI jdbcUri = parseJdbcUrl(dataSource.getJdbcUrl());
                    if (jdbcUri == null) {
                        // Skip data sources whose jdbc url cannot be parsed.
                        return;
                    }
                    // Metric key: prefix + host (dots escaped for the metric path) + port.
                    String host = StringUtils.replaceChars(jdbcUri.getHost(), '.', '_');
                    String prefix = METRIC_HIKARI_PREFIX + host + '.' + jdbcUri.getPort() + '.';

                    PoolStatBean poolStatBean = PoolStatBean.builder().build();

                    // Runtime pool state (active/idle/total/waiting) from the pool MXBean.
                    HikariPoolMXBean poolMXBean = dataSource.getHikariPoolMXBean();
                    if (poolMXBean != null) {
                        poolStatBean.setActiveConnections(poolMXBean.getActiveConnections());
                        poolStatBean.setIdleConnections(poolMXBean.getIdleConnections());
                        poolStatBean.setTotalConnections(poolMXBean.getTotalConnections());
                        poolStatBean.setThreadsAwaitingConnection(poolMXBean.getThreadsAwaitingConnection());
                    }

                    // Static pool limits (max size / min idle) from the config MXBean.
                    HikariConfigMXBean configMXBean = dataSource.getHikariConfigMXBean();
                    if (configMXBean != null) {
                        poolStatBean.setMaximumPoolSize(configMXBean.getMaximumPoolSize());
                        poolStatBean.setMinimumIdle(configMXBean.getMinimumIdle());
                    }

                    statsPool(prefix, poolStatBean);
                }));
                TimeUnit.SECONDS.sleep(metricHikariProperties.getStatsInterval());
            } catch (InterruptedException e) {
                // Restore the interrupt flag so the while-condition exits the loop.
                Thread.currentThread().interrupt();
                logger.info("metric hikari interrupt exit...");
            } catch (Exception e) {
                logger.error("metric hikari exception...", e);
            }
        }
    }
}

/**
 * Reports one pool-statistics snapshot: sends each field to StatsD when a
 * client is configured, otherwise logs the whole snapshot on one line.
 *
 * @param prefix       metric key prefix ("&lt;base&gt;.&lt;host&gt;.&lt;port&gt;.")
 * @param poolStatBean the snapshot to report
 */
private void statsPool(String prefix, PoolStatBean poolStatBean) {
    if (Objects.nonNull(statsDClient)) {
        statsDClient.recordExecutionTime(prefix + "activeConnections", poolStatBean.getActiveConnections());
        statsDClient.recordExecutionTime(prefix + "idleConnections", poolStatBean.getIdleConnections());
        statsDClient.recordExecutionTime(prefix + "totalConnections", poolStatBean.getTotalConnections());
        statsDClient.recordExecutionTime(prefix + "threadsAwaitingConnection",
            poolStatBean.getThreadsAwaitingConnection());
        statsDClient.recordExecutionTime(prefix + "maximumPoolSize", poolStatBean.getMaximumPoolSize());
        statsDClient.recordExecutionTime(prefix + "minimumIdle", poolStatBean.getMinimumIdle());
        return;
    }
    // No StatsD client: log the snapshot instead.
    // FIX: chain append() calls instead of concatenating strings inside
    // append(), which defeated the purpose of the StringBuilder; also size
    // the builder realistically (16 was far too small for six entries).
    StringBuilder sBuilder = new StringBuilder(256);
    sBuilder.append(prefix).append("activeConnections => [").append(poolStatBean.getActiveConnections()).append("],");
    sBuilder.append(prefix).append("idleConnections => [").append(poolStatBean.getIdleConnections()).append("],");
    sBuilder.append(prefix).append("totalConnections => [").append(poolStatBean.getTotalConnections()).append("],");
    sBuilder.append(prefix).append("threadsAwaitingConnection => [").append(poolStatBean.getThreadsAwaitingConnection()).append("],");
    sBuilder.append(prefix).append("maximumPoolSize => [").append(poolStatBean.getMaximumPoolSize()).append("],");
    sBuilder.append(prefix).append("minimumIdle => [").append(poolStatBean.getMinimumIdle()).append("]");
    logger.info(sBuilder.toString());
}

/**
 * Converts a JDBC url ("jdbc:&lt;scheme&gt;://host:port/...") into a {@link URI}
 * so the host and port can be extracted for metric key prefixes.
 *
 * NOTE(review): this method is a duplicate of the parseJdbcUrl shown in the
 * Druid section — a class may only define it once; keep a single copy.
 *
 * @param url the JDBC url; may be null or blank
 * @return the parsed URI, or {@code null} when the url is blank, does not start
 *         with "jdbc:", or the remainder is not a valid URI
 */
private URI parseJdbcUrl(String url) {
    if (StringUtils.isBlank(url) || !StringUtils.startsWith(url, "jdbc:")) {
        return null;
    }
    // Strip the leading "jdbc:" so the remainder is a standard scheme://host:port URI.
    String cleanURI = url.substring("jdbc:".length());
    try {
        return URI.create(cleanURI);
    } catch (IllegalArgumentException e) {
        // FIX: URI.create throws on non-hierarchical jdbc urls (e.g. "jdbc:oracle:thin:@...");
        // return null so callers treat it like any other unparseable url.
        return null;
    }
}

// Immutable-shape value holder for one Hikari pool snapshot; getters/setters
// and the builder are generated by Lombok (@Data/@Builder), so the code itself
// must stay untouched to preserve the generated API.
@Data
@Builder
private static class PoolStatBean {
    // Connections currently executing work (HikariPoolMXBean.getActiveConnections).
    private int activeConnections;
    // Connections sitting idle in the pool.
    private int idleConnections;
    // Total connections = active + idle.
    private int totalConnections;
    // Threads blocked waiting for a connection.
    private int threadsAwaitingConnection;
    // Configured upper bound of the pool (HikariConfigMXBean.getMaximumPoolSize).
    private int maximumPoolSize;
    // Configured minimum number of idle connections.
    private int minimumIdle;
}

注:以上只是提供一种解决方案,小伙伴们也可以用Prometheus采集,然后在Grafana等工具中展示出来~

本文由博客一文多发平台 OpenWrite 发布!

转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/239509.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号