diff --git a/src/main/java/cn/mesalab/config/ApplicationConfig.java b/src/main/java/cn/mesalab/config/ApplicationConfig.java
index 6c28e90..b55042e 100644
--- a/src/main/java/cn/mesalab/config/ApplicationConfig.java
+++ b/src/main/java/cn/mesalab/config/ApplicationConfig.java
@@ -59,9 +59,10 @@ public class ApplicationConfig {
     public static final Double BASELINE_KALMAN_P = ConfigUtils.getDoubleProperty("baseline.kalman.p");
     public static final Double BASELINE_KALMAN_M = ConfigUtils.getDoubleProperty("baseline.kalman.m");
 
-    //public static final Long DRUID_READ_BATCH_TIME_GRAD_HOUR = ConfigUtils.getLongProperty("druid.read.batch.time.grad.hour");
     public static final Integer THREAD_POOL_NUM = ConfigUtils.getIntProperty("thread.pool.num");
     public static final Integer PARTITION_NUM_MAX = ConfigUtils.getIntProperty("druid.partition.num.max");
+    public static final Integer DRUID_STATEMENT_QUERY_TIMEOUT = ConfigUtils.getIntProperty("druid.statement.query.timeout");
+    public static final Integer DRUID_CONNECTION_RETRY_TIME_MAX = ConfigUtils.getIntProperty("druid.connection.retry.time.max");
     public static final Integer DRUID_CONNECTION_RETRY_SLEEP_TIME = ConfigUtils.getIntProperty("druid.connection.retry.sleep.time");
diff --git a/src/main/java/cn/mesalab/dao/ReadHistoricalDruidData.java b/src/main/java/cn/mesalab/dao/ReadHistoricalDruidData.java
deleted file mode 100644
index d97dcff..0000000
--- a/src/main/java/cn/mesalab/dao/ReadHistoricalDruidData.java
+++ /dev/null
@@ -1,58 +0,0 @@
-package cn.mesalab.dao;
-
-import cn.mesalab.utils.DruidUtils;
-import org.apache.calcite.avatica.AvaticaConnection;
-import org.apache.calcite.avatica.AvaticaStatement;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * @author yjy
- * @version 1.0
- * @date 2021/8/3 8:10 PM
- */
-public class ReadHistoricalDruidData implements Callable<Map<String, List<Map<String, Object>>>> {
-    private static final Logger LOG = LoggerFactory.getLogger(ReadHistoricalDruidData.class);
-
-    private final String sql;
-    private final CountDownLatch countDownLatch;
-
-    public ReadHistoricalDruidData(
-            String sql,
-            CountDownLatch countDownLatch
-    ){
-        this.sql = sql;
-        this.countDownLatch = countDownLatch;
-    }
-
-    @Override
-    public Map<String, List<Map<String, Object>>> call() {
-        Map<String, List<Map<String, Object>>> resultData = new HashMap<>();
-        try {
-            long start = System.currentTimeMillis();
-            AvaticaConnection connection = DruidUtils.getConn();
-            AvaticaStatement stat = connection.createStatement();
-            Map<String, List<Map<String, Object>>> readFromDruid = DruidData.readFromDruid(sql, stat);
-            resultData.putAll(readFromDruid);
-
-            long end = System.currentTimeMillis();
-            LOG.info("Fetched server IPs: " + resultData.size() +
-                    " elapsed time: " + (end - start) +
-                    " remaining threads: " + countDownLatch.getCount());
-            connection.close();
-            stat.close();
-        } catch (Exception e) {
-            e.printStackTrace();
-        } finally {
-            countDownLatch.countDown();
-        }
-        return resultData;
-    }
-
-}
diff --git a/src/main/java/cn/mesalab/service/BaselineGeneration.java b/src/main/java/cn/mesalab/service/BaselineGeneration.java
index 58a14e3..d15be5d 100644
--- a/src/main/java/cn/mesalab/service/BaselineGeneration.java
+++ b/src/main/java/cn/mesalab/service/BaselineGeneration.java
@@ -2,7 +2,6 @@ package cn.mesalab.service;
 
 import cn.mesalab.config.ApplicationConfig;
 import cn.mesalab.dao.DruidData;
-import cn.mesalab.dao.ReadHistoricalDruidData;
 import cn.mesalab.utils.HbaseUtils;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import io.vavr.Tuple2;
diff --git a/src/main/java/cn/mesalab/service/BaselineSingleThread.java b/src/main/java/cn/mesalab/service/BaselineSingleThread.java
index 3fd1698..9829b05 100644
--- a/src/main/java/cn/mesalab/service/BaselineSingleThread.java
+++ b/src/main/java/cn/mesalab/service/BaselineSingleThread.java
@@ -131,6 +131,7 @@ public class BaselineSingleThread extends Thread {
         try {
             AvaticaConnection connection = DruidUtils.getConn();
             AvaticaStatement stat = connection.createStatement();
+            stat.setQueryTimeout(ApplicationConfig.DRUID_STATEMENT_QUERY_TIMEOUT);
             String sql = DruidData.getBatchDruidQuerySql(attackTypeList, currentBatch, batchPartitionRange);
             LOG.debug("Read Druid SQL: " + sql);
             readFromDruid = DruidData.readFromDruid(sql, stat);
diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties
index 66d55db..06d03cc 100644
--- a/src/main/resources/application.properties
+++ b/src/main/resources/application.properties
@@ -68,9 +68,9 @@ monitor.frequency.bin.num=100
 ##########################################
 ######### Concurrency parameters #########
 ##########################################
-#druid.read.batch.time.grad.hour=4
-thread.pool.num=500
+thread.pool.num=100
 #druid partition field partition_num has a maximum value of 9999
+druid.statement.query.timeout=36000
 druid.partition.num.max=10000
 druid.connection.retry.time.max=10000
 #druid reconnect wait time is roughly the time for one thread to finish processing
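
Note (not part of the patch): a minimal sketch of how the settings referenced above might be consumed together, i.e. applying the new druid.statement.query.timeout to every statement and bounding reconnect attempts with druid.connection.retry.time.max and druid.connection.retry.sleep.time. The class DruidStatementFactory and the method openStatementWithRetry are hypothetical; only ApplicationConfig, DruidUtils.getConn() and the Avatica statement API come from the diff itself, and reading druid.connection.retry.time.max as a maximum retry count is an assumption.

import org.apache.calcite.avatica.AvaticaConnection;
import org.apache.calcite.avatica.AvaticaStatement;

import cn.mesalab.config.ApplicationConfig;
import cn.mesalab.utils.DruidUtils;

// Hypothetical helper, not part of this repository: obtains a Druid statement with the
// statement-level query timeout applied, retrying the connection a bounded number of
// times with a fixed sleep between attempts.
public final class DruidStatementFactory {

    private DruidStatementFactory() {
    }

    public static AvaticaStatement openStatementWithRetry() throws Exception {
        int attempts = 0;
        while (true) {
            try {
                AvaticaConnection connection = DruidUtils.getConn();
                AvaticaStatement stat = connection.createStatement();
                // java.sql.Statement#setQueryTimeout takes seconds,
                // so druid.statement.query.timeout=36000 means 10 hours.
                stat.setQueryTimeout(ApplicationConfig.DRUID_STATEMENT_QUERY_TIMEOUT);
                return stat;
            } catch (Exception e) {
                attempts++;
                // Assumption: druid.connection.retry.time.max is a maximum retry count.
                if (attempts >= ApplicationConfig.DRUID_CONNECTION_RETRY_TIME_MAX) {
                    throw e;
                }
                // Per the properties comment, the sleep is roughly one batch's processing time.
                Thread.sleep(ApplicationConfig.DRUID_CONNECTION_RETRY_SLEEP_TIME);
            }
        }
    }
}

A caller is still responsible for closing both the returned statement and its underlying connection when the query finishes.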