增加statement queryTimeout (无效)

This commit is contained in:
yinjiangyi
2021-09-09 17:25:03 +08:00
parent 6e0829224a
commit e914511138
5 changed files with 5 additions and 62 deletions

View File

@@ -59,9 +59,10 @@ public class ApplicationConfig {
public static final Double BASELINE_KALMAN_P = ConfigUtils.getDoubleProperty("baseline.kalman.p");
public static final Double BASELINE_KALMAN_M = ConfigUtils.getDoubleProperty("baseline.kalman.m");
//public static final Long DRUID_READ_BATCH_TIME_GRAD_HOUR = ConfigUtils.getLongProperty("druid.read.batch.time.grad.hour");
public static final Integer THREAD_POOL_NUM = ConfigUtils.getIntProperty("thread.pool.num");
public static final Integer PARTITION_NUM_MAX = ConfigUtils.getIntProperty("druid.partition.num.max");
public static final Integer DRUID_STATEMENT_QUERY_TIMEOUT = ConfigUtils.getIntProperty("druid.statement.query.timeout");
public static final Integer DRUID_CONNECTION_RETRY_TIME_MAX = ConfigUtils.getIntProperty("druid.connection.retry.time.max");
public static final Integer DRUID_CONNECTION_RETRY_SLEEP_TIME = ConfigUtils.getIntProperty("druid.connection.retry.sleep.time");

View File

@@ -1,58 +0,0 @@
package cn.mesalab.dao;
import cn.mesalab.utils.DruidUtils;
import org.apache.calcite.avatica.AvaticaConnection;
import org.apache.calcite.avatica.AvaticaStatement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
/**
* @author yjy
* @version 1.0
* @date 2021/8/3 8:10 下午
*/
/**
 * Concurrent task that executes one Druid SQL query and collects the result
 * rows, signalling completion through a shared {@link CountDownLatch}.
 *
 * <p>Errors are logged and swallowed: on failure the task returns an empty
 * map rather than propagating the exception, so the latch is always counted
 * down exactly once.
 */
public class ReadHistoricalDruidData implements Callable<Map<String, List<Map<String, Object>>>> {
    private static final Logger LOG = LoggerFactory.getLogger(ReadHistoricalDruidData.class);

    /** SQL statement to run against Druid. */
    private final String sql;
    /** Latch decremented exactly once when this task finishes (success or failure). */
    private final CountDownLatch countDownLatch;

    /**
     * @param sql            SQL to execute against Druid
     * @param countDownLatch latch counted down when this task completes
     */
    public ReadHistoricalDruidData(
            String sql,
            CountDownLatch countDownLatch
    ) {
        this.sql = sql;
        this.countDownLatch = countDownLatch;
    }

    /**
     * Runs the query and returns the rows keyed as produced by
     * {@code DruidData.readFromDruid}.
     *
     * @return query result; empty on failure (the error is logged, not rethrown)
     */
    @Override
    public Map<String, List<Map<String, Object>>> call() {
        Map<String, List<Map<String, Object>>> resultData = new HashMap<>();
        long start = System.currentTimeMillis();
        // try-with-resources closes the statement before the connection even when
        // readFromDruid throws. The original closed neither on the exception path
        // (resource leak) and closed the connection before its statement.
        try (AvaticaConnection connection = DruidUtils.getConn();
             AvaticaStatement stat = connection.createStatement()) {
            resultData.putAll(DruidData.readFromDruid(sql, stat));
            long end = System.currentTimeMillis();
            // Parameterized logging instead of string concatenation.
            LOG.info("获取Server IP{} 运行时间:{} 剩余线程数量:{}",
                    resultData.size(), end - start, countDownLatch.getCount());
        } catch (Exception e) {
            // Log with full stack trace instead of printStackTrace(); the task
            // deliberately continues and returns an empty result on failure.
            LOG.error("Druid query failed, sql={}", sql, e);
        } finally {
            // Always release waiters, even when the query failed.
            countDownLatch.countDown();
        }
        return resultData;
    }
}

View File

@@ -2,7 +2,6 @@ package cn.mesalab.service;
import cn.mesalab.config.ApplicationConfig;
import cn.mesalab.dao.DruidData;
import cn.mesalab.dao.ReadHistoricalDruidData;
import cn.mesalab.utils.HbaseUtils;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.vavr.Tuple2;

View File

@@ -131,6 +131,7 @@ public class BaselineSingleThread extends Thread {
try {
AvaticaConnection connection = DruidUtils.getConn();
AvaticaStatement stat = connection.createStatement();
stat.setQueryTimeout(ApplicationConfig.DRUID_STATEMENT_QUERY_TIMEOUT);
String sql = DruidData.getBatchDruidQuerySql(attackTypeList, currentBatch, batchPartitionRange);
LOG.debug("Read Druid SQL: " + sql);
readFromDruid = DruidData.readFromDruid(sql, stat);

View File

@@ -68,9 +68,9 @@ monitor.frequency.bin.num=100
##########################################
################ 并发参数 #################
##########################################
#druid.read.batch.time.grad.hour=4
thread.pool.num=500
thread.pool.num=100
#druid分区字段partition_num的最大值为9999
druid.statement.query.timeout=36000
druid.partition.num.max=10000
druid.connection.retry.time.max=10000
#druid重连等待时间约为一个线程处理完成时间