1: Add a default configuration file.

2: Fix a bug where a parse exception terminated the program.
Author: qidaijie
Date: 2021-06-11 11:16:26 +08:00
parent d7f3e40340
commit 1b47ecf76b
6 changed files with 23 additions and 15 deletions

@@ -97,7 +97,7 @@ public class SnowflakeId {
     /**
      * Maximum allowed clock-rollback tolerance: 10s
      */
-    private static final long rollBackTime = 10000L;
+    private static final long ROLL_BACK_TIME = 10000L;
     private static SnowflakeId idWorker;
@@ -143,7 +143,7 @@ public class SnowflakeId {
     private synchronized long nextId() {
         long timestamp = timeGen();
         // Allow a bounded rollback: if the system clock rolled back by less than rollBackTime, wait for it to catch up
-        if (lastTimestamp - timestamp > 0 && lastTimestamp - timestamp < rollBackTime) {
+        if (lastTimestamp - timestamp > 0 && lastTimestamp - timestamp < ROLL_BACK_TIME) {
             timestamp = tilNextMillis(lastTimestamp);
         }
         // If the current time is earlier than the timestamp of the last generated ID, the system clock has rolled back and an exception should be thrown
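The rollback guard above only works because tilNextMillis() blocks until the clock catches up with the last issued timestamp. A minimal sketch of that helper, assuming the standard snowflake implementation (the method itself is not shown in this diff):

    // Spin-wait until the clock passes lastTimestamp; with
    // ROLL_BACK_TIME = 10000L this blocks for at most ~10 seconds.
    private long tilNextMillis(long lastTimestamp) {
        long timestamp = timeGen();
        while (timestamp <= lastTimestamp) {
            timestamp = timeGen();
        }
        return timestamp;
    }

    private long timeGen() {
        return System.currentTimeMillis();
    }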

@@ -35,7 +35,7 @@ public class HBaseUtils {
     private static HBaseUtils hBaseUtils;
-    private static void getHBaseInstance() {
+    private static void getInstance() {
         hBaseUtils = new HBaseUtils();
     }
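Callers guard this with an unsynchronized null check (see the change() and getAccount() hunks below), so two threads can still race into the initializer. A hedged alternative, not part of this commit:

    // Hypothetical thread-safe variant: synchronizing the initializer
    // and re-checking inside removes the initialization race.
    private static synchronized void getInstance() {
        if (hBaseUtils == null) {
            hBaseUtils = new HBaseUtils();
        }
    }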
@@ -47,14 +47,14 @@ public class HBaseUtils {
         zookeeperIp = FlowWriteConfig.HBASE_ZOOKEEPER_SERVERS;
         hBaseTable = FlowWriteConfig.HBASE_TABLE_NAME;
         // Get the connection
-        getHbaseConn();
+        getConnection();
         // Pull all entries
         getAll();
         // Scheduled refresh
-        updateHBaseCache();
+        updateCache();
     }
-    private static void getHbaseConn() {
+    private static void getConnection() {
         try {
             // Manages the HBase configuration
             Configuration configuration = HBaseConfiguration.create();
@@ -78,7 +78,7 @@ public class HBaseUtils {
      */
     private static void change() {
         if (hBaseUtils == null) {
-            getHBaseInstance();
+            getInstance();
         }
         long nowTime = System.currentTimeMillis();
         timestampsFilter(time - 1000, nowTime + 500);
@@ -164,7 +164,7 @@ public class HBaseUtils {
     /**
      * Validation timer: re-validates periodically and fetches a new Cookie
      */
-    private void updateHBaseCache() {
+    private void updateCache() {
 //        ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1,
 //                new BasicThreadFactory.Builder().namingPattern("hbase-change-pool-%d").daemon(true).build());
         ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
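The diff cuts off before the scheduling call, so the wiring between the executor and the refresh is not visible here. A minimal sketch of the likely continuation of updateCache(), with the interval as a pure assumption:

    // Assumed continuation: run change() on a fixed schedule.
    // The 60-second period is illustrative only; the real value is not in the diff.
    executorService.scheduleAtFixedRate(HBaseUtils::change, 60, 60, TimeUnit.SECONDS);

The commented-out BasicThreadFactory variant would have named the pool thread and marked it as a daemon, which is worth restoring for debuggability.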
@@ -192,7 +192,7 @@ public class HBaseUtils {
     public static String getAccount(String clientIp) {
         if (hBaseUtils == null) {
-            getHBaseInstance();
+            getInstance();
         }
         return subIdMap.get(clientIp);

@@ -18,7 +18,6 @@ import java.io.InputStreamReader;
  * @author qidaijie
  */
 public class HttpClientUtil {
-//    private static final int MAX_STR_LEN = 512000;
     private static final Log logger = LogFactory.get();
     /**

@@ -124,7 +124,7 @@ public class JsonParseUtil {
      * @return a map used to reflectively build objects of the schema type
      */
     public static HashMap<String, Class> getMapFromHttp(String http) {
-        HashMap<String, Class> map = new HashMap<>();
+        HashMap<String, Class> map = new HashMap<>(16);
         String schema = HttpClientUtil.requestByGetMethod(http);
         Object data = JSON.parseObject(schema).get("data");
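new HashMap<>(16) only makes the default initial capacity explicit; it changes nothing at runtime. Presizing pays off when the expected entry count is known, as in this hypothetical sizing rule:

    // Size the map so it never rehashes: capacity > expected / loadFactor.
    // 'expected' is a made-up count; HashMap's default load factor is 0.75.
    int expected = 200;
    HashMap<String, Class> map = new HashMap<>((int) (expected / 0.75f) + 1);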

@@ -76,9 +76,8 @@ public class KafkaLogSend {
         properties.put("batch.size", DefaultProConfig.BATCH_SIZE);
         properties.put("buffer.memory", DefaultProConfig.BUFFER_MEMORY);
         properties.put("max.request.size", DefaultProConfig.MAX_REQUEST_SIZE);
-//        properties.put("compression.type", FlowWriteConfig.KAFKA_COMPRESSION_TYPE);
-        /**
+        /*
          * Kafka throttling configuration - 20201117
          */
         properties.put(ProducerConfig.CLIENT_ID_CONFIG, FlowWriteConfig.PRODUCER_CLIENT_ID);
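The client.id set here is what makes the throttling work: Kafka broker-side quotas are matched against it. A hedged sketch of the pairing; the id and byte rate are placeholders, not values from this repository:

    // Producer side: a stable client.id the broker can key quotas on.
    properties.put(ProducerConfig.CLIENT_ID_CONFIG, "flow-writer"); // placeholder

    // Broker side (CLI, not Java): cap that client at ~1 MB/s produce rate.
    //   kafka-configs.sh --alter \
    //     --add-config 'producer_byte_rate=1048576' \
    //     --entity-type clients --entity-name flow-writer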

@@ -67,7 +67,9 @@ public class DistributedLock implements Lock, Watcher {
         }
     }
-    // Node watcher
+    /**
+     * Node watcher
+     */
     @Override
     public void process(WatchedEvent event) {
         if (this.countDownLatch != null) {
@@ -140,7 +142,15 @@ public class DistributedLock implements Lock, Watcher {
         return false;
     }
-    // Wait for the lock
+    /**
+     * Wait for the lock
+     *
+     * @param prev name of the lock node to wait on
+     * @param waitTime how long to wait
+     * @return
+     * @throws KeeperException
+     * @throws InterruptedException
+     */
     private boolean waitForLock(String prev, long waitTime) throws KeeperException, InterruptedException {
         Stat stat = zk.exists(ROOT_LOCK + "/" + prev, true);
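The watcher and latch above imply the classic ZooKeeper lock wait: watch the predecessor node and block until process() releases the latch or the timeout elapses. A minimal sketch of the rest of waitForLock(), assuming the fields shown (zk, ROOT_LOCK, countDownLatch) and java.util.concurrent imports; everything past the exists() call is an assumption, since the diff ends here:

    private boolean waitForLock(String prev, long waitTime) throws KeeperException, InterruptedException {
        // Register a watch on the predecessor node; process() counts the
        // latch down when its node-deleted event arrives.
        Stat stat = zk.exists(ROOT_LOCK + "/" + prev, true);
        if (stat != null) {
            this.countDownLatch = new CountDownLatch(1);
            // true if the predecessor vanished in time, false on timeout
            boolean acquired = this.countDownLatch.await(waitTime, TimeUnit.MILLISECONDS);
            this.countDownLatch = null;
            return acquired;
        }
        // Predecessor already gone: nothing to wait for.
        return true;
    }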