Update to version 2109
@@ -30,12 +30,12 @@ public class SnowflakeId {
     /**
      * Number of bits occupied by the machine id
      */
-    private final long workerIdBits = 7L;
+    private final long workerIdBits = 8L;

     /**
      * Number of bits occupied by the data-center id
      */
-    private final long dataCenterIdBits = 6L;
+    private final long dataCenterIdBits = 5L;

     /**
      * Maximum supported machine id; the result is 63 (this shift formula quickly computes the largest decimal value an n-bit binary number can represent)
@@ -74,12 +74,12 @@ public class SnowflakeId {
     private final long sequenceMask = -1L ^ (-1L << sequenceBits);

     /**
-     * Worker machine ID (0~127)
+     * Worker machine ID (0~255)
      */
     private long workerId;

     /**
-     * Data-center ID (0~63)
+     * Data-center ID (0~31)
      */
     private long dataCenterId;
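The widened ranges in these Javadoc comments follow from the same shift trick the class already uses for sequenceMask: -1L ^ (-1L << n) is the largest value an n-bit field can hold. A minimal, self-contained check of the arithmetic (the class name is illustrative, not part of the project):

    public class SnowflakeBitMathCheck {
        public static void main(String[] args) {
            long workerIdBits = 8L;      // was 7L before this commit
            long dataCenterIdBits = 5L;  // was 6L before this commit
            // -1L ^ (-1L << n) leaves exactly the low n bits set, i.e. 2^n - 1.
            long maxWorkerId = -1L ^ (-1L << workerIdBits);
            long maxDataCenterId = -1L ^ (-1L << dataCenterIdBits);
            System.out.println(maxWorkerId);      // 255 -> worker ID range 0~255
            System.out.println(maxDataCenterId);  // 31  -> data-center ID range 0~31
        }
    }

Note that 8 + 5 equals the old 7 + 6, so the combined width of the two fields, and hence the overall 64-bit snowflake layout, is unchanged; capacity simply moves from data centers to workers per data center.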
@@ -6,7 +6,6 @@ import cn.hutool.log.Log;
 import cn.hutool.log.LogFactory;
 import com.jayway.jsonpath.InvalidPathException;
 import com.jayway.jsonpath.JsonPath;
-import com.zdjizhi.common.DefaultProConfig;
 import com.zdjizhi.common.FlowWriteConfig;
 import com.zdjizhi.utils.FormatUtils;
 import com.zdjizhi.utils.IpLookup;
@@ -34,12 +33,12 @@ class TransFunction {
      * Helper class for the IP geolocation library
      */
     private static IpLookup ipLookup = new IpLookup.Builder(false)
-            .loadDataFileV4(FlowWriteConfig.IP_LIBRARY + "ip_v4.mmdb")
-            .loadDataFileV6(FlowWriteConfig.IP_LIBRARY + "ip_v6.mmdb")
-            .loadDataFilePrivateV4(FlowWriteConfig.IP_LIBRARY + "ip_private_v4.mmdb")
-            .loadDataFilePrivateV6(FlowWriteConfig.IP_LIBRARY + "ip_private_v6.mmdb")
-            .loadAsnDataFile(FlowWriteConfig.IP_LIBRARY + "asn_v4.mmdb")
-            .loadAsnDataFileV6(FlowWriteConfig.IP_LIBRARY + "asn_v6.mmdb")
+            .loadDataFileV4(FlowWriteConfig.TOOLS_LIBRARY + "ip_v4.mmdb")
+            .loadDataFileV6(FlowWriteConfig.TOOLS_LIBRARY + "ip_v6.mmdb")
+            .loadDataFilePrivateV4(FlowWriteConfig.TOOLS_LIBRARY + "ip_private_v4.mmdb")
+            .loadDataFilePrivateV6(FlowWriteConfig.TOOLS_LIBRARY + "ip_private_v6.mmdb")
+            .loadAsnDataFile(FlowWriteConfig.TOOLS_LIBRARY + "asn_v4.mmdb")
+            .loadAsnDataFileV6(FlowWriteConfig.TOOLS_LIBRARY + "asn_v6.mmdb")
             .build();

     /**
@@ -93,9 +92,9 @@ class TransFunction {
      */
     static String radiusMatch(String ip) {
         String account = HBaseUtils.getAccount(ip.trim());
-        if (StringUtil.isBlank(account)) {
-            logger.warn("HashMap get account is null, Ip is :" + ip);
-        }
+//        if (StringUtil.isBlank(account)) {
+//            logger.warn("HashMap get account is null, Ip is :" + ip);
+//        }
         return account;
     }
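Net effect of the two TransFunction hunks: the six .mmdb databases are loaded unchanged but from FlowWriteConfig.TOOLS_LIBRARY instead of FlowWriteConfig.IP_LIBRARY, and the blank-account check in radiusMatch is commented out, so the method now returns a possibly blank account without logging a per-IP warning.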
@@ -2,7 +2,6 @@ package com.zdjizhi.utils.hbase;

 import cn.hutool.log.Log;
 import cn.hutool.log.LogFactory;
-import com.zdjizhi.common.DefaultProConfig;
 import com.zdjizhi.common.FlowWriteConfig;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -46,7 +45,7 @@ public class HBaseUtils {
      */
     private HBaseUtils() {
         zookeeperIp = FlowWriteConfig.HBASE_ZOOKEEPER_SERVERS;
-        hBaseTable = DefaultProConfig.HBASE_TABLE_NAME;
+        hBaseTable = FlowWriteConfig.HBASE_TABLE_NAME;
         // obtain the connection
         getConnection();
         // pull all
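With the DefaultProConfig import gone, the table name is now read from FlowWriteConfig like the ZooKeeper servers above it; the same DefaultProConfig-to-FlowWriteConfig consolidation appears again in Producer below.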
src/main/java/com/zdjizhi/utils/kafka/CertUtils.java (new file, 36 lines)
@@ -0,0 +1,36 @@
+package com.zdjizhi.utils.kafka;
+
+import com.zdjizhi.common.FlowWriteConfig;
+import org.apache.kafka.common.config.SslConfigs;
+
+import java.util.Properties;
+
+/**
+ * @author qidaijie
+ * @Package com.zdjizhi.utils.kafka
+ * @Description:
+ * @date 2021/9/6 10:37
+ */
+class CertUtils {
+    static void chooseCert(String type, Properties properties) {
+        switch (type) {
+            case "SSL":
+                properties.put("security.protocol", "SSL");
+                properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
+                properties.put("ssl.keystore.location", FlowWriteConfig.TOOLS_LIBRARY + "keystore.jks");
+                properties.put("ssl.keystore.password", FlowWriteConfig.KAFKA_PIN);
+                properties.put("ssl.truststore.location", FlowWriteConfig.TOOLS_LIBRARY + "truststore.jks");
+                properties.put("ssl.truststore.password", FlowWriteConfig.KAFKA_PIN);
+                properties.put("ssl.key.password", FlowWriteConfig.KAFKA_PIN);
+                break;
+            case "SASL":
+                properties.put("security.protocol", "SASL_PLAINTEXT");
+                properties.put("sasl.mechanism", "PLAIN");
+                properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username="
+                        + FlowWriteConfig.KAFKA_USER + " password=" + FlowWriteConfig.KAFKA_PIN + ";");
+                break;
+            default:
+        }
+    }
+}
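One detail worth flagging in the SASL branch above: the generated sasl.jaas.config string carries no quotes around the username and password values. JAAS option values are conventionally double-quoted (Kafka's documentation always shows them quoted), and unquoted values only parse when they are plain single tokens, so if KAFKA_USER or KAFKA_PIN can contain special characters the quoted form is safer. A hedged sketch of that variant, not what the commit ships:

    properties.put("sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required"
                    + " username=\"" + FlowWriteConfig.KAFKA_USER + "\""
                    + " password=\"" + FlowWriteConfig.KAFKA_PIN + "\";");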
@@ -4,6 +4,7 @@ import com.zdjizhi.common.FlowWriteConfig;
 import org.apache.flink.api.common.serialization.SimpleStringSchema;
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.config.SslConfigs;


 import java.util.Properties;
@@ -25,10 +26,8 @@ public class Consumer {
         properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
         properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

-        /*
-         * Kafka rate-limiting configuration - 20201117
-         */
 //        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, FlowWriteConfig.CONSUMER_CLIENT_ID);
+        CertUtils.chooseCert(FlowWriteConfig.KAFKA_SOURCE_PROTOCOL, properties);

         return properties;
     }
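The new call delegates protocol setup to CertUtils: chooseCert mutates the Properties in place according to the configured protocol and is a no-op for any value other than "SSL" or "SASL", since the switch falls through an empty default. A small same-package usage sketch (the broker address is a placeholder, not from the project config):

    Properties props = new Properties();
    props.put("bootstrap.servers", "broker1:9093");  // placeholder address
    CertUtils.chooseCert("SSL", props);        // adds keystore/truststore settings
    CertUtils.chooseCert("PLAINTEXT", props);  // no-op: empty default branch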
@@ -1,12 +1,12 @@
 package com.zdjizhi.utils.kafka;

-import com.zdjizhi.common.DefaultProConfig;
 import com.zdjizhi.common.FlowWriteConfig;
 import org.apache.flink.api.common.serialization.SimpleStringSchema;
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.common.config.SslConfigs;

+import java.util.Optional;
 import java.util.Properties;
 /**
@@ -20,21 +20,17 @@ public class Producer {
     private static Properties createProducerConfig() {
         Properties properties = new Properties();
         properties.put("bootstrap.servers", FlowWriteConfig.OUTPUT_KAFKA_SERVERS);
 //        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
 //        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
         properties.put("acks", FlowWriteConfig.PRODUCER_ACK);
-        properties.put("retries", DefaultProConfig.RETRIES);
-        properties.put("linger.ms", DefaultProConfig.LINGER_MS);
-        properties.put("request.timeout.ms", DefaultProConfig.REQUEST_TIMEOUT_MS);
-        properties.put("batch.size", DefaultProConfig.BATCH_SIZE);
-        properties.put("buffer.memory", DefaultProConfig.BUFFER_MEMORY);
-        properties.put("max.request.size", DefaultProConfig.MAX_REQUEST_SIZE);
+        properties.put("retries", FlowWriteConfig.RETRIES);
+        properties.put("linger.ms", FlowWriteConfig.LINGER_MS);
+        properties.put("request.timeout.ms", FlowWriteConfig.REQUEST_TIMEOUT_MS);
+        properties.put("batch.size", FlowWriteConfig.BATCH_SIZE);
+        properties.put("buffer.memory", FlowWriteConfig.BUFFER_MEMORY);
+        properties.put("max.request.size", FlowWriteConfig.MAX_REQUEST_SIZE);
         properties.put("compression.type", FlowWriteConfig.PRODUCER_KAFKA_COMPRESSION_TYPE);
+
+        CertUtils.chooseCert(FlowWriteConfig.KAFKA_SINK_PROTOCOL, properties);

         /**
          * Kafka rate-limiting configuration - 20201117
          */
 //        properties.put(ProducerConfig.CLIENT_ID_CONFIG, FlowWriteConfig.PRODUCER_CLIENT_ID);
 //        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, FlowWriteConfig.PRODUCER_KAFKA_COMPRESSION_TYPE);
         return properties;
     }
@@ -43,9 +39,10 @@ public class Producer {
         FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<String>(
                 FlowWriteConfig.OUTPUT_KAFKA_TOPIC,
                 new SimpleStringSchema(),
-                createProducerConfig());
+                createProducerConfig(), Optional.empty());
+
         kafkaProducer.setLogFailuresOnly(false);

 //        kafkaProducer.setWriteTimestampToKafka(true);

         return kafkaProducer;
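The added Optional.empty() argument is the connector's optional custom-partitioner parameter. As far as the Flink Kafka connector API goes, the three-argument overload defaults to FlinkFixedPartitioner, where each sink subtask writes all of its records to a single partition, while an empty Optional hands partitioning to the Kafka producer itself so records spread across the topic's partitions. A sketch of the two call shapes (topic name is a placeholder; FlinkKafkaPartitioner is org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner):

    // Connector default: FlinkFixedPartitioner, one subtask -> one partition.
    new FlinkKafkaProducer<String>("out-topic", new SimpleStringSchema(), props);

    // What this commit selects: partitioning left to the Kafka producer.
    new FlinkKafkaProducer<String>("out-topic", new SimpleStringSchema(), props,
            Optional.<FlinkKafkaPartitioner<String>>empty());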
@@ -19,6 +19,7 @@ import java.util.concurrent.CountDownLatch;
  */
 public class ZookeeperUtils implements Watcher {
     private static final Log logger = LogFactory.get();
+    private static final int ID_MAX = 255;

     private ZooKeeper zookeeper;
@@ -46,7 +47,7 @@ public class ZookeeperUtils implements Watcher {
         connectZookeeper(zookeeperIp);
         Stat stat = zookeeper.exists(path, true);
         workerId = Integer.parseInt(getNodeDate(path));
-        if (workerId > 63) {
+        if (workerId > ID_MAX) {
             workerId = 0;
             zookeeper.setData(path, "1".getBytes(), stat.getVersion());
         } else {
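The new ID_MAX bound, replacing the magic number 63, matches the SnowflakeId change at the top of this commit: with workerIdBits raised to 8, worker IDs may run 0~255, so the ZooKeeper-allocated counter now wraps at 255 instead of 63.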