Merge branch 'v3.20.12.12-ack-ratelimit-con-eal4' into 'master'

V3.20.12.12 ack ratelimit con eal4

See merge request bigdata/tsg/log-stream-completion!4
戚岱杰
2021-04-23 02:14:42 +00:00
45 changed files with 1517 additions and 2494 deletions

pom.xml
View File

@@ -3,22 +3,38 @@
<modelVersion>4.0.0</modelVersion>
<groupId>cn.ac.iie</groupId>
<artifactId>log-stream-completion</artifactId>
<version>0.0.1-SNAPSHOT</version>
<artifactId>log-stream-completion-schema</artifactId>
<version>v3.21.03.25-eal4</version>
<packaging>jar</packaging>
<name>log-stream-completion</name>
<name>log-stream-completion-schema</name>
<url>http://maven.apache.org</url>
<repositories>
<repository>
<id>nexus</id>
<name>Team Nexus Repository</name>
<url>http://192.168.10.125:8099/content/groups/public</url>
<url>http://192.168.40.125:8099/content/groups/public</url>
</repository>
<repository>
<id>maven-ali</id>
<url>http://maven.aliyun.com/nexus/content/groups/public/</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
<updatePolicy>always</updatePolicy>
<checksumPolicy>fail</checksumPolicy>
</snapshots>
</repository>
</repositories>
<build>
<plugins>
<plugin>
@@ -35,7 +51,7 @@
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>cn.ac.iie.topology.LogFlowWriteTopology</mainClass>
<mainClass>com.zdjizhi.topology.LogFlowWriteTopology</mainClass>
</transformer>
<transformer
implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
@@ -51,6 +67,19 @@
</executions>
</plugin>
<plugin>
<groupId>io.github.zlika</groupId>
<artifactId>reproducible-build-maven-plugin</artifactId>
<version>0.2</version>
<executions>
<execution>
<goals>
<goal>strip-jar</goal>
</goals>
<phase>package</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -61,7 +90,9 @@
<target>1.8</target>
</configuration>
</plugin>
</plugins>
<resources>
<resource>
<directory>properties</directory>
@@ -71,7 +102,7 @@
<filtering>false</filtering>
</resource>
<resource>
<directory>src/main/java</directory>
<directory>properties</directory>
<includes>
<include>log4j.properties</include>
</includes>
@@ -84,6 +115,8 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<kafka.version>1.0.0</kafka.version>
<storm.version>1.0.2</storm.version>
<hbase.version>2.2.3</hbase.version>
<hadoop.version>2.7.1</hadoop.version>
</properties>
<dependencies>
@@ -129,12 +162,6 @@
<version>${storm.version}</version>
</dependency>
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.8.1</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@@ -145,13 +172,19 @@
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.47</version>
<version>1.2.70</version>
</dependency>
<dependency>
<groupId>cglib</groupId>
<artifactId>cglib-nodep</artifactId>
<version>3.2.4</version>
</dependency>
<dependency>
<groupId>com.zdjizhi</groupId>
<artifactId>galaxy</artifactId>
<version>1.0.1</version>
<version>1.0.3</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -181,6 +214,136 @@
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>${hbase.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-server -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${hbase.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.3.2</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.2</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.4.1</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient_pushgateway</artifactId>
<version>0.9.0</version>
</dependency>
<dependency>
<groupId>cn.hutool</groupId>
<artifactId>hutool-all</artifactId>
<version>5.5.2</version>
</dependency>
<!--&lt;!&ndash; https://mvnrepository.com/artifact/io.github.zlika/reproducible-build-maven-plugin &ndash;&gt;-->
<!--<dependency>-->
<!--<groupId>io.github.zlika</groupId>-->
<!--<artifactId>reproducible-build-maven-plugin</artifactId>-->
<!--<version>0.12</version>-->
<!--</dependency>-->
</dependencies>
</project>

View File

@@ -0,0 +1,17 @@
#Number of retries the producer attempts
retries=0
#Milliseconds to wait after a batch is created; once elapsed the batch is sent even if it is not full
linger.ms=5
#If no response is received before this timeout, the client resends the request when necessary
request.timeout.ms=30000
#The producer sends records in batches; batch size in bytes (default: 16384)
batch.size=262144
#Total memory (bytes) the producer uses to buffer records
buffer.memory=67108864
#Maximum size (bytes) of a single request sent to Kafka (default: 1048576)
max.request.size=5242880
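The keys above are standard Kafka producer tuning options. As a rough illustration of how such a file is consumed, the sketch below loads it from the classpath and hands it to a producer; the resource name kafka_producer.properties, the broker address and the serializer choices are assumptions for the example, not taken from this merge request.

import java.io.InputStream;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Load the tuning keys above (retries, linger.ms, batch.size, buffer.memory, ...)
        // from a classpath resource; the file name is assumed here.
        try (InputStream in = ProducerSketch.class.getClassLoader()
                .getResourceAsStream("kafka_producer.properties")) {
            if (in != null) {
                props.load(in);
            }
        }
        // Connection and serialization settings still have to be supplied separately.
        props.put("bootstrap.servers", "192.168.44.12:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("test-result", "{\"uid\":1}"));
            producer.flush();
        }
    }
}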

View File

@@ -1,19 +0,0 @@
#*****************Jedis connection settings*********************
#Redis server IP
redis.ip=192.168.40.123
#Redis server port
redis.port=6379
#Timeout (ms) for establishing a connection to the server
redis.timeout=3000
#************************Jedis pool settings*******************
#Maximum number of active connections in the Jedis pool
redis.pool.maxActive=200
#Maximum number of idle connections in the Jedis pool
redis.pool.maxIdle=5
#Maximum time (ms) to wait for an available connection when the pool has none to return; the default -1 means never time out.
#If the wait time is exceeded, a JedisConnectionException is thrown
redis.pool.maxWait=-1
#Whether to validate connections when borrowing from the pool
redis.pool.testOnBorrow=true
#Whether to validate connections when returning them to the pool
redis.pool.testOnReturn=true

View File

@@ -1,42 +1,68 @@
#--------------------------------Address configuration------------------------------#
#Managed input Kafka brokers
#bootstrap.servers=10.4.35.7:9092,10.4.35.8:9092,10.4.35.9:9092
bootstrap.servers=192.168.6.200:9093,192.168.6.200:9094,192.168.6.200:9095
input.kafka.servers=192.168.44.12:9092
#ZooKeeper servers
zookeeper.servers=192.168.6.200:2181
#zookeeper.servers=192.168.40.207:2181
#Managed output Kafka brokers
output.kafka.servers=192.168.44.12:9092
#latest/earliest
auto.offset.reset=latest
#ZooKeeper servers, used for allocating log_id
zookeeper.servers=192.168.44.12:2181
#HBase ZooKeeper servers, used to connect to HBase
hbase.zookeeper.servers=192.168.44.12:2181
#IP location library path
ip.library=D:\\K18-Phase2\\tsgSpace\\dat\\
#ip.library=/home/bigdata/topology/dat/
#Gateway schema location
schema.http=http://192.168.44.12:9999/metadata/schema/v1/fields/security_event_log
#Topic name on the Kafka broker
kafka.topic=SESSION-RECORD-LOG
#kafka.topic=Snowflake-test
#kafka.topic=CONNECTION-RECORD-LOG
kafka.topic=test
#Consumer group for the input topic; the spout's consumed offsets are stored under this id, which can be named after the topology, so the next read does not consume duplicate data
group.id=session-record-log-z
group.id=connection-record-log-20200818-1-test
#client-id of the consumer reading from Kafka
consumer.client.id=consumer-connection-record
#client-id of the producer writing back to Kafka
producer.client.id=producer-connection-record
#Producer compression mode: none or snappy
producer.kafka.compression.type=snappy
#Producer acks
producer.ack=1
#latest/earliest: consume from the current offset or from the beginning
auto.offset.reset=latest
#Output topic
#results.output.topic=SESSION-TEST-COMPLETED-LOG
results.output.topic=SESSION-RECORD-COMPLETED-LOG
#results.output.topic=CONNECTION-RECORD-COMPLETED-LOG
results.output.topic=test-result
#--------------------------------Topology configuration------------------------------#
#storm topology workers
topology.workers=1
topology.workers=2
#Spout parallelism; recommended to equal the number of Kafka partitions
spout.parallelism=3
#Parallelism of the completion bolt, a multiple of the worker count
datacenter.bolt.parallelism=3
completion.bolt.parallelism=6
#Parallelism of the bolt writing to Kafka
kafka.bolt.parallelism=3
#Parallelism of the bolt writing to Kafka
kafka.bolt.parallelism=6
#IP location library path
ip.library=/dat/
#ack setting: 1 enables acking, 0 disables acking
topology.num.acks=0
#Kafka batch size (records)
batch.insert.num=5000
batch.insert.num=2000
#Data-center UID
data.center.id.num=15
@@ -44,26 +70,27 @@ data.center.id.num=15
#Tick tuple frequency (seconds)
topology.tick.tuple.freq.secs=5
#HBase refresh interval (seconds)
hbase.tick.tuple.freq.secs=60
#--------------------------------Default-value configuration------------------------------#
#Limits how fast the spout receives when bolt throughput is the bottleneck; in theory only effective when acking is enabled
topology.config.max.spout.pending=150000
#ack setting: 1 enables acking, 0 disables acking
topology.num.acks=0
#hbase table name
hbase.table.name=subscriber_info
#Spout receive sleep time
topology.spout.sleep.time=1
#Used to filter the IPs that are matched to subscriber names
check.ip.scope=10,100,192
#Maximum number of failed sends to Kafka allowed
max.failure.num=20
#InfluxDB address
influx.ip=http://192.168.40.151:8086
#Default mail charset
mail.default.charset=UTF-8
#InfluxDB username
influx.username=admin
#Whether logs need completion; if not, they are emitted unchanged
log.need.complete=yes
#InfluxDB password
influx.password=admin
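For reference, the two ack knobs above act at different layers: topology.num.acks feeds Storm's acker count (as createTopologyConfig() does later in this diff), while producer.ack maps to the Kafka producer acks setting. A minimal sketch of that wiring follows; the producer side is an assumption about how KafkaLogSend might apply it, not code from this merge request.

import java.util.Properties;
import org.apache.storm.Config;

public class AckWiringSketch {
    // Storm side: with topology.num.acks=0 no acker executors run,
    // so max.spout.pending has no throttling effect.
    static Config stormConfig(int numAcks, int maxSpoutPending) {
        Config conf = new Config();
        conf.setNumAckers(numAcks);
        conf.setMaxSpoutPending(maxSpoutPending);
        return conf;
    }

    // Kafka side: producer.ack=1 means the partition leader alone must acknowledge a write.
    static Properties producerConfig(String ack, String clientId) {
        Properties props = new Properties();
        props.put("acks", ack);            // "0", "1" or "all"
        props.put("client.id", clientId);  // also the key brokers use for client quotas / rate limiting
        return props;
    }
}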

View File

@@ -1,672 +0,0 @@
package cn.ac.iie.bean;
import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.support.spring.annotation.FastJsonFilter;
/**
* @author qidaijie
*/
public class SessionRecordLog {
private long uid;
private int policy_id;
private long action;
private int start_time;
private int end_time;
private long recv_time;
private String trans_proto;
private String app_proto;
private int addr_type;
private String server_ip;
private String client_ip;
private int server_port;
private int client_port;
private int service;
private int entrance_id;
private int device_id;
private int Link_id;
private String isp;
private int encap_type;
private int direction;
private int stream_dir;
private String cap_ip;
private String addr_list;
private String server_location;
private String client_location;
private String client_asn;
private String server_asn;
private String subscribe_id;
private long con_duration_ms;
private String url;
private String host;
private String domain;
private String category;
private String req_line;
private String res_line;
private String cookie;
private String referer;
private String user_agent;
private String content_len;
private String content_type;
private String set_cookie;
private String req_header;
private String resp_header;
private String req_body_key;
private String req_body;
private String res_body_key;
private String resp_body;
private String version;
private String sni;
private String san;
private String cn;
private int app_id;
private int protocol_id;
private long con_latency_ms;
private int pinningst;
private int intercept_state;
private long ssl_server_side_latency;
private long ssl_client_side_latency;
private String ssl_server_side_version;
private String ssl_client_side_version;
private int ssl_cert_verify;
private String stream_trace_id;
private String ssl_error;
private long c2s_pkt_num;
private long S2c_pkt_num;
private long c2s_byte_num;
private long s2c_byte_num;
private String nas_ip;
private String framed_ip;
private String account;
private int packet_type;
private int has_dup_traffic;
private String stream_error;
public SessionRecordLog() {
}
public long getUid() {
return uid;
}
public void setUid(long uid) {
this.uid = uid;
}
public int getPolicy_id() {
return policy_id;
}
public void setPolicy_id(int policy_id) {
this.policy_id = policy_id;
}
public long getAction() {
return action;
}
public void setAction(long action) {
this.action = action;
}
public int getStart_time() {
return start_time;
}
public void setStart_time(int start_time) {
this.start_time = start_time;
}
public int getEnd_time() {
return end_time;
}
public void setEnd_time(int end_time) {
this.end_time = end_time;
}
public String getSsl_error() {
return ssl_error;
}
public void setSsl_error(String ssl_error) {
this.ssl_error = ssl_error;
}
public String getApp_proto() {
return app_proto;
}
public void setApp_proto(String app_proto) {
this.app_proto = app_proto;
}
public long getRecv_time() {
return recv_time;
}
public void setRecv_time(long recv_time) {
this.recv_time = recv_time;
}
public String getTrans_proto() {
return trans_proto;
}
public void setTrans_proto(String trans_proto) {
this.trans_proto = trans_proto;
}
public int getAddr_type() {
return addr_type;
}
public void setAddr_type(int addr_type) {
this.addr_type = addr_type;
}
public String getServer_ip() {
return server_ip;
}
public void setServer_ip(String server_ip) {
this.server_ip = server_ip;
}
public String getClient_ip() {
return client_ip;
}
public void setClient_ip(String client_ip) {
this.client_ip = client_ip;
}
public int getServer_port() {
return server_port;
}
public void setServer_port(int server_port) {
this.server_port = server_port;
}
public int getClient_port() {
return client_port;
}
public void setClient_port(int client_port) {
this.client_port = client_port;
}
public int getService() {
return service;
}
public void setService(int service) {
this.service = service;
}
public int getEntrance_id() {
return entrance_id;
}
public void setEntrance_id(int entrance_id) {
this.entrance_id = entrance_id;
}
public int getDevice_id() {
return device_id;
}
public void setDevice_id(int device_id) {
this.device_id = device_id;
}
public int getLink_id() {
return Link_id;
}
public void setLink_id(int link_id) {
Link_id = link_id;
}
public String getIsp() {
return isp;
}
public void setIsp(String isp) {
this.isp = isp;
}
public int getEncap_type() {
return encap_type;
}
public void setEncap_type(int encap_type) {
this.encap_type = encap_type;
}
public int getDirection() {
return direction;
}
public void setDirection(int direction) {
this.direction = direction;
}
public int getStream_dir() {
return stream_dir;
}
public void setStream_dir(int stream_dir) {
this.stream_dir = stream_dir;
}
public String getCap_ip() {
return cap_ip;
}
public void setCap_ip(String cap_ip) {
this.cap_ip = cap_ip;
}
public String getAddr_list() {
return addr_list;
}
public void setAddr_list(String addr_list) {
this.addr_list = addr_list;
}
public String getServer_location() {
return server_location;
}
public void setServer_location(String server_location) {
this.server_location = server_location;
}
public String getClient_location() {
return client_location;
}
public void setClient_location(String client_location) {
this.client_location = client_location;
}
public String getClient_asn() {
return client_asn;
}
public void setClient_asn(String client_asn) {
this.client_asn = client_asn;
}
public String getServer_asn() {
return server_asn;
}
public void setServer_asn(String server_asn) {
this.server_asn = server_asn;
}
public String getSubscribe_id() {
return subscribe_id;
}
public void setSubscribe_id(String subscribe_id) {
this.subscribe_id = subscribe_id;
}
public long getCon_duration_ms() {
return con_duration_ms;
}
public void setCon_duration_ms(long con_duration_ms) {
this.con_duration_ms = con_duration_ms;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getDomain() {
return domain;
}
public void setDomain(String domain) {
this.domain = domain;
}
public String getCategory() {
return category;
}
public void setCategory(String category) {
this.category = category;
}
public String getReq_line() {
return req_line;
}
public void setReq_line(String req_line) {
this.req_line = req_line;
}
public String getRes_line() {
return res_line;
}
public void setRes_line(String res_line) {
this.res_line = res_line;
}
public String getCookie() {
return cookie;
}
public void setCookie(String cookie) {
this.cookie = cookie;
}
public String getReferer() {
return referer;
}
public void setReferer(String referer) {
this.referer = referer;
}
public String getUser_agent() {
return user_agent;
}
public void setUser_agent(String user_agent) {
this.user_agent = user_agent;
}
public String getContent_len() {
return content_len;
}
public void setContent_len(String content_len) {
this.content_len = content_len;
}
public String getContent_type() {
return content_type;
}
public void setContent_type(String content_type) {
this.content_type = content_type;
}
public String getSet_cookie() {
return set_cookie;
}
public void setSet_cookie(String set_cookie) {
this.set_cookie = set_cookie;
}
public String getReq_header() {
return req_header;
}
public void setReq_header(String req_header) {
this.req_header = req_header;
}
public String getResp_header() {
return resp_header;
}
public void setResp_header(String resp_header) {
this.resp_header = resp_header;
}
public String getReq_body_key() {
return req_body_key;
}
public void setReq_body_key(String req_body_key) {
this.req_body_key = req_body_key;
}
public String getReq_body() {
return req_body;
}
public void setReq_body(String req_body) {
this.req_body = req_body;
}
public String getRes_body_key() {
return res_body_key;
}
public void setRes_body_key(String res_body_key) {
this.res_body_key = res_body_key;
}
public String getResp_body() {
return resp_body;
}
public void setResp_body(String resp_body) {
this.resp_body = resp_body;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getSni() {
return sni;
}
public void setSni(String sni) {
this.sni = sni;
}
public String getSan() {
return san;
}
public void setSan(String san) {
this.san = san;
}
public String getCn() {
return cn;
}
public void setCn(String cn) {
this.cn = cn;
}
public int getApp_id() {
return app_id;
}
public void setApp_id(int app_id) {
this.app_id = app_id;
}
public int getProtocol_id() {
return protocol_id;
}
public void setProtocol_id(int protocol_id) {
this.protocol_id = protocol_id;
}
public int getIntercept_state() {
return intercept_state;
}
public void setIntercept_state(int intercept_state) {
this.intercept_state = intercept_state;
}
public long getSsl_server_side_latency() {
return ssl_server_side_latency;
}
public void setSsl_server_side_latency(long ssl_server_side_latency) {
this.ssl_server_side_latency = ssl_server_side_latency;
}
public long getSsl_client_side_latency() {
return ssl_client_side_latency;
}
public void setSsl_client_side_latency(long ssl_client_side_latency) {
this.ssl_client_side_latency = ssl_client_side_latency;
}
public String getSsl_server_side_version() {
return ssl_server_side_version;
}
public void setSsl_server_side_version(String ssl_server_side_version) {
this.ssl_server_side_version = ssl_server_side_version;
}
public String getSsl_client_side_version() {
return ssl_client_side_version;
}
public void setSsl_client_side_version(String ssl_client_side_version) {
this.ssl_client_side_version = ssl_client_side_version;
}
public int getSsl_cert_verify() {
return ssl_cert_verify;
}
public void setSsl_cert_verify(int ssl_cert_verify) {
this.ssl_cert_verify = ssl_cert_verify;
}
public String getStream_trace_id() {
return stream_trace_id;
}
public void setStream_trace_id(String stream_trace_id) {
this.stream_trace_id = stream_trace_id;
}
public long getCon_latency_ms() {
return con_latency_ms;
}
public void setCon_latency_ms(long con_latency_ms) {
this.con_latency_ms = con_latency_ms;
}
public int getPinningst() {
return pinningst;
}
public void setPinningst(int pinningst) {
this.pinningst = pinningst;
}
public long getC2s_pkt_num() {
return c2s_pkt_num;
}
public void setC2s_pkt_num(long c2s_pkt_num) {
this.c2s_pkt_num = c2s_pkt_num;
}
public long getS2c_pkt_num() {
return S2c_pkt_num;
}
public void setS2c_pkt_num(long s2c_pkt_num) {
S2c_pkt_num = s2c_pkt_num;
}
public long getC2s_byte_num() {
return c2s_byte_num;
}
public void setC2s_byte_num(long c2s_byte_num) {
this.c2s_byte_num = c2s_byte_num;
}
public long getS2c_byte_num() {
return s2c_byte_num;
}
public void setS2c_byte_num(long s2c_byte_num) {
this.s2c_byte_num = s2c_byte_num;
}
public String getNas_ip() {
return nas_ip;
}
public void setNas_ip(String nas_ip) {
this.nas_ip = nas_ip;
}
public String getFramed_ip() {
return framed_ip;
}
public void setFramed_ip(String framed_ip) {
this.framed_ip = framed_ip;
}
public String getAccount() {
return account;
}
public void setAccount(String account) {
this.account = account;
}
public int getPacket_type() {
return packet_type;
}
public void setPacket_type(int packet_type) {
this.packet_type = packet_type;
}
public int getHas_dup_traffic() {
return has_dup_traffic;
}
public void setHas_dup_traffic(int has_dup_traffic) {
this.has_dup_traffic = has_dup_traffic;
}
public String getStream_error() {
return stream_error;
}
public void setStream_error(String stream_error) {
this.stream_error = stream_error;
}
}

View File

@@ -1,65 +0,0 @@
package cn.ac.iie.bolt;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.influxdb.InfluxDbUtils;
import cn.ac.iie.utils.system.TupleUtils;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import java.util.HashMap;
import java.util.Map;
/**
* Totalling bolt that writes the aggregated count to InfluxDB
*
* @author antlee
* @date 2018/8/14
*/
public class SummaryBolt extends BaseBasicBolt {
private static final long serialVersionUID = 4614020687381536301L;
private static Logger logger = Logger.getLogger(SummaryBolt.class);
private static long sum = 0L;
@Override
public void prepare(Map stormConf, TopologyContext context) {
}
@Override
public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
try {
if (TupleUtils.isTick(tuple)) {
InfluxDbUtils.sendKafkaSuccess(sum);
sum = 0L;
} else {
long successfulSum = tuple.getLong(0);
sum += successfulSum;
}
} catch (Exception e) {
logger.error("计数写入influxDB出现异常 ", e);
e.printStackTrace();
}
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
}
@Override
public Map<String, Object> getComponentConfiguration() {
Map<String, Object> conf = new HashMap<String, Object>(16);
conf.put(org.apache.storm.Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
return conf;
}
}

View File

@@ -1,88 +0,0 @@
package cn.ac.iie.topology;
import cn.ac.iie.bolt.ConnCompletionBolt;
import cn.ac.iie.bolt.NtcLogSendBolt;
import cn.ac.iie.bolt.SummaryBolt;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.spout.CustomizedKafkaSpout;
import org.apache.log4j.Logger;
import org.apache.storm.Config;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.topology.TopologyBuilder;
/**
* Main class of the Storm topology
*
* @author Administrator
*/
public class LogFlowWriteTopology {
private static Logger logger = Logger.getLogger(LogFlowWriteTopology.class);
private final String topologyName;
private final Config topologyConfig;
private TopologyBuilder builder;
private LogFlowWriteTopology() {
this(LogFlowWriteTopology.class.getSimpleName());
}
private LogFlowWriteTopology(String topologyName) {
this.topologyName = topologyName;
topologyConfig = createTopologConfig();
}
private Config createTopologConfig() {
Config conf = new Config();
conf.setDebug(false);
conf.setMessageTimeoutSecs(60);
conf.setMaxSpoutPending(FlowWriteConfig.TOPOLOGY_CONFIG_MAX_SPOUT_PENDING);
conf.setNumAckers(FlowWriteConfig.TOPOLOGY_NUM_ACKS);
return conf;
}
private void runLocally() throws InterruptedException {
topologyConfig.setMaxTaskParallelism(1);
StormRunner.runTopologyLocally(builder, topologyName, topologyConfig, 600);
}
private void runRemotely() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
topologyConfig.setNumWorkers(FlowWriteConfig.TOPOLOGY_WORKERS);
//Setting this too high causes many problems, e.g. heartbeat threads starving and throughput dropping sharply
topologyConfig.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 8);
StormRunner.runTopologyRemotely(builder, topologyName, topologyConfig);
}
private void buildTopology() {
builder = new TopologyBuilder();
builder.setSpout("LogFlowWriteSpout", new CustomizedKafkaSpout(), FlowWriteConfig.SPOUT_PARALLELISM);
builder.setBolt("ConnCompletionBolt", new ConnCompletionBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("ConnCompletionBolt");
// builder.setBolt("SummaryBolt", new SummaryBolt(), 1).localOrShuffleGrouping("NtcLogSendBolt");
}
public static void main(String[] args) throws Exception {
LogFlowWriteTopology csst = null;
boolean runLocally = true;
String parameter = "remote";
int size = 2;
if (args.length >= size && parameter.equalsIgnoreCase(args[1])) {
runLocally = false;
csst = new LogFlowWriteTopology(args[0]);
} else {
csst = new LogFlowWriteTopology();
}
csst.buildTopology();
if (runLocally) {
logger.info("执行本地模式...");
csst.runLocally();
} else {
logger.info("执行远程部署模式...");
csst.runRemotely();
}
}
}

View File

@@ -1,68 +0,0 @@
package cn.ac.iie.utils.general;
import org.apache.log4j.Logger;
import java.security.MessageDigest;
/**
* Description: MD5 conversion utility class
*
* @author Administrator
* @create 2018-08-13 15:11
*/
public class EncryptionUtils {
private static Logger logger = Logger.getLogger(EncryptionUtils.class);
public static String md5Encode(String msg) throws Exception {
try {
byte[] msgBytes = msg.getBytes("utf-8");
/*
* Declare the MD5 algorithm and obtain a MessageDigest instance
*/
MessageDigest md5 = MessageDigest.getInstance("MD5");
/*
* Update the digest with the given bytes
*/
md5.update(msgBytes);
/*
* Finish the hash computation and obtain the digest
*/
byte[] digest = md5.digest();
/*
* The two lines above are equivalent to
* byte[] digest = md5.digest(msgBytes);
*/
return byteArr2hexString(digest);
} catch (Exception e) {
logger.error("Error in conversion MD5! " + msg);
return "";
}
}
/**
* Convert a byte array to its hexadecimal string representation
*
* @param bys byte array
* @return hex string
*/
private static String byteArr2hexString(byte[] bys) {
StringBuilder hexVal = new StringBuilder();
int val = 0;
for (byte by : bys) {
//Convert the byte to an int; if the byte is negative it must be ANDed with 0xff
val = ((int) by) & 0xff;
if (val < 16) {
hexVal.append("0");
}
hexVal.append(Integer.toHexString(val));
}
return hexVal.toString();
}
public static void main(String[] args) {
}
}

View File

@@ -1,123 +0,0 @@
package cn.ac.iie.utils.general;
import cn.ac.iie.bean.SessionRecordLog;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.redis.RedisPollUtils;
import cn.ac.iie.utils.system.SnowflakeId;
import cn.ac.iie.utils.zookeeper.DistributedLock;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import com.alibaba.fastjson.JSONObject;
import com.zdjizhi.utils.IpLookup;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import redis.clients.jedis.Jedis;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Description: transformation and completion utility class
*
* @author qidaijie
* @create 2018-08-13 15:11
*/
public class TransFormUtils {
private static Logger logger = Logger.getLogger(TransFormUtils.class);
private static Pattern WEB_PATTERN = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
private static IpLookup ipLookup = new IpLookup.Builder(false)
.loadDataFileV4(FlowWriteConfig.IP_LIBRARY + "Kazakhstan.mmdb")
.loadDataFileV6(FlowWriteConfig.IP_LIBRARY + "Kazakhstan.mmdb")
.loadAsnDataFileV4(FlowWriteConfig.IP_LIBRARY + "asn_v4.mmdb")
.loadAsnDataFileV6(FlowWriteConfig.IP_LIBRARY + "asn_v6.mmdb")
.build();
// private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
// private static SnowflakeId snowflakeId = new SnowflakeId();
/**
* Parse the log and complete it
*
* @param message raw log
* @return completed log
*/
public static String getJsonMessage(String message) {
SessionRecordLog sessionRecordLog = JSONObject.parseObject(message, SessionRecordLog.class);
String serverIp = sessionRecordLog.getServer_ip();
String clientIp = sessionRecordLog.getClient_ip();
try {
sessionRecordLog.setUid(SnowflakeId.generateId());
sessionRecordLog.setServer_location(ipLookup.countryLookup(serverIp));
sessionRecordLog.setClient_location(ipLookup.cityLookupDetail(clientIp));
sessionRecordLog.setClient_asn(ipLookup.asnLookup(clientIp, true));
sessionRecordLog.setServer_asn(ipLookup.asnLookup(serverIp, true));
sessionRecordLog.setDomain(getTopDomain(sessionRecordLog.getSni(), sessionRecordLog.getHost()));
sessionRecordLog.setRecv_time(System.currentTimeMillis() / 1000);
// sessionRecordLog.setSubscribe_id(getSubscribeId(clientIp));
return JSONObject.toJSONString(sessionRecordLog);
} catch (Exception e) {
logger.error("日志解析过程出现异常", e);
return "";
}
}
/**
* If sni is present, derive the domain from sni; otherwise, if host is present, derive it from host
*
* @param sni sni
* @param host host
* @return top-level domain
*/
private static String getTopDomain(String sni, String host) {
if (StringUtil.isNotBlank(sni)) {
return getDomain(sni);
} else if (StringUtil.isNotBlank(host)) {
return getDomain(host);
} else {
return "";
}
}
/**
* Get the subscriber name
*
* @param key Sip
* @return SubscribeId
*/
private static String getSubscribeId(String key) {
String sub = "";
try (Jedis jedis = RedisPollUtils.getJedis()) {
if (jedis != null) {
sub = jedis.get(key);
}
} catch (Exception e) {
logger.error("通过Redis获取用户名出现异常", e);
}
return sub;
}
/**
* Extract the top-level domain from a URL
*
* @param url site URL
* @return top-level domain
*/
private static String getDomain(String url) {
try {
Matcher matcher = WEB_PATTERN.matcher(url);
if (matcher.find()) {
return matcher.group();
}
} catch (Exception e) {
e.printStackTrace();
}
return "";
}
public static void main(String[] args) {
String s = ipLookup.countryLookup("192.168.10.207");
System.out.println(s);
}
}

View File

@@ -1,69 +0,0 @@
package cn.ac.iie.utils.influxdb;
import cn.ac.iie.common.FlowWriteConfig;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.function.LongFunction;
/**
* Utility class for writing to InfluxDB
*
* @author antlee
* @date 2018/8/17
*/
public class InfluxDbUtils {
/**
* Metric marking raw logs that failed to be written to the data-center Kafka
*/
public static void sendKafkaFail(int discarded) {
InfluxDB client = InfluxDBFactory.connect(FlowWriteConfig.INFLUX_IP, FlowWriteConfig.INFLUX_USERNAME, FlowWriteConfig.INFLUX_PASSWORD);
Point point1 = Point.measurement("SendKafkaFail")
.tag("topology", FlowWriteConfig.KAFKA_TOPIC)
.tag("hostname", getIp())
.field("discarded", discarded)
.build();
client.write("BusinessMonitor", "", point1);
}
/**
* Metric marking raw logs successfully written to the data-center Kafka
*/
public static void sendKafkaSuccess(Long complete) {
if (complete != 0) {
InfluxDB client = InfluxDBFactory.connect(FlowWriteConfig.INFLUX_IP, FlowWriteConfig.INFLUX_USERNAME, FlowWriteConfig.INFLUX_PASSWORD);
Point point1 = Point.measurement("SendKafkaSuccess")
.tag("topology", FlowWriteConfig.KAFKA_TOPIC)
.tag("hostname", getIp())
.field("complete", complete)
.build();
client.write("BusinessMonitor", "", point1);
}
}
/**
* 获取本机IP
*
* @return IP地址
*/
private static String getIp() {
InetAddress addr;
try {
addr = InetAddress.getLocalHost();
return addr.getHostAddress();
} catch (UnknownHostException e) {
e.printStackTrace();
return null;
}
}
public static void main(String[] args) {
sendKafkaFail(100);
// sendKafkaSuccess(100L);
}
}

View File

@@ -1,79 +0,0 @@
package cn.ac.iie.utils.redis;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.log4j.Logger;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPoolConfig;
import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Properties;
import java.util.Set;
/**
* Redis cluster connection pool, intended for matching an IP to its subscriber name
*
* @author my
* @date 2018-07-04
*/
public final class RedisClusterUtils {
private static final Logger logger = Logger.getLogger(RedisClusterUtils.class);
private static JedisCluster jedisCluster;
private static Properties props = new Properties();
static {
try {
String redisConfigFile = "redis_config.properties";
props.load(RedisClusterUtils.class.getClassLoader().getResourceAsStream(redisConfigFile));
} catch (IOException e) {
props = null;
logger.error("加载Redis配置文件失败", e);
}
}
/**
* Instances of this class must not be created via new
*/
private RedisClusterUtils() {
}
/**
* Initialize the Redis connection pool
*/
private static JedisCluster getJedisCluster() {
if (jedisCluster == null) {
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXACTIVE)));
poolConfig.setMaxIdle(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXIDLE)));
poolConfig.setMaxWaitMillis(Long.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXWAIT)));
poolConfig.setTestOnReturn(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONRETURN)));
poolConfig.setTestOnBorrow(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONBORROW)));
Set<HostAndPort> nodes = new LinkedHashSet<HostAndPort>();
for (String port : props.getProperty(FlowWriteConfig.REDIS_PORT).split(FlowWriteConfig.SEGMENTATION)) {
for (String ip : props.getProperty(FlowWriteConfig.REDIS_IP).split(FlowWriteConfig.SEGMENTATION)) {
nodes.add(new HostAndPort(ip, Integer.parseInt(port)));
}
}
jedisCluster = new JedisCluster(nodes, poolConfig);
}
return jedisCluster;
}
/**
* Get the subscriber name
*
* @param key service_ip
* @return Subscribe_id
*/
public static String get(String key) {
String s = key.split("\\.")[0];
if (!FlowWriteConfig.CHECK_IP_SCOPE.contains(s)) {
jedisCluster = getJedisCluster();
return jedisCluster.get(key);
}
return "";
}
}

View File

@@ -1,115 +0,0 @@
package cn.ac.iie.utils.redis;
import cn.ac.iie.common.FlowWriteConfig;
import com.zdjizhi.utils.StringUtil;
import org.apache.commons.lang3.RandomUtils;
import org.apache.log4j.Logger;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.util.Properties;
/**
* @author qidaijie
*/
public class RedisPollUtils {
private static final Logger logger = Logger.getLogger(RedisPollUtils.class);
private static JedisPool jedisPool = null;
private static Properties props = new Properties();
private RedisPollUtils() {
}
static {
initialPool();
}
/**
* Initialize the Redis connection pool
*/
private static void initialPool() {
try {
//Load the connection pool configuration file
props.load(RedisPollUtils.class.getClassLoader().getResourceAsStream("redis_config.properties"));
// Create the Jedis pool configuration instance
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXACTIVE)));
poolConfig.setMaxIdle(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXIDLE)));
poolConfig.setMaxWaitMillis(Long.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXWAIT)));
poolConfig.setTestOnReturn(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONRETURN)));
poolConfig.setTestOnBorrow(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONBORROW)));
// Instantiate the Jedis pool from the configuration
jedisPool = new JedisPool(poolConfig, props.getProperty(FlowWriteConfig.REDIS_IP),
Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_PORT)));
} catch (Exception e) {
logger.error("Redis连接池初始化错误", e);
}
}
/**
* Get a Jedis instance
*
* @return Jedis instance
*/
public static Jedis getJedis() {
Jedis jedis = null;
try {
if (jedisPool == null) {
initialPool();
}
jedis = jedisPool.getResource();
} catch (Exception e) {
logger.error("Redis连接池错误,无法获取连接", e);
}
return jedis;
}
// /**
// * @param key redis key
// * @return value
// */
// public static Integer getWorkerId(String key) {
// int workId = 0;
// int maxId = 32;
// try (Jedis jedis = RedisPollUtils.getJedis()) {
// if (jedis != null) {
// String work = jedis.get(key);
// if (StringUtil.isBlank(work)) {
// jedis.set(key, "0");
// } else {
// workId = Integer.parseInt(work);
// }
// if (workId < maxId) {
// jedis.set(key, String.valueOf(workId + 1));
// } else {
// workId = 0;
// jedis.set(key, "1");
// }
// }
// } catch (Exception e) {
// logger.error("通过Redis获取用户名出现异常", e);
// workId = RandomUtils.nextInt(0, 31);
// }
// return workId;
// }
public static Integer getWorkerId(String key) {
int workId = 0;
try (Jedis jedis = RedisPollUtils.getJedis()) {
if (jedis != null) {
workId = Integer.parseInt(jedis.get(key));
jedis.set(key, String.valueOf(workId + 2));
logger.error("\n工作id是" + workId + "\n");
}
} catch (Exception e) {
logger.error("通过Redis获取用户名出现异常", e);
workId = RandomUtils.nextInt(0, 31);
}
return workId;
}
}

View File

@@ -1,7 +1,10 @@
package cn.ac.iie.bolt;
package com.zdjizhi.bolt;
import com.zdjizhi.common.FlowWriteConfig;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import com.zdjizhi.utils.exception.StreamCompletionException;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
@@ -12,37 +15,39 @@ import org.apache.storm.tuple.Values;
import java.util.Map;
import static cn.ac.iie.utils.general.TransFormUtils.getJsonMessage;
import static com.zdjizhi.utils.general.TransFormUtils.dealCommonMessage;
/**
* Connection-record log completion
*
* @author qidaijie
*/
public class ConnCompletionBolt extends BaseBasicBolt {
private static final long serialVersionUID = -1059151670138465894L;
private final static Logger logger = Logger.getLogger(ConnCompletionBolt.class);
public class CompletionBolt extends BaseBasicBolt {
private static final long serialVersionUID = 9006119186526123734L;
private static final Log logger = LogFactory.get();
@Override
public void prepare(Map stormConf, TopologyContext context) {
}
@Override
public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
try {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
basicOutputCollector.emit(new Values(getJsonMessage(message)));
basicOutputCollector.emit(new Values(dealCommonMessage(message)));
}
} catch (Exception e) {
logger.error("接收解析过程出现异常", e);
} catch (StreamCompletionException e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + "接收/解析过程出现异常");
}
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("connLog"));
outputFieldsDeclarer.declare(new Fields("tsgLog"));
}
}

View File

@@ -1,18 +1,17 @@
package cn.ac.iie.bolt;
package com.zdjizhi.bolt.kafka;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.influxdb.InfluxDbUtils;
import cn.ac.iie.utils.system.TupleUtils;
import cn.ac.iie.utils.kafka.KafkaLogNtc;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.exception.StreamCompletionException;
import com.zdjizhi.utils.kafka.KafkaLogSend;
import com.zdjizhi.utils.system.TupleUtils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import java.util.HashMap;
import java.util.LinkedList;
@@ -23,18 +22,17 @@ import java.util.Map;
* @author qidaijie
* @date 2018/8/14
*/
public class NtcLogSendBolt extends BaseBasicBolt {
private static final long serialVersionUID = 3940515789830317517L;
private static Logger logger = Logger.getLogger(NtcLogSendBolt.class);
public class LogSendBolt extends BaseBasicBolt {
private static final long serialVersionUID = -3663610927224396615L;
private static final Log logger = LogFactory.get();
private List<String> list;
private KafkaLogNtc kafkaLogNtc;
private static long successfulSum = 0;
private KafkaLogSend kafkaLogSend;
@Override
public void prepare(Map stormConf, TopologyContext context) {
list = new LinkedList<>();
kafkaLogNtc = KafkaLogNtc.getInstance();
kafkaLogSend = KafkaLogSend.getInstance();
}
@Override
@@ -42,26 +40,21 @@ public class NtcLogSendBolt extends BaseBasicBolt {
try {
if (TupleUtils.isTick(tuple)) {
if (list.size() != 0) {
kafkaLogNtc.sendMessage(list);
successfulSum += list.size();
kafkaLogSend.sendMessage(list);
list.clear();
}
basicOutputCollector.emit(new Values(successfulSum));
successfulSum = 0L;
} else {
String message = tuple.getString(0);
String message = tuple.getValue(0).toString();
if (StringUtil.isNotBlank(message)) {
list.add(message);
}
if (list.size() == FlowWriteConfig.BATCH_INSERT_NUM) {
kafkaLogNtc.sendMessage(list);
successfulSum += list.size();
kafkaLogSend.sendMessage(list);
list.clear();
}
}
} catch (Exception e) {
logger.error("日志发送Kafka过程出现异常 ", e);
e.printStackTrace();
} catch (StreamCompletionException e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + "日志发送Kafka过程出现异常");
}
}
@@ -74,7 +67,6 @@ public class NtcLogSendBolt extends BaseBasicBolt {
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("suc"));
}
}

View File

@@ -1,63 +1,64 @@
package cn.ac.iie.common;
package com.zdjizhi.common;
import cn.ac.iie.utils.system.FlowWriteConfigurations;
import com.zdjizhi.utils.system.FlowWriteConfigurations;
/**
* @author Administrator
*/
public class FlowWriteConfig {
public static final String LOG_STRING_SPLITTER = "\t";
public static final String SQL_STRING_SPLITTER = "#";
public static final String SEGMENTATION = ",";
public static final int IF_PARAM_LENGTH = 3;
public static final String VISIBILITY = "disabled";
public static final String FORMAT_SPLITTER = ",";
public static final String IS_JSON_KEY_TAG = "$.";
public static final String IF_CONDITION_SPLITTER = "=";
public static final String MODEL = "remote";
/**
* System
*/
public static final Integer SPOUT_PARALLELISM = FlowWriteConfigurations.getIntProperty(0, "spout.parallelism");
public static final Integer DATACENTER_BOLT_PARALLELISM = FlowWriteConfigurations.getIntProperty(0, "datacenter.bolt.parallelism");
public static final Integer COMPLETION_BOLT_PARALLELISM = FlowWriteConfigurations.getIntProperty(0, "completion.bolt.parallelism");
public static final Integer TOPOLOGY_WORKERS = FlowWriteConfigurations.getIntProperty(0, "topology.workers");
public static final Integer KAFKA_BOLT_PARALLELISM = FlowWriteConfigurations.getIntProperty(0, "kafka.bolt.parallelism");
public static final Integer TOPOLOGY_TICK_TUPLE_FREQ_SECS = FlowWriteConfigurations.getIntProperty(0, "topology.tick.tuple.freq.secs");
public static final Integer HBASE_TICK_TUPLE_FREQ_SECS = FlowWriteConfigurations.getIntProperty(0, "hbase.tick.tuple.freq.secs");
public static final Integer TOPOLOGY_CONFIG_MAX_SPOUT_PENDING = FlowWriteConfigurations.getIntProperty(0, "topology.config.max.spout.pending");
public static final Integer TOPOLOGY_NUM_ACKS = FlowWriteConfigurations.getIntProperty(0, "topology.num.acks");
public static final Integer TOPOLOGY_SPOUT_SLEEP_TIME = FlowWriteConfigurations.getIntProperty(0, "topology.spout.sleep.time");
public static final Integer BATCH_INSERT_NUM = FlowWriteConfigurations.getIntProperty(0, "batch.insert.num");
public static final Integer DATA_CENTER_ID_NUM = FlowWriteConfigurations.getIntProperty(0, "data.center.id.num");
public static final String CHECK_IP_SCOPE = FlowWriteConfigurations.getStringProperty(0, "check.ip.scope");
public static final Integer MAX_FAILURE_NUM = FlowWriteConfigurations.getIntProperty(0, "max.failure.num");
/**
* influxDB
*/
public static final String INFLUX_IP = FlowWriteConfigurations.getStringProperty(0, "influx.ip");
public static final String INFLUX_USERNAME = FlowWriteConfigurations.getStringProperty(0, "influx.username");
public static final String INFLUX_PASSWORD = FlowWriteConfigurations.getStringProperty(0, "influx.password");
public static final String MAIL_DEFAULT_CHARSET = FlowWriteConfigurations.getStringProperty(0, "mail.default.charset");
public static final String LOG_NEED_COMPLETE = FlowWriteConfigurations.getStringProperty(0, "log.need.complete");
/**
* kafka
*/
public static final String BOOTSTRAP_SERVERS = FlowWriteConfigurations.getStringProperty(0, "bootstrap.servers");
public static final String INPUT_KAFKA_SERVERS = FlowWriteConfigurations.getStringProperty(0, "input.kafka.servers");
public static final String OUTPUT_KAFKA_SERVERS = FlowWriteConfigurations.getStringProperty(0, "output.kafka.servers");
public static final String ZOOKEEPER_SERVERS = FlowWriteConfigurations.getStringProperty(0, "zookeeper.servers");
public static final String HBASE_ZOOKEEPER_SERVERS = FlowWriteConfigurations.getStringProperty(0, "hbase.zookeeper.servers");
public static final String HBASE_TABLE_NAME = FlowWriteConfigurations.getStringProperty(0, "hbase.table.name");
public static final String GROUP_ID = FlowWriteConfigurations.getStringProperty(0, "group.id");
public static final String RESULTS_OUTPUT_TOPIC = FlowWriteConfigurations.getStringProperty(0, "results.output.topic");
public static final String KAFKA_TOPIC = FlowWriteConfigurations.getStringProperty(0, "kafka.topic");
public static final String AUTO_OFFSET_RESET = FlowWriteConfigurations.getStringProperty(0, "auto.offset.reset");
public static final String PRODUCER_ACK = FlowWriteConfigurations.getStringProperty(0, "producer.ack");
public static final String IP_LIBRARY = FlowWriteConfigurations.getStringProperty(0, "ip.library");
/***
* Redis
/**
* Kafka rate-limit configuration - 20201117
*/
public static final String REDIS_IP = "redis.ip";
public static final String REDIS_PORT = "redis.port";
public static final String REDIS_TIMEOUT = "redis.timeout";
public static final String REDIS_POOL_MAXACTIVE = "redis.pool.maxActive";
public static final String REDIS_POOL_MAXIDLE = "redis.pool.maxIdle";
public static final String REDIS_POOL_MAXWAIT = "redis.pool.maxWait";
public static final String REDIS_POOL_TESTONBORROW = "redis.pool.testOnBorrow";
public static final String REDIS_POOL_TESTONRETURN = "redis.pool.testOnReturn";
public static final String PRODUCER_KAFKA_COMPRESSION_TYPE = FlowWriteConfigurations.getStringProperty(0, "producer.kafka.compression.type");
public static final String CONSUMER_CLIENT_ID = FlowWriteConfigurations.getStringProperty(0, "consumer.client.id");
public static final String PRODUCER_CLIENT_ID = FlowWriteConfigurations.getStringProperty(0, "producer.client.id");
/**
* http
*/
public static final String SCHEMA_HTTP = FlowWriteConfigurations.getStringProperty(0, "schema.http");
}
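FlowWriteConfigurations itself is not part of this diff; judging from the call sites, the first argument selects a properties file (0 for the topology configuration, 1 for the Kafka producer tuning used by KafkaProConfig below). The loader below is a hypothetical stand-in written against that assumption, including the assumed resource names, not the project's actual implementation.

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

// Hypothetical stand-in for com.zdjizhi.utils.system.FlowWriteConfigurations.
public final class FlowWriteConfigurations {
    private static final Properties[] PROPS = new Properties[2];

    static {
        // Index 0: topology/config file, index 1: Kafka producer tuning (file names assumed).
        load(0, "flow_write.properties");
        load(1, "kafka_producer.properties");
    }

    private static void load(int index, String resource) {
        Properties p = new Properties();
        try (InputStream in = FlowWriteConfigurations.class.getClassLoader()
                .getResourceAsStream(resource)) {
            if (in != null) {
                p.load(in);
            }
        } catch (IOException ignored) {
            // fall through with empty properties
        }
        PROPS[index] = p;
    }

    public static String getStringProperty(int index, String key) {
        return PROPS[index].getProperty(key);
    }

    public static Integer getIntProperty(int index, String key) {
        String value = PROPS[index].getProperty(key);
        return value == null ? null : Integer.valueOf(value.trim());
    }

    private FlowWriteConfigurations() {
    }
}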

View File

@@ -0,0 +1,20 @@
package com.zdjizhi.common;
import com.zdjizhi.utils.system.FlowWriteConfigurations;
/**
* @author Administrator
*/
public class KafkaProConfig {
public static final String RETRIES = FlowWriteConfigurations.getStringProperty(1, "retries");
public static final String LINGER_MS = FlowWriteConfigurations.getStringProperty(1, "linger.ms");
public static final Integer REQUEST_TIMEOUT_MS = FlowWriteConfigurations.getIntProperty(1, "request.timeout.ms");
public static final Integer BATCH_SIZE = FlowWriteConfigurations.getIntProperty(1, "batch.size");
public static final Integer BUFFER_MEMORY = FlowWriteConfigurations.getIntProperty(1, "buffer.memory");
public static final Integer MAX_REQUEST_SIZE = FlowWriteConfigurations.getIntProperty(1, "max.request.size");
}

View File

@@ -1,10 +1,14 @@
package cn.ac.iie.spout;
package com.zdjizhi.spout;
import cn.ac.iie.common.FlowWriteConfig;
import cn.hutool.core.thread.ThreadUtil;
import com.zdjizhi.common.FlowWriteConfig;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.utils.exception.StreamCompletionException;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
@@ -26,19 +30,26 @@ public class CustomizedKafkaSpout extends BaseRichSpout {
private KafkaConsumer<String, String> consumer;
private SpoutOutputCollector collector = null;
private TopologyContext context = null;
private final static Logger logger = Logger.getLogger(CustomizedKafkaSpout.class);
private static final Log logger = LogFactory.get();
private static Properties createConsumerConfig() {
Properties props = new Properties();
props.put("bootstrap.servers", FlowWriteConfig.BOOTSTRAP_SERVERS);
props.put("bootstrap.servers", FlowWriteConfig.INPUT_KAFKA_SERVERS);
props.put("group.id", FlowWriteConfig.GROUP_ID);
props.put("session.timeout.ms", "60000");
props.put("max.poll.records", 3000);
props.put("max.partition.fetch.bytes", 31457280);
props.put("auto.offset.reset", FlowWriteConfig.AUTO_OFFSET_RESET);
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
/**
* Kafka rate-limit configuration - 20201117
*/
props.put(ConsumerConfig.CLIENT_ID_CONFIG, FlowWriteConfig.CONSUMER_CLIENT_ID);
return props;
}
@@ -62,13 +73,12 @@ public class CustomizedKafkaSpout extends BaseRichSpout {
try {
// TODO Auto-generated method stub
ConsumerRecords<String, String> records = consumer.poll(10000L);
Thread.sleep(FlowWriteConfig.TOPOLOGY_SPOUT_SLEEP_TIME);
ThreadUtil.sleep(FlowWriteConfig.TOPOLOGY_SPOUT_SLEEP_TIME);
for (ConsumerRecord<String, String> record : records) {
this.collector.emit(new Values(record.value()));
}
} catch (Exception e) {
} catch (StreamCompletionException e) {
logger.error("KafkaSpout发送消息出现异常!", e);
e.printStackTrace();
}
}

View File

@@ -0,0 +1,99 @@
package com.zdjizhi.topology;
import com.zdjizhi.bolt.CompletionBolt;
import com.zdjizhi.bolt.kafka.LogSendBolt;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.spout.CustomizedKafkaSpout;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.utils.exception.StreamCompletionException;
import org.apache.storm.Config;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.topology.TopologyBuilder;
/**
* Main class of the Storm topology
*
* @author Administrator
*/
public class LogFlowWriteTopology {
private static final Log logger = LogFactory.get();
private final String topologyName;
private final Config topologyConfig;
private TopologyBuilder builder;
private LogFlowWriteTopology() {
this(LogFlowWriteTopology.class.getSimpleName());
}
private LogFlowWriteTopology(String topologyName) {
this.topologyName = topologyName;
topologyConfig = createTopologyConfig();
}
private Config createTopologyConfig() {
Config conf = new Config();
conf.setDebug(false);
conf.setMessageTimeoutSecs(60);
conf.setMaxSpoutPending(FlowWriteConfig.TOPOLOGY_CONFIG_MAX_SPOUT_PENDING);
conf.setNumAckers(FlowWriteConfig.TOPOLOGY_NUM_ACKS);
return conf;
}
private void runLocally() throws InterruptedException {
topologyConfig.setMaxTaskParallelism(1);
StormRunner.runTopologyLocally(builder, topologyName, topologyConfig, 600);
}
private void runRemotely() throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
topologyConfig.setNumWorkers(FlowWriteConfig.TOPOLOGY_WORKERS);
//Setting this too high causes many problems, e.g. heartbeat threads starving and throughput dropping sharply
topologyConfig.put(Config.TOPOLOGY_EXECUTOR_RECEIVE_BUFFER_SIZE, 8);
StormRunner.runTopologyRemotely(builder, topologyName, topologyConfig);
}
private void buildTopology() {
String need = "yes";
builder = new TopologyBuilder();
builder.setSpout("LogFlowWriteSpout", new CustomizedKafkaSpout(), FlowWriteConfig.SPOUT_PARALLELISM);
if (need.equals(FlowWriteConfig.LOG_NEED_COMPLETE)) {
builder.setBolt("LogCompletionBolt", new CompletionBolt(),
FlowWriteConfig.COMPLETION_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("CompletionLogSendBolt", new LogSendBolt(),
FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("LogCompletionBolt");
} else {
builder.setBolt("LogSendBolt", new LogSendBolt(),
FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
}
}
public static void main(String[] args) {
LogFlowWriteTopology flowWriteTopology;
try {
boolean runLocally = true;
int size = 2;
if (args.length >= size && FlowWriteConfig.MODEL.equalsIgnoreCase(args[1])) {
runLocally = false;
flowWriteTopology = new LogFlowWriteTopology(args[0]);
} else {
flowWriteTopology = new LogFlowWriteTopology();
}
flowWriteTopology.buildTopology();
if (runLocally) {
logger.info("执行本地模式...");
flowWriteTopology.runLocally();
} else {
logger.info("执行远程部署模式...");
flowWriteTopology.runRemotely();
}
} catch (StreamCompletionException | InterruptedException | InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
logger.error("Topology Start ERROR! message is:" + e);
}
}
}

View File

@@ -1,6 +1,7 @@
package cn.ac.iie.topology;
package com.zdjizhi.topology;
import cn.hutool.core.thread.ThreadUtil;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
@@ -21,7 +22,7 @@ public final class StormRunner{
LocalCluster localCluster = new LocalCluster();
localCluster.submitTopology(topologyName, conf, builder.createTopology());
Thread.sleep((long) runtimeInSeconds * MILLS_IN_SEC);
ThreadUtil.sleep((long) runtimeInSeconds * MILLS_IN_SEC);
localCluster.shutdown();
}

View File

@@ -0,0 +1,18 @@
package com.zdjizhi.utils.exception;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.exception
* @Description:
* @date 2021/3/25 10:14
*/
public class StreamCompletionException extends RuntimeException {
public StreamCompletionException(Exception e) {
super(e);
}
public StreamCompletionException(String e) {
super(e);
}
}

View File

@@ -1,9 +1,11 @@
package cn.ac.iie.utils.system;
package com.zdjizhi.utils.general;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.zookeeper.DistributedLock;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import org.apache.log4j.Logger;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.exception.StreamCompletionException;
import com.zdjizhi.utils.zookeeper.DistributedLock;
import com.zdjizhi.utils.zookeeper.ZookeeperUtils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
/**
* Snowflake algorithm
@@ -11,13 +13,20 @@ import org.apache.log4j.Logger;
* @author qidaijie
*/
public class SnowflakeId {
private static Logger logger = Logger.getLogger(SnowflakeId.class);
private static final Log logger = LogFactory.get();
// ==============================Fields===========================================
/**
* 开始时间截 (2018-08-01 00:00:00) max 17years
* 共64位 第一位为符号位 默认0
* 时间戳 39位(17 year), centerId:(关联每个环境或任务数) :7位(0-127),
* workerId(关联进程):6(0-63) ,序列号11位(2047/ms)
*
* 序列号 /ms = (-1L ^ (-1L << 11))
* 最大使用年 = (1L << 39) / (1000L * 60 * 60 * 24 * 365)
*/
private final long twepoch = 1564588800000L;
/**
* 开始时间截 (2020-11-14 00:00:00) max 17years
*/
private final long twepoch = 1605283200000L;
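/*
 * Illustrative bit-budget check for the layout described above (added for clarity, not part of the
 * original change; the constants mirror the fields defined below):
 *
 *   long sequencePerMs   = -1L ^ (-1L << 11);                           // 2047 ids per millisecond
 *   long maxDataCenterId = -1L ^ (-1L << 7);                            // 127
 *   long maxWorkerId     = -1L ^ (-1L << 6);                            // 63
 *   long maxYears        = (1L << 39) / (1000L * 60 * 60 * 24 * 365L);  // ~17 years from twepoch
 *
 * 1 sign bit + 39 timestamp bits + 7 dataCenter bits + 6 worker bits + 11 sequence bits = 64 bits.
 */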
/**
* 机器id所占的位数
@@ -27,22 +36,23 @@ public class SnowflakeId {
/**
* 数据标识id所占的位数
*/
private final long dataCenterIdBits = 4L;
private final long dataCenterIdBits = 7L;
/**
* 支持的最大机器id结果是3 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数)
* 支持的最大机器id结果是63 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数)
* M << n = M * 2^n
*/
private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
/**
* 支持的最大数据标识id结果是15
* 支持的最大数据标识id结果是127
*/
private final long maxDataCenterId = -1L ^ (-1L << dataCenterIdBits);
/**
* 序列在id中占的位数
*/
private final long sequenceBits = 14L;
private final long sequenceBits = 11L;
/**
* 机器ID向左移12位
@@ -60,7 +70,7 @@ public class SnowflakeId {
private final long timestampLeftShift = sequenceBits + workerIdBits + dataCenterIdBits;
/**
* 生成序列的掩码这里为16383
* 生成序列的掩码这里为2047
*/
private final long sequenceMask = -1L ^ (-1L << sequenceBits);
@@ -70,12 +80,12 @@ public class SnowflakeId {
private long workerId;
/**
* 数据中心ID(0~15)
* 数据中心ID(0~127)
*/
private long dataCenterId;
/**
* 毫秒内序列(0~16383)
* 毫秒内序列(0~2047)
*/
private long sequence = 0L;
@@ -85,12 +95,18 @@ public class SnowflakeId {
private long lastTimestamp = -1L;
/**
* 设置允许时间回拨的最大限制10s
*/
private static final long rollBackTime = 10000L;
private static SnowflakeId idWorker;
private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
static {
idWorker = new SnowflakeId();
idWorker = new SnowflakeId(FlowWriteConfig.ZOOKEEPER_SERVERS, FlowWriteConfig.DATA_CENTER_ID_NUM);
}
//==============================Constructors=====================================
@@ -98,19 +114,24 @@ public class SnowflakeId {
/**
* 构造函数
*/
private SnowflakeId() {
private SnowflakeId(String zookeeperIp, long dataCenterIdNum) {
DistributedLock lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
try {
lock.lock();
int tmpWorkerId = zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC);
int tmpWorkerId = zookeeperUtils.modifyNode("/Snowflake/" + "worker" + dataCenterIdNum, zookeeperIp);
if (tmpWorkerId > maxWorkerId || tmpWorkerId < 0) {
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
int dataCenterId = FlowWriteConfig.DATA_CENTER_ID_NUM;
if (dataCenterId > maxDataCenterId || dataCenterId < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDataCenterId));
if (dataCenterIdNum > maxDataCenterId || dataCenterIdNum < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than ", maxDataCenterId));
}
this.workerId = tmpWorkerId;
this.dataCenterId = dataCenterId;
this.dataCenterId = dataCenterIdNum;
} catch (StreamCompletionException e) {
logger.error("This is not usual error!!!===>>>" + e + "<<<===");
} finally {
lock.unlock();
}
}
// ==============================Methods==========================================
@@ -122,7 +143,10 @@ public class SnowflakeId {
*/
private synchronized long nextId() {
long timestamp = timeGen();
//设置一个允许回拨限制时间系统时间回拨范围在rollBackTime内可以等待校准
if (lastTimestamp - timestamp > 0 && lastTimestamp - timestamp < rollBackTime) {
timestamp = tilNextMillis(lastTimestamp);
}
//如果当前时间小于上一次ID生成的时间戳说明系统时钟回退过这个时候应当抛出异常
if (timestamp < lastTimestamp) {
throw new RuntimeException(

View File

@@ -0,0 +1,166 @@
package com.zdjizhi.utils.general;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.exception.StreamCompletionException;
import com.zdjizhi.utils.json.JsonParseUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson.JSONObject;
import com.zdjizhi.utils.IpLookup;
import java.util.*;
/**
* 描述:转换或补全工具类
*
* @author qidaijie
*/
public class TransFormUtils {
private static final Log logger = LogFactory.get();
/**
* 在内存中加载反射类用的map
*/
private static HashMap<String, Class> map = JsonParseUtil.getMapFromHttp(FlowWriteConfig.SCHEMA_HTTP);
/**
* 反射成一个类
*/
private static Object mapObject = JsonParseUtil.generateObject(map);
/**
* 获取任务列表
* list的每个元素是一个四元字符串数组 (有format标识的字段、补全的字段、用到的功能函数、用到的参数),例如:
* (mail_subject mail_subject decode_of_base64 mail_subject_charset)
*/
private static ArrayList<String[]> jobList = JsonParseUtil.getJobListFromHttp(FlowWriteConfig.SCHEMA_HTTP);
/**
* 补全工具类
*/
// private static FormatUtils build = new FormatUtils.Builder(false).build();
/**
* IP定位库工具类
*/
private static IpLookup ipLookup = new IpLookup.Builder(false)
.loadDataFileV4(FlowWriteConfig.IP_LIBRARY + "ip_v4.mmdb")
.loadDataFileV6(FlowWriteConfig.IP_LIBRARY + "ip_v6.mmdb")
.loadDataFilePrivateV4(FlowWriteConfig.IP_LIBRARY + "ip_private_v4.mmdb")
.loadDataFilePrivateV6(FlowWriteConfig.IP_LIBRARY + "ip_private_v6.mmdb")
.loadAsnDataFile(FlowWriteConfig.IP_LIBRARY + "asn_v4.mmdb")
.loadAsnDataFileV6(FlowWriteConfig.IP_LIBRARY + "asn_v6.mmdb")
.build();
/**
* 解析日志,并补全
*
* @param message kafka Topic原始日志
* @return 补全后的日志
*/
public static String dealCommonMessage(String message) {
Object object = JSONObject.parseObject(message, mapObject.getClass());
try {
for (String[] strings : jobList) {
//用到的参数的值
Object name = JsonParseUtil.getValue(object, strings[0]);
//需要补全的字段的key
String appendToKeyName = strings[1];
//需要补全的字段的值
Object appendTo = JsonParseUtil.getValue(object, appendToKeyName);
//匹配操作函数的字段
String function = strings[2];
//额外的参数的值
String param = strings[3];
functionSet(function, object, appendToKeyName, appendTo, name, param);
}
return JSONObject.toJSONString(object);
} catch (StreamCompletionException e) {
logger.error(e, FlowWriteConfig.KAFKA_TOPIC + "日志预处理过程出现异常");
return "";
}
}
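/*
 * Worked example of one job quad (illustrative; the field names come from the javadoc above): with
 * (mail_subject, mail_subject, decode_of_base64, mail_subject_charset) a single loop pass does roughly
 *
 *   Object name     = JsonParseUtil.getValue(object, "mail_subject");           // base64-encoded subject
 *   Object appendTo = JsonParseUtil.getValue(object, "mail_subject");           // value currently stored
 *   functionSet("decode_of_base64", object, "mail_subject", appendTo, name, "mail_subject_charset");
 *   // -> the decoded subject is written back via JsonParseUtil.setValue(...)
 */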
/**
* 根据schema描述对应字段进行操作的 函数集合
*
* @param function 匹配操作函数的字段
* @param object 动态POJO Object
* @param appendToKeyName 需要补全的字段的key
* @param appendTo 需要补全的字段的值
* @param name 用到的参数的值
* @param param 额外的参数的值
*/
private static void functionSet(String function, Object object, String appendToKeyName, Object appendTo, Object name, String param) {
switch (function) {
case "current_timestamp":
if ((long) appendTo == 0L) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.getCurrentTime());
}
break;
case "snowflake_id":
// JsonParseUtil.setValue(object, appendToKeyName,
// build.getSnowflakeId(FlowWriteConfig.ZOOKEEPER_SERVERS, FlowWriteConfig.DATA_CENTER_ID_NUM));
JsonParseUtil.setValue(object, appendToKeyName, SnowflakeId.generateId());
break;
case "geo_ip_detail":
if (name != null && appendTo == null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.getGeoIpDetail(ipLookup, name.toString()));
}
break;
case "geo_asn":
if (name != null && appendTo == null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.getGeoAsn(ipLookup, name.toString()));
}
break;
case "geo_ip_country":
if (name != null && appendTo == null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.getGeoIpCountry(ipLookup, name.toString()));
}
break;
case "set_value":
if (name != null && param != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.setValue(param));
}
break;
case "get_value":
if (name != null) {
JsonParseUtil.setValue(object, appendToKeyName, name);
}
break;
case "if":
if (param != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.condition(object, param));
}
break;
case "sub_domain":
if (appendTo == null && name != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.getTopDomain(name.toString()));
}
break;
case "radius_match":
if (name != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.radiusMatch(name.toString()));
}
break;
case "decode_of_base64":
if (name != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.decodeBase64(name.toString(), TransFunction.isJsonValue(object, param)));
}
break;
case "flattenSpec":
if (name != null && param != null) {
JsonParseUtil.setValue(object, appendToKeyName, TransFunction.flattenSpec(name.toString(), TransFunction.isJsonValue(object, param)));
}
break;
default:
}
}
}

View File

@@ -0,0 +1,212 @@
package com.zdjizhi.utils.general;
import cn.hutool.core.codec.Base64;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.hbase.HBaseUtils;
import com.zdjizhi.utils.json.JsonParseUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.jayway.jsonpath.InvalidPathException;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.utils.Encodes;
import com.zdjizhi.utils.FormatUtils;
import com.zdjizhi.utils.IpLookup;
import com.zdjizhi.utils.StringUtil;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author qidaijie
*/
class TransFunction {
private static final Log logger = LogFactory.get();
private static final Pattern PATTERN = Pattern.compile("[0-9]*");
/**
* 生成当前时间戳的操作
*/
static long getCurrentTime() {
return System.currentTimeMillis() / 1000;
}
/**
* 根据clientIp获取location信息
*
* @param ip client IP
* @return ip地址详细信息
*/
static String getGeoIpDetail(IpLookup ipLookup, String ip) {
return ipLookup.cityLookupDetail(ip);
}
/**
* 根据ip获取asn信息
*
* @param ip client/server IP
* @return ASN
*/
static String getGeoAsn(IpLookup ipLookup, String ip) {
return ipLookup.asnLookup(ip);
}
/**
* 根据ip获取country信息
*
* @param ip server IP
* @return 国家
*/
static String getGeoIpCountry(IpLookup ipLookup, String ip) {
return ipLookup.countryLookup(ip);
}
/**
* radius借助HBase补齐
*
* @param ip client IP
* @return account
*/
static String radiusMatch(String ip) {
String account = HBaseUtils.getAccount(ip.trim());
if (StringUtil.isBlank(account)) {
logger.warn("HashMap get account is null, Ip is :{}", ip);
}
return account;
}
/**
* 解析顶级域名
*
* @param domain 初始域名
* @return 顶级域名
*/
static String getTopDomain(String domain) {
try {
return FormatUtils.getTopPrivateDomain(domain);
} catch (StringIndexOutOfBoundsException outException) {
logger.error("解析顶级域名异常,异常域名:{}" + domain);
return "";
}
}
/**
* 根据编码解码base64
*
* @param message base64
* @param charset 编码
* @return 解码字符串
*/
static String decodeBase64(String message, String charset) {
String result = "";
try {
if (StringUtil.isNotBlank(message)) {
if (StringUtil.isNotBlank(charset)) {
result = Base64.decodeStr(message, charset);
} else {
result = Base64.decodeStr(message, FlowWriteConfig.MAIL_DEFAULT_CHARSET);
}
}
} catch (RuntimeException rune) {
logger.error("解析 Base64 异常,异常信息:" + rune);
}
return result;
}
/**
* 根据表达式解析json
*
* @param message json
* @param expr 解析表达式
* @return 解析结果
*/
static String flattenSpec(String message, String expr) {
String flattenResult = "";
try {
if (StringUtil.isNotBlank(expr)) {
ArrayList<String> read = JsonPath.parse(message).read(expr);
flattenResult = read.get(0);
}
} catch (ClassCastException | InvalidPathException e) {
logger.error("设备标签解析异常,[ " + expr + " ]解析表达式错误" + e);
}
return flattenResult;
}
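/*
 * Illustrative call (the input is hypothetical): for
 *   message = {"tags":[{"name":"deviceA"},{"name":"deviceB"}]}
 *   expr    = "$.tags[*].name"
 * JsonPath.parse(message).read(expr) yields ["deviceA", "deviceB"], so flattenSpec returns "deviceA".
 */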
/**
* 判断是否为日志字段,是则返回对应value否则返回原始字符串
*
* @param object 内存实体类
* @param param 字段名/普通字符串
* @return JSON.Value or String
*/
static String isJsonValue(Object object, String param) {
if (param.contains(FlowWriteConfig.IS_JSON_KEY_TAG)) {
Object value = JsonParseUtil.getValue(object, param.substring(2));
if (value != null) {
return value.toString();
} else {
return "";
}
} else {
return param;
}
}
/**
* IF函数实现解析日志构建三目运算;包含判断是否为数字若为数字则转换为long类型返回结果。
*
* @param object 内存实体类
* @param ifParam 字段名/普通字符串
* @return resultA or resultB or ""
*/
static Object condition(Object object, String ifParam) {
try {
String[] split = ifParam.split(FlowWriteConfig.FORMAT_SPLITTER);
String[] norms = split[0].split(FlowWriteConfig.IF_CONDITION_SPLITTER);
String direction = isJsonValue(object, norms[0]);
if (StringUtil.isNotBlank(direction)) {
if (split.length == FlowWriteConfig.IF_PARAM_LENGTH) {
String resultA = isJsonValue(object, split[1]);
String resultB = isJsonValue(object, split[2]);
String result = (Integer.parseInt(direction) == Integer.parseInt(norms[1])) ? resultA : resultB;
Matcher isNum = PATTERN.matcher(result);
if (isNum.matches()) {
return Long.parseLong(result);
} else {
return result;
}
}
}
} catch (RuntimeException e) {
logger.error("IF 函数执行异常,异常信息:" + e);
}
return null;
}
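/*
 * Illustrative ifParam (the real splitter characters and the JSON-key tag come from FlowWriteConfig;
 * "|", "=" and "$." are only assumed here for readability): with
 *   ifParam = "$.direction=1|$.client_ip|$.server_ip"
 * the field "direction" is read from the log object; if it equals 1 the value of client_ip is used,
 * otherwise server_ip. A purely numeric result is converted to long before it is returned.
 */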
/**
* 设置固定值函数 若为数字则转为long返回
*
* @param param 默认值
* @return 返回数字或字符串
*/
static Object setValue(String param) {
try {
Matcher isNum = PATTERN.matcher(param);
if (isNum.matches()) {
return Long.parseLong(param);
} else {
return param;
}
} catch (RuntimeException e) {
logger.error("SetValue 函数异常,异常信息:" + e);
}
return null;
}
}

View File

@@ -0,0 +1,199 @@
package com.zdjizhi.utils.hbase;
import com.zdjizhi.common.FlowWriteConfig;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* HBase 工具类
*
* @author qidaijie
*/
public class HBaseUtils {
private static final Log logger = LogFactory.get();
private static Map<String, String> subIdMap = new ConcurrentHashMap<>(83334);
private static Connection connection;
private static Long time;
private static String zookeeperIp;
private static String hBaseTable;
private static HBaseUtils hBaseUtils;
private static void getHBaseInstance() {
hBaseUtils = new HBaseUtils();
}
/**
* 构造函数-新
*/
private HBaseUtils() {
zookeeperIp = FlowWriteConfig.HBASE_ZOOKEEPER_SERVERS;
hBaseTable = FlowWriteConfig.HBASE_TABLE_NAME;
//获取连接
getHbaseConn();
//拉取所有
getAll();
//定时更新
updateHBaseCache();
}
private static void getHbaseConn() {
try {
// 管理Hbase的配置信息
Configuration configuration = HBaseConfiguration.create();
// 设置zookeeper节点
configuration.set("hbase.zookeeper.quorum", zookeeperIp);
configuration.set("hbase.client.retries.number", "3");
configuration.set("hbase.bulkload.retries.number", "3");
configuration.set("zookeeper.recovery.retry", "3");
connection = ConnectionFactory.createConnection(configuration);
time = System.currentTimeMillis();
logger.warn("HBaseUtils get HBase connection,now to getAll().");
} catch (IOException ioe) {
logger.error("HBaseUtils getHbaseConn() IOException===>{" + ioe + "}<===");
} catch (RuntimeException e) {
logger.error("HBaseUtils getHbaseConn() Exception===>{" + e + "}<===");
}
}
/**
* 更新变量
*/
private static void change() {
if (hBaseUtils == null) {
getHBaseInstance();
}
long nowTime = System.currentTimeMillis();
timestampsFilter(time - 1000, nowTime + 500);
}
/**
* 获取变更内容
*
* @param startTime 开始时间
* @param endTime 结束时间
*/
private static void timestampsFilter(Long startTime, Long endTime) {
Long begin = System.currentTimeMillis();
Table table = null;
ResultScanner scanner = null;
Scan scan2 = new Scan();
try {
table = connection.getTable(TableName.valueOf("sub:" + hBaseTable));
scan2.setTimeRange(startTime, endTime);
scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
String key = Bytes.toString(CellUtil.cloneRow(cell)).trim();
String value = Bytes.toString(CellUtil.cloneValue(cell)).trim();
if (subIdMap.containsKey(key)) {
if (!value.equals(subIdMap.get(key))) {
subIdMap.put(key, value);
}
} else {
subIdMap.put(key, value);
}
}
}
Long end = System.currentTimeMillis();
logger.warn("HBaseUtils Now subIdMap.keySet().size() is: " + subIdMap.keySet().size());
logger.warn("HBaseUtils Update cache timeConsuming is: " + (end - begin) + ",BeginTime: " + startTime + ",EndTime: " + endTime);
time = endTime;
} catch (IOException ioe) {
logger.error("HBaseUtils timestampsFilter is IOException===>{" + ioe + "}<===");
} catch (RuntimeException e) {
logger.error("HBaseUtils timestampsFilter is Exception===>{" + e + "}<===");
} finally {
if (scanner != null) {
scanner.close();
}
if (table != null) {
try {
table.close();
} catch (IOException e) {
logger.error("HBase Table Close ERROR! Exception message is:" + e);
}
}
}
}
/**
* 获取所有的 key value
*/
private static void getAll() {
long begin = System.currentTimeMillis();
try {
Table table = connection.getTable(TableName.valueOf("sub:" + hBaseTable));
Scan scan2 = new Scan();
ResultScanner scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
subIdMap.put(Bytes.toString(CellUtil.cloneRow(cell)), Bytes.toString(CellUtil.cloneValue(cell)));
}
}
logger.warn("HBaseUtils Get fullAmount List size->subIdMap.size(): " + subIdMap.size());
logger.warn("HBaseUtils Get fullAmount List size->subIdMap.size() timeConsuming is: " + (System.currentTimeMillis() - begin));
scanner.close();
} catch (IOException ioe) {
logger.error("HBaseUtils getAll() is IOException===>{" + ioe + "}<===");
} catch (RuntimeException e) {
logger.error("HBaseUtils getAll() is Exception===>{" + e + "}<===");
}
}
/**
* 定时更新器:每隔 HBASE_TICK_TUPLE_FREQ_SECS 秒拉取一次 HBase 增量变更,刷新本地缓存
*/
private void updateHBaseCache() {
// ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1,
// new BasicThreadFactory.Builder().namingPattern("hbase-change-pool-%d").daemon(true).build());
ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
executorService.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
try {
change();
} catch (RuntimeException e) {
logger.error("HBaseUtils update hbaseCache is error===>{" + e + "}<===");
}
}
}, 1, FlowWriteConfig.HBASE_TICK_TUPLE_FREQ_SECS, TimeUnit.SECONDS);
}
/**
* 获取 account
*
* @param clientIp client_ip
* @return account
*/
public static String getAccount(String clientIp) {
if (hBaseUtils == null) {
getHBaseInstance();
}
return subIdMap.get(clientIp);
}
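/*
 * Illustrative usage (the IP value is hypothetical): the first call lazily builds the HBase connection
 * and the in-memory cache; later calls are plain map lookups, refreshed every HBASE_TICK_TUPLE_FREQ_SECS seconds.
 *
 *   String account = HBaseUtils.getAccount("10.0.0.1");   // null when the IP has no cached account
 */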
}

View File

@@ -0,0 +1,77 @@
package com.zdjizhi.utils.http;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
/**
* 获取网关schema的工具类
*
* @author qidaijie
*/
public class HttpClientUtil {
// private static final int MAX_STR_LEN = 512000;
private static final Log logger = LogFactory.get();
/**
* 请求网关获取schema
*
* @param http 网关url
* @return schema
*/
public static String requestByGetMethod(String http) {
CloseableHttpClient httpClient = HttpClients.createDefault();
StringBuilder entityStringBuilder;
HttpGet get = new HttpGet(http);
BufferedReader bufferedReader = null;
CloseableHttpResponse httpResponse = null;
try {
httpResponse = httpClient.execute(get);
HttpEntity entity = httpResponse.getEntity();
entityStringBuilder = new StringBuilder();
if (null != entity) {
bufferedReader = new BufferedReader(new InputStreamReader(httpResponse.getEntity().getContent(), "UTF-8"), 8 * 1024);
int intC;
while ((intC = bufferedReader.read()) != -1) {
char c = (char) intC;
if (c == '\n') {
break;
}
entityStringBuilder.append(c);
}
return entityStringBuilder.toString();
}
} catch (IOException e) {
logger.error("Get Schema from Query engine ERROR! Exception message is:" + e);
} finally {
if (httpClient != null) {
try {
httpClient.close();
} catch (IOException e) {
logger.error("Close HTTP Client ERROR! Exception messgae is:" + e);
}
}
if (httpResponse != null) {
try {
httpResponse.close();
} catch (IOException e) {
logger.error("Close httpResponse ERROR! Exception messgae is:" + e);
}
}
if (bufferedReader != null) {
org.apache.commons.io.IOUtils.closeQuietly(bufferedReader);
}
}
return "";
}
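/*
 * Illustrative usage: the method reads the response body only up to the first '\n', so the gateway is
 * expected to return the whole schema JSON on a single line.
 *
 *   String schemaJson = HttpClientUtil.requestByGetMethod(FlowWriteConfig.SCHEMA_HTTP);
 */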
}

View File

@@ -0,0 +1,235 @@
package com.zdjizhi.utils.json;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.http.HttpClientUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.utils.StringUtil;
import net.sf.cglib.beans.BeanGenerator;
import net.sf.cglib.beans.BeanMap;
import java.util.*;
/**
* 使用FastJson解析json的工具类
*
* @author qidaijie
*/
public class JsonParseUtil {
private static final Log logger = LogFactory.get();
/**
* 模式匹配,给定一个类型字符串返回一个类类型
*
* @param type 类型
* @return 类类型
*/
private static Class getClassName(String type) {
Class clazz;
switch (type) {
case "int":
clazz = Integer.class;
break;
case "String":
clazz = String.class;
break;
case "long":
clazz = long.class;
break;
case "Integer":
clazz = Integer.class;
break;
case "double":
clazz = double.class;
break;
case "float":
clazz = float.class;
break;
case "char":
clazz = char.class;
break;
case "byte":
clazz = byte.class;
break;
case "boolean":
clazz = boolean.class;
break;
case "short":
clazz = short.class;
break;
default:
clazz = String.class;
}
return clazz;
}
/**
* 根据反射生成对象的方法
*
* @param properties 反射类用的map
* @return 生成的Object类型的对象
*/
public static Object generateObject(Map properties) {
BeanGenerator generator = new BeanGenerator();
Set keySet = properties.keySet();
for (Object aKeySet : keySet) {
String key = (String) aKeySet;
generator.addProperty(key, (Class) properties.get(key));
}
return generator.create();
}
/**
* 获取属性值的方法
*
* @param obj 对象
* @param property key
* @return 属性的值
*/
public static Object getValue(Object obj, String property) {
BeanMap beanMap = BeanMap.create(obj);
return beanMap.get(property);
}
/**
* 更新属性值的方法
*
* @param obj 对象
* @param property 更新的key
* @param value 更新的值
*/
public static void setValue(Object obj, String property, Object value) {
try {
BeanMap beanMap = BeanMap.create(obj);
beanMap.put(property, value);
} catch (ClassCastException e) {
logger.error("赋予实体类错误类型数据", e);
}
}
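/*
 * Illustrative round trip over the cglib-generated bean (the property name is hypothetical):
 *
 *   HashMap<String, Class> props = new HashMap<>();
 *   props.put("client_ip", String.class);
 *   Object bean = JsonParseUtil.generateObject(props);       // dynamic POJO with a client_ip property
 *   JsonParseUtil.setValue(bean, "client_ip", "10.0.0.1");
 *   Object ip = JsonParseUtil.getValue(bean, "client_ip");   // "10.0.0.1"
 */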
/**
* 通过获取String类型的网关schema链接来获取map用于生成一个Object类型的对象
*
* @param http 网关schema地址
* @return 用于反射生成schema类型的对象的一个map集合
*/
public static HashMap<String, Class> getMapFromHttp(String http) {
HashMap<String, Class> map = new HashMap<>();
String schema = HttpClientUtil.requestByGetMethod(http);
Object data = JSON.parseObject(schema).get("data");
//获取fields并转化为数组数组的每个元素都是一个name doc type
JSONObject schemaJson = JSON.parseObject(data.toString());
JSONArray fields = (JSONArray) schemaJson.get("fields");
for (Object field : fields) {
String filedStr = field.toString();
if (checkKeepField(filedStr)) {
String name = JsonPath.read(filedStr, "$.name").toString();
String type = JsonPath.read(filedStr, "$.type").toString();
//组合用来生成实体类的map
map.put(name, getClassName(type));
}
}
return map;
}
/**
* 判断字段是否需要保留
*
* @param message 单个field-json
* @return true or false
*/
private static boolean checkKeepField(String message) {
boolean isKeepField = true;
boolean isHiveDoc = JSON.parseObject(message).containsKey("doc");
if (isHiveDoc) {
boolean isHiveVi = JsonPath.read(message, "$.doc").toString().contains("visibility");
if (isHiveVi) {
String visibility = JsonPath.read(message, "$.doc.visibility").toString();
if (FlowWriteConfig.VISIBILITY.equals(visibility)) {
isKeepField = false;
}
}
}
return isKeepField;
}
/**
* 根据http链接获取schema解析之后返回一个任务列表 (useList toList funcList paramlist)
*
* @param http 网关url
* @return 任务列表
*/
public static ArrayList<String[]> getJobListFromHttp(String http) {
ArrayList<String[]> list = new ArrayList<>();
String schema = HttpClientUtil.requestByGetMethod(http);
//解析data
Object data = JSON.parseObject(schema).get("data");
//获取fields并转化为数组数组的每个元素都是一个name doc type
JSONObject schemaJson = JSON.parseObject(data.toString());
JSONArray fields = (JSONArray) schemaJson.get("fields");
for (Object field : fields) {
if (JSON.parseObject(field.toString()).containsKey("doc")) {
Object doc = JSON.parseObject(field.toString()).get("doc");
if (JSON.parseObject(doc.toString()).containsKey("format")) {
String name = JSON.parseObject(field.toString()).get("name").toString();
Object format = JSON.parseObject(doc.toString()).get("format");
JSONObject formatObject = JSON.parseObject(format.toString());
String functions = formatObject.get("functions").toString();
String appendTo = null;
String params = null;
if (formatObject.containsKey("appendTo")) {
appendTo = formatObject.get("appendTo").toString();
}
if (formatObject.containsKey("param")) {
params = formatObject.get("param").toString();
}
if (StringUtil.isNotBlank(appendTo) && StringUtil.isBlank(params)) {
String[] functionArray = functions.split(FlowWriteConfig.FORMAT_SPLITTER);
String[] appendToArray = appendTo.split(FlowWriteConfig.FORMAT_SPLITTER);
for (int i = 0; i < functionArray.length; i++) {
list.add(new String[]{name, appendToArray[i], functionArray[i], null});
}
} else if (StringUtil.isNotBlank(appendTo) && StringUtil.isNotBlank(params)) {
String[] functionArray = functions.split(FlowWriteConfig.FORMAT_SPLITTER);
String[] appendToArray = appendTo.split(FlowWriteConfig.FORMAT_SPLITTER);
String[] paramArray = params.split(FlowWriteConfig.FORMAT_SPLITTER);
for (int i = 0; i < functionArray.length; i++) {
list.add(new String[]{name, appendToArray[i], functionArray[i], paramArray[i]});
}
} else {
list.add(new String[]{name, name, functions, params});
}
}
}
}
return list;
}
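/*
 * Illustrative schema field (hypothetical) and the job quad it produces:
 *
 *   {"name":"mail_subject","type":"String",
 *    "doc":{"format":{"functions":"decode_of_base64","appendTo":"mail_subject","param":"mail_subject_charset"}}}
 *
 * -> list gets {"mail_subject", "mail_subject", "decode_of_base64", "mail_subject_charset"}.
 * When "appendTo" is absent the quad degenerates to {name, name, functions, params}; multi-valued
 * "functions"/"appendTo"/"param" entries are split on FORMAT_SPLITTER and paired by index.
 */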
}

View File

@@ -1,11 +1,11 @@
package cn.ac.iie.utils.kafka;
package com.zdjizhi.utils.kafka;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.influxdb.InfluxDbUtils;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.common.KafkaProConfig;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.kafka.clients.producer.*;
import org.apache.log4j.Logger;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
@@ -16,8 +16,8 @@ import java.util.Properties;
* @create 2018-08-13 15:11
*/
public class KafkaLogNtc {
private static Logger logger = Logger.getLogger(KafkaLogNtc.class);
public class KafkaLogSend {
private static final Log logger = LogFactory.get();
/**
* kafka生产者用于向kafka中发送消息
@@ -27,17 +27,17 @@ public class KafkaLogNtc {
/**
* kafka生产者适配器单例用来代理kafka生产者发送消息
*/
private static KafkaLogNtc kafkaLogNtc;
private static KafkaLogSend kafkaLogSend;
private KafkaLogNtc() {
private KafkaLogSend() {
initKafkaProducer();
}
public static KafkaLogNtc getInstance() {
if (kafkaLogNtc == null) {
kafkaLogNtc = new KafkaLogNtc();
public static KafkaLogSend getInstance() {
if (kafkaLogSend == null) {
kafkaLogSend = new KafkaLogSend();
}
return kafkaLogNtc;
return kafkaLogSend;
}
@@ -54,12 +54,11 @@ public class KafkaLogNtc {
}
});
if (errorSum[0] > FlowWriteConfig.MAX_FAILURE_NUM) {
// InfluxDbUtils.sendKafkaFail(list.size());
list.clear();
}
}
kafkaProducer.flush();
logger.warn("Log sent to National Center successfully!!!!!");
logger.debug("Log sent to National Center successfully!!!!!");
}
/**
@@ -67,15 +66,25 @@ public class KafkaLogNtc {
*/
private void initKafkaProducer() {
Properties properties = new Properties();
properties.put("bootstrap.servers", FlowWriteConfig.BOOTSTRAP_SERVERS);
properties.put("bootstrap.servers", FlowWriteConfig.OUTPUT_KAFKA_SERVERS);
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("acks", "1");
properties.put("linger.ms", "2");
properties.put("request.timeout.ms", 60000);
properties.put("batch.size", 262144);
properties.put("buffer.memory", 33554432);
properties.put("compression.type", "snappy");
properties.put("acks", FlowWriteConfig.PRODUCER_ACK);
properties.put("retries", KafkaProConfig.RETRIES);
properties.put("linger.ms", KafkaProConfig.LINGER_MS);
properties.put("request.timeout.ms", KafkaProConfig.REQUEST_TIMEOUT_MS);
properties.put("batch.size", KafkaProConfig.BATCH_SIZE);
properties.put("buffer.memory", KafkaProConfig.BUFFER_MEMORY);
properties.put("max.request.size", KafkaProConfig.MAX_REQUEST_SIZE);
// properties.put("compression.type", FlowWriteConfig.KAFKA_COMPRESSION_TYPE);
/**
* kafka限流配置-20201117
*/
properties.put(ProducerConfig.CLIENT_ID_CONFIG, FlowWriteConfig.PRODUCER_CLIENT_ID);
properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, FlowWriteConfig.PRODUCER_KAFKA_COMPRESSION_TYPE);
kafkaProducer = new KafkaProducer<>(properties);
}
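/*
 * Note on the rate-limit settings above (illustrative; the quota value is an example only): the fixed
 * ProducerConfig.CLIENT_ID_CONFIG is what a broker-side client quota keys on, e.g.
 *
 *   kafka-configs.sh --zookeeper <zk> --alter --entity-type clients \
 *     --entity-name <PRODUCER_CLIENT_ID> --add-config 'producer_byte_rate=10485760'
 *
 * would cap this producer at roughly 10 MB/s; the producer itself only needs the stable client.id set here.
 */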

View File

@@ -1,5 +1,10 @@
package cn.ac.iie.utils.system;
package com.zdjizhi.utils.system;
import com.zdjizhi.utils.StringUtil;
import com.zdjizhi.utils.exception.StreamCompletionException;
import java.io.IOException;
import java.util.Locale;
import java.util.Properties;
@@ -9,15 +14,15 @@ import java.util.Properties;
public final class FlowWriteConfigurations {
// private static Properties propCommon = new Properties();
private static Properties propKafka = new Properties();
private static Properties propService = new Properties();
public static String getStringProperty(Integer type, String key) {
if (type == 0) {
return propService.getProperty(key);
// } else if (type == 1) {
// return propCommon.getProperty(key);
} else if (type == 1) {
return propKafka.getProperty(key);
} else {
return null;
}
@@ -27,8 +32,8 @@ public final class FlowWriteConfigurations {
public static Integer getIntProperty(Integer type, String key) {
if (type == 0) {
return Integer.parseInt(propService.getProperty(key));
// } else if (type == 1) {
// return Integer.parseInt(propCommon.getProperty(key));
} else if (type == 1) {
return Integer.parseInt(propKafka.getProperty(key));
} else {
return null;
}
@@ -37,8 +42,8 @@ public final class FlowWriteConfigurations {
public static Long getLongProperty(Integer type, String key) {
if (type == 0) {
return Long.parseLong(propService.getProperty(key));
// } else if (type == 1) {
// return Long.parseLong(propCommon.getProperty(key));
} else if (type == 1) {
return Long.parseLong(propKafka.getProperty(key));
} else {
return null;
}
@@ -46,9 +51,9 @@ public final class FlowWriteConfigurations {
public static Boolean getBooleanProperty(Integer type, String key) {
if (type == 0) {
return "true".equals(propService.getProperty(key).toLowerCase().trim());
// } else if (type == 1) {
// return "true".equals(propCommon.getProperty(key).toLowerCase().trim());
return StringUtil.equals(propService.getProperty(key).trim().toLowerCase(Locale.ENGLISH), "true");
} else if (type == 1) {
return StringUtil.equals(propKafka.getProperty(key).trim().toLowerCase(Locale.ENGLISH), "true");
} else {
return null;
}
@@ -57,8 +62,9 @@ public final class FlowWriteConfigurations {
static {
try {
propService.load(FlowWriteConfigurations.class.getClassLoader().getResourceAsStream("service_flow_config.properties"));
} catch (Exception e) {
// propCommon = null;
propKafka.load(FlowWriteConfigurations.class.getClassLoader().getResourceAsStream("kafka_config.properties"));
} catch (IOException | StreamCompletionException e) {
propKafka = null;
propService = null;
}
}

View File

@@ -1,4 +1,4 @@
package cn.ac.iie.utils.system;
package com.zdjizhi.utils.system;
import org.apache.storm.Constants;
import org.apache.storm.tuple.Tuple;

View File

@@ -1,7 +1,8 @@
package cn.ac.iie.utils.zookeeper;
package com.zdjizhi.utils.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.SnowflakeId;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.utils.exception.StreamCompletionException;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
@@ -19,7 +20,7 @@ import java.util.concurrent.locks.Lock;
* @author qidaijie
*/
public class DistributedLock implements Lock, Watcher {
private static Logger logger = Logger.getLogger(DistributedLock.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zk = null;
/**
@@ -83,13 +84,13 @@ public class DistributedLock implements Lock, Watcher {
}
try {
if (this.tryLock()) {
System.out.println(Thread.currentThread().getName() + " " + lockName + "获得了锁");
logger.info(Thread.currentThread().getName() + " " + lockName + "获得了锁");
} else {
// 等待锁
waitForLock(waitLock, sessionTimeout);
}
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
logger.error("获取锁异常" + e);
}
}
@@ -122,7 +123,7 @@ public class DistributedLock implements Lock, Watcher {
String prevNode = currentLock.substring(currentLock.lastIndexOf("/") + 1);
waitLock = lockObjects.get(Collections.binarySearch(lockObjects, prevNode) - 1);
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
logger.error("获取锁过程异常" + e);
}
return false;
}
@@ -135,8 +136,8 @@ public class DistributedLock implements Lock, Watcher {
return true;
}
return waitForLock(waitLock, timeout);
} catch (Exception e) {
e.printStackTrace();
} catch (KeeperException | InterruptedException | StreamCompletionException e) {
logger.error("判断是否锁定异常" + e);
}
return false;
}
@@ -161,7 +162,7 @@ public class DistributedLock implements Lock, Watcher {
currentLock = null;
zk.close();
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
logger.error("关闭锁异常" + e);
}
}
@@ -188,32 +189,4 @@ public class DistributedLock implements Lock, Watcher {
}
}
public static void main(String[] args) {
ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
Runnable runnable = new Runnable() {
@Override
public void run() {
DistributedLock lock = null;
try {
lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
// System.out.println(SnowflakeId.generateId());
System.out.println(1);
Thread.sleep(3000);
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
if (lock != null) {
lock.unlock();
}
}
}
};
for (int i = 0; i < 10; i++) {
Thread t = new Thread(runnable);
t.start();
}
}
}
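/*
 * Illustrative usage (mirrors the constructor call in SnowflakeId; the lock name is arbitrary):
 *
 *   DistributedLock lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
 *   try {
 *       lock.lock();
 *       // critical section, e.g. allocating a Snowflake workerId
 *   } finally {
 *       lock.unlock();
 *   }
 */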

View File

@@ -1,8 +1,8 @@
package cn.ac.iie.utils.zookeeper;
package com.zdjizhi.utils.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.commons.lang3.RandomUtils;
import org.apache.log4j.Logger;
import cn.hutool.core.util.StrUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
@@ -13,9 +13,12 @@ import java.util.concurrent.CountDownLatch;
/**
* @author qidaijie
* @Package cn.ac.iie.utils.zookeeper
* @Description:
* @date 2020/11/14 11:28
*/
public class ZookeeperUtils implements Watcher {
private static Logger logger = Logger.getLogger(ZookeeperUtils.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zookeeper;
@@ -25,7 +28,7 @@ public class ZookeeperUtils implements Watcher {
@Override
public void process(WatchedEvent event) {
if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
if (event.getState() == Event.KeeperState.SyncConnected) {
countDownLatch.countDown();
}
}
@@ -36,15 +39,14 @@ public class ZookeeperUtils implements Watcher {
*
* @param path 节点路径
*/
public int modifyNode(String path) {
createNode("/Snowflake", null, ZooDefs.Ids.OPEN_ACL_UNSAFE);
createNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC, "0".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE);
int workerId;
public int modifyNode(String path, String zookeeperIp) {
createNode(path, "0".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, zookeeperIp);
int workerId = 0;
try {
connectZookeeper();
connectZookeeper(zookeeperIp);
Stat stat = zookeeper.exists(path, true);
workerId = Integer.parseInt(getNodeDate(path));
if (workerId > 55) {
if (workerId > 63) {
workerId = 0;
zookeeper.setData(path, "1".getBytes(), stat.getVersion());
} else {
@@ -56,38 +58,38 @@ public class ZookeeperUtils implements Watcher {
}
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
workerId = RandomUtils.nextInt(56, 63);
logger.error("modify error Can't modify," + e);
} finally {
closeConn();
}
logger.error("工作ID是" + workerId);
logger.warn("workerID is" + workerId);
return workerId;
}
/**
* 连接zookeeper
*
* @param host 地址
*/
private void connectZookeeper() {
public void connectZookeeper(String host) {
try {
zookeeper = new ZooKeeper(FlowWriteConfig.ZOOKEEPER_SERVERS, SESSION_TIME_OUT, this);
zookeeper = new ZooKeeper(host, SESSION_TIME_OUT, this);
countDownLatch.await();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
logger.error("Connection to the Zookeeper Exception! message:" + e);
}
}
/**
* 关闭连接
*/
private void closeConn() {
public void closeConn() {
try {
if (zookeeper != null) {
zookeeper.close();
}
} catch (InterruptedException e) {
e.printStackTrace();
logger.error("Close the Zookeeper connection Exception! message:" + e);
}
}
@@ -97,15 +99,15 @@ public class ZookeeperUtils implements Watcher {
* @param path 节点路径
* @return 内容/异常null
*/
private String getNodeDate(String path) {
public String getNodeDate(String path) {
String result = null;
Stat stat = new Stat();
try {
byte[] resByte = zookeeper.getData(path, true, stat);
result = new String(resByte);
result = StrUtil.str(resByte, "UTF-8");
} catch (KeeperException | InterruptedException e) {
logger.error("Get node information exception");
e.printStackTrace();
logger.error("Get node information exception" + e);
}
return result;
}
@@ -115,20 +117,23 @@ public class ZookeeperUtils implements Watcher {
* @param date 节点所存储的数据的byte[]
* @param acls 控制权限策略
*/
private void createNode(String path, byte[] date, List<ACL> acls) {
public void createNode(String path, byte[] date, List<ACL> acls, String zookeeperIp) {
try {
connectZookeeper();
connectZookeeper(zookeeperIp);
Stat exists = zookeeper.exists(path, true);
if (exists == null) {
Stat existsSnowflakeld = zookeeper.exists("/Snowflake", true);
if (existsSnowflakeld == null) {
zookeeper.create("/Snowflake", null, acls, CreateMode.PERSISTENT);
}
zookeeper.create(path, date, acls, CreateMode.PERSISTENT);
} else {
logger.warn("Node already exists!,Don't need to create");
logger.warn("Node already exists ! Don't need to create");
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
logger.error(e);
} finally {
closeConn();
}
}
}

View File

@@ -1,23 +0,0 @@
#Log4j
log4j.rootLogger=info,console,file
# 控制台日志设置
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# 文件日志设置
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=error
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
#路径请用相对路径,做好相关测试输出到应用目下
log4j.appender.file.file=galaxy-name.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n
#MyBatis 配置com.nis.web.dao是mybatis接口所在包
log4j.logger.com.nis.web.dao=debug
#bonecp数据源配置
log4j.category.com.jolbox=debug,console

13
src/main/main.iml Normal file
View File

@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/java" isTestSource="false" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Maven: com.alibaba:fastjson:1.2.59" level="project" />
<orderEntry type="library" name="Maven: com.esotericsoftware:minlog:1.3.0" level="project" />
</component>
</module>

View File

@@ -1,200 +0,0 @@
package cn.ac.iie.test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
public class DistributedLock implements Lock, Watcher {
private static Logger logger = Logger.getLogger(DistributedLock.class);
private ZooKeeper zk = null;
// 根节点
private String ROOT_LOCK = "/locks";
// 竞争的资源
private String lockName;
// 等待的前一个锁
private String WAIT_LOCK;
// 当前锁
private String CURRENT_LOCK;
// 计数器
private CountDownLatch countDownLatch;
private int sessionTimeout = 30000;
private List<Exception> exceptionList = new ArrayList<Exception>();
/**
* 配置分布式锁
*
* @param config 连接的url
* @param lockName 竞争资源
*/
public DistributedLock(String config, String lockName) {
this.lockName = lockName;
try {
// 连接zookeeper
zk = new ZooKeeper(config, sessionTimeout, this);
Stat stat = zk.exists(ROOT_LOCK, false);
if (stat == null) {
// 如果根节点不存在,则创建根节点
zk.create(ROOT_LOCK, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
}
} catch (IOException | InterruptedException | KeeperException e) {
logger.error("Node already exists!");
}
}
// 节点监视器
public void process(WatchedEvent event) {
if (this.countDownLatch != null) {
this.countDownLatch.countDown();
}
}
public void lock() {
if (exceptionList.size() > 0) {
throw new LockException(exceptionList.get(0));
}
try {
if (this.tryLock()) {
// System.out.println(Thread.currentThread().getName() + " " + lockName + "获得了锁");
// ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
// zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC);
} else {
// 等待锁
waitForLock(WAIT_LOCK, sessionTimeout);
}
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
}
}
public boolean tryLock() {
try {
String splitStr = "_lock_";
if (lockName.contains(splitStr)) {
throw new LockException("锁名有误");
}
// 创建临时有序节点
CURRENT_LOCK = zk.create(ROOT_LOCK + "/" + lockName + splitStr, new byte[0],
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
// System.out.println(CURRENT_LOCK + " 已经创建");
// 取所有子节点
List<String> subNodes = zk.getChildren(ROOT_LOCK, false);
// 取出所有lockName的锁
List<String> lockObjects = new ArrayList<String>();
for (String node : subNodes) {
String _node = node.split(splitStr)[0];
if (_node.equals(lockName)) {
lockObjects.add(node);
}
}
Collections.sort(lockObjects);
// System.out.println(Thread.currentThread().getName() + " 的锁是 " + CURRENT_LOCK);
// 若当前节点为最小节点,则获取锁成功
if (CURRENT_LOCK.equals(ROOT_LOCK + "/" + lockObjects.get(0))) {
return true;
}
// 若不是最小节点,则找到自己的前一个节点
String prevNode = CURRENT_LOCK.substring(CURRENT_LOCK.lastIndexOf("/") + 1);
WAIT_LOCK = lockObjects.get(Collections.binarySearch(lockObjects, prevNode) - 1);
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
}
return false;
}
@Override
public boolean tryLock(long timeout, TimeUnit unit) {
try {
if (this.tryLock()) {
return true;
}
return waitForLock(WAIT_LOCK, timeout);
} catch (Exception e) {
e.printStackTrace();
}
return false;
}
// 等待锁
private boolean waitForLock(String prev, long waitTime) throws KeeperException, InterruptedException {
Stat stat = zk.exists(ROOT_LOCK + "/" + prev, true);
if (stat != null) {
// System.out.println(Thread.currentThread().getName() + "等待锁 " + ROOT_LOCK + "/" + prev);
this.countDownLatch = new CountDownLatch(1);
// 计数等待若等到前一个节点消失则precess中进行countDown停止等待获取锁
this.countDownLatch.await(waitTime, TimeUnit.MILLISECONDS);
this.countDownLatch = null;
// System.out.println(Thread.currentThread().getName() + " 等到了锁");
}
return true;
}
public void unlock() {
try {
// System.out.println("释放锁 " + CURRENT_LOCK);
zk.delete(CURRENT_LOCK, -1);
CURRENT_LOCK = null;
zk.close();
} catch (InterruptedException | KeeperException e) {
e.printStackTrace();
}
}
public Condition newCondition() {
return null;
}
public void lockInterruptibly() throws InterruptedException {
this.lock();
}
public class LockException extends RuntimeException {
private static final long serialVersionUID = 1L;
public LockException(String e) {
super(e);
}
public LockException(Exception e) {
super(e);
}
}
public static void main(String[] args) {
ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
Runnable runnable = new Runnable() {
public void run() {
DistributedLock lock = null;
try {
lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC);
} finally {
if (lock != null) {
lock.unlock();
}
}
}
};
for (int i = 0; i < 10; i++) {
Thread t = new Thread(runnable);
t.start();
}
}
}

View File

@@ -1,37 +0,0 @@
package cn.ac.iie.test;
import com.zdjizhi.utils.StringUtil;
import javax.xml.bind.SchemaOutputResolver;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class DomainUtils {
private static Pattern pattern = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
public static void main(String[] args) {
System.out.println(getTopDomain("agoo-report.m.taobao.com"));
}
private static String getTopDomain(String url) {
// try {
//获取值转换为小写
// String host = new URL(url).getHost().toLowerCase();//news.hexun.com
// Pattern pattern = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
Matcher matcher = pattern.matcher(url);
if (matcher.find()){
return matcher.group();
}
// } catch (MalformedURLException e) {
// e.printStackTrace();
// }
return null;
}
}

View File

@@ -1,182 +0,0 @@
package cn.ac.iie.test;
/**
* Twitter_Snowflake<br>
* SnowFlake的结构如下(每部分用-分开):<br>
* 0 - 0000000000 0000000000 0000000000 0000000000 0 - 00000 - 00000 - 000000000000 <br>
* 1位标识由于long基本类型在Java中是带符号的最高位是符号位正数是0负数是1所以id一般是正数最高位是0<br>
* 41位时间截(毫秒级)注意41位时间截不是存储当前时间的时间截而是存储时间截的差值当前时间截 - 开始时间截)
* 得到的值这里的的开始时间截一般是我们的id生成器开始使用的时间由我们程序来指定的如下下面程序IdWorker类的startTime属性。41位的时间截可以使用69年年T = (1L << 41) / (1000L * 60 * 60 * 24 * 365) = 69<br>
* 10位的数据机器位可以部署在1024个节点包括5位datacenterId和5位workerId<br>
* 12位序列毫秒内的计数12位的计数顺序号支持每个节点每毫秒(同一机器,同一时间截)产生4096个ID序号<br>
* 加起来刚好64位为一个Long型。<br>
* SnowFlake的优点是整体上按照时间自增排序并且整个分布式系统内不会产生ID碰撞(由数据中心ID和机器ID作区分)并且效率较高经测试SnowFlake每秒能够产生26万ID左右。
*/
public class SnowflakeIdWorker {
// ==============================Fields===========================================
/**
* 开始时间截 (2015-01-01)
*/
private final long twepoch = timeGen();
/**
* 机器id所占的位数
*/
private final long workerIdBits = 5L;
/**
* 数据标识id所占的位数
*/
private final long datacenterIdBits = 5L;
/**
* 支持的最大机器id结果是31 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数)
*/
private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
/**
* 支持的最大数据标识id结果是31
*/
private final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits);
/**
* 序列在id中占的位数
*/
private final long sequenceBits = 12L;
/**
* 机器ID向左移12位
*/
private final long workerIdShift = sequenceBits;
/**
* 数据标识id向左移17位(12+5)
*/
private final long datacenterIdShift = sequenceBits + workerIdBits;
/**
* 时间截向左移22位(5+5+12)
*/
private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;
/**
* 生成序列的掩码这里为4095 (0b111111111111=0xfff=4095)
*/
private final long sequenceMask = -1L ^ (-1L << sequenceBits);
/**
* 工作机器ID(0~31)
*/
private long workerId;
/**
* 数据中心ID(0~31)
*/
private long datacenterId;
/**
* 毫秒内序列(0~4095)
*/
private long sequence = 0L;
/**
* 上次生成ID的时间截
*/
private long lastTimestamp = -1L;
//==============================Constructors=====================================
/**
* 构造函数
*
* @param workerId 工作ID (0~31)
* @param datacenterId 数据中心ID (0~31)
*/
public SnowflakeIdWorker(long workerId, long datacenterId) {
if (workerId > maxWorkerId || workerId < 0) {
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
if (datacenterId > maxDatacenterId || datacenterId < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
}
this.workerId = workerId;
this.datacenterId = datacenterId;
}
// ==============================Methods==========================================
/**
* 获得下一个ID (该方法是线程安全的)
*
* @return SnowflakeId
*/
public synchronized long nextId() {
long timestamp = timeGen();
//如果当前时间小于上一次ID生成的时间戳说明系统时钟回退过这个时候应当抛出异常
if (timestamp < lastTimestamp) {
throw new RuntimeException(
String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
}
//如果是同一时间生成的,则进行毫秒内序列
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
//毫秒内序列溢出
if (sequence == 0) {
//阻塞到下一个毫秒,获得新的时间戳
timestamp = tilNextMillis(lastTimestamp);
}
}
//时间戳改变,毫秒内序列重置
else {
sequence = 0L;
}
//上次生成ID的时间截
lastTimestamp = timestamp;
//移位并通过或运算拼到一起组成64位的ID
return ((timestamp - twepoch) << timestampLeftShift)
| (datacenterId << datacenterIdShift)
| (workerId << workerIdShift)
| sequence;
}
/**
* 阻塞到下一个毫秒,直到获得新的时间戳
*
* @param lastTimestamp 上次生成ID的时间截
* @return 当前时间戳
*/
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
/**
* 返回以毫秒为单位的当前时间
*
* @return 当前时间(毫秒)
*/
protected long timeGen() {
return System.currentTimeMillis();
}
//==============================Test=============================================
/**
* 测试
*/
public static void main(String[] args) {
SnowflakeIdWorker idWorker = new SnowflakeIdWorker(0, 0);
for (int i = 0; i < 1000; i++) {
long id = idWorker.nextId();
// System.out.println(Long.toBinaryString(id));
System.out.println(id);
}
}
}

View File

@@ -1,49 +0,0 @@
package cn.ac.iie.test;
import cn.ac.iie.utils.system.SnowflakeId;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
class RunnableDemo implements Runnable {
private Thread t;
private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
static {
zookeeperUtils.connectZookeeper("192.168.40.207:2181");
}
@Override
public void run() {
zookeeperUtils.modifyNode("/testNode/UID-TEST");
System.out.println(zookeeperUtils.getNodeDate("/testNode/UID-TEST"));
// zookeeperUtils.closeConn();
}
public void start() {
if (t == null) {
t = new Thread(this);
t.start();
}
}
}
public class TestThread {
public static void main(String[] args) {
RunnableDemo R1 = new RunnableDemo();
RunnableDemo R2 = new RunnableDemo();
// RunnableDemo R3 = new RunnableDemo();
// RunnableDemo R4 = new RunnableDemo();
R1.start();
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
e.printStackTrace();
}
R2.start();
// R3.start();
// R4.start();
}
}

View File

@@ -1,90 +0,0 @@
package cn.ac.iie.test;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;
public class URLUtil {
private final static Set<String> PublicSuffixSet = new HashSet<String>(
Arrays.asList(new String(
"com|org|net|gov|edu|co|tv|mobi|info|asia|xxx|onion|cn|com.cn|edu.cn|gov.cn|net.cn|org.cn|jp|kr|tw|com.hk|hk|com.hk|org.hk|se|com.se|org.se")
.split("\\|")));
private static Pattern IP_PATTERN = Pattern.compile("(\\d{1,3}\\.){3}(\\d{1,3})");
/**
* 获取url的顶级域名
*
* @param url
* @return
*/
public static String getDomainName(URL url) {
String host = url.getHost();
if (host.endsWith(".")) {
host = host.substring(0, host.length() - 1);
}
if (IP_PATTERN.matcher(host).matches()) {
return host;
}
int index = 0;
String candidate = host;
for (; index >= 0; ) {
index = candidate.indexOf('.');
String subCandidate = candidate.substring(index + 1);
if (PublicSuffixSet.contains(subCandidate)) {
return candidate;
}
candidate = subCandidate;
}
return candidate;
}
/**
* 获取url的顶级域名
*
* @param url
* @return
* @throws MalformedURLException
*/
public static String getDomainName(String url) throws MalformedURLException {
return getDomainName(new URL(url));
}
/**
* 判断两个url顶级域名是否相等
*
* @param url1
* @param url2
* @return
*/
public static boolean isSameDomainName(URL url1, URL url2) {
return getDomainName(url1).equalsIgnoreCase(getDomainName(url2));
}
/**
* 判断两个url顶级域名是否相等
*
* @param url1
* @param url2
* @return
* @throws MalformedURLException
*/
public static boolean isSameDomainName(String url1, String url2)
throws MalformedURLException {
return isSameDomainName(new URL(url1), new URL(url2));
}
public static void main(String[] args) throws Exception {
// String urlStr = "http://news.hexun.com/2017-09-23/190978248.html";
String urlStr = "array703-prod.do.dsp.mp.microsoft.com";
System.out.println(getDomainName(urlStr.replace("\uFEFF", "")));
System.out.println(getDomainName(new URL(urlStr.replace("\uFEFF", ""))));
}
}

View File

@@ -1,126 +0,0 @@
package cn.ac.iie.test;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
public class ZookeeperTest implements Watcher {
private static Logger logger = Logger.getLogger(ZookeeperTest.class);
private static ZooKeeper zookeeper;
private static final int SESSION_TIME_OUT = 2000;
// private static Stat stat = new Stat();
private CountDownLatch countDownLatch = new CountDownLatch(1);
public void process(WatchedEvent event) {
if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
countDownLatch.countDown();
}
}
/**
* 连接zookeeper
*
* @param host 地址
*/
private void connectZookeeper(String host) {
try {
zookeeper = new ZooKeeper(host, SESSION_TIME_OUT, this);
countDownLatch.await();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
/**
* @param path 路径
* @return 子节点
*/
private List<String> getChildren(String path) {
try {
return zookeeper.getChildren(path, false);
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
return null;
}
}
/**
* @param path 节点创建的路径
* @param date 节点所存储的数据的byte[]
* @param acls 控制权限策略
*/
private void createNode(String path, byte[] date, List<ACL> acls) {
try {
Stat exists = zookeeper.exists(path, true);
if (exists == null) {
zookeeper.create(path, date, acls, CreateMode.PERSISTENT);
} else {
logger.warn("Node already exists!,Don't need to create");
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
}
}
/**
* 修改节点信息
*
* @param path 节点路径
* @param date 修改的数据
*/
private void modifyNode(String path, byte[] date) {
try {
Stat stat = zookeeper.exists(path, true);
if (stat != null) {
zookeeper.setData(path, date, stat.getVersion());
} else {
logger.error("Node does not exist!,Can't modify");
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
}
}
/**
* 获取节点内容
*
* @param path 节点路径
* @return 内容/异常null
*/
private String getNodeDate(String path) {
String result = null;
Stat stat = new Stat();
try {
byte[] resByte = zookeeper.getData(path, true, stat);
result = new String(resByte);
} catch (KeeperException | InterruptedException e) {
logger.error("Get node information exception");
e.printStackTrace();
}
return result;
}
public static void main(String[] args) {
ZookeeperTest zookeeperTest = new ZookeeperTest();
try {
zookeeperTest.connectZookeeper("192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181");
// zookeeperTest.createNode("/Snowflake", "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE);
// System.out.println(zookeeperTest.getNodeDate("/testNode/UID-TEST"));
zookeeperTest.modifyNode("/Snowflake/SESSION-TEST-LOG", "0".getBytes());
// System.out.println(zookeeperTest.getNodeDate("/testNode/UID-TEST"));
} catch (Exception e) {
e.printStackTrace();
}
}
}

View File

@@ -1,12 +0,0 @@
<shard>
<!-- Optional. Shard weight when writing data. Default: 1. -->
<weight>1</weight>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<internal_replication>false</internal_replication>
<replica>
<host>{ip}</host>
<port>9001</port>
<user>default</user>
<password>{rootpassword}</password>
</replica>
</shard>

View File

@@ -1,52 +0,0 @@
package cn.ac.iie.test;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Pong;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class influxQueryTest {
private static InfluxDB client;
public static void main(String[] args) {
Query query = new Query("select * from test", "deltest");
QueryResult a = getClient().query(query);
// List<dataInfo> lists = new ArrayList<dataInfo>();
for (QueryResult.Result result : a.getResults()) {
List<QueryResult.Series> series = result.getSeries();
for (QueryResult.Series serie : series) {
List<List<Object>> values = serie.getValues();//字段字集合
List<String> colums = serie.getColumns();//字段名
System.out.println("colums:" + colums);
for (List<Object> n : values) {
System.out.println("value:" + n);
}
//lists.addAll(getQueryData(colums,values));
}
System.out.println("数据长度:" + series.size());
}
}
/**
* Connect to the time-series database and return an InfluxDB client.
*/
public static InfluxDB getClient() {
client = InfluxDBFactory.connect("http://192.168.40.207:8086", "test", "123456");
Pong pong = client.ping();
if (pong != null) {
System.out.println("Influx数据库连接成功");
} else {
return null;
}
// client.createDatabase("testDB");
return client;
}
}


@@ -1,46 +0,0 @@
package cn.ac.iie.test;
import cn.ac.iie.bean.SessionRecordLog;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.junit.Test;
import javax.servlet.http.HttpServletRequest;
import java.math.BigInteger;
import java.util.Arrays;
public class test {
public static void main(String[] args) {
String message = "{\"str_ea_m-t-r-a-ceid\":\"JSON\"}";
SessionRecordLog sessionRecordLog = JSONObject.parseObject(message, SessionRecordLog.class);
System.out.println(sessionRecordLog.getStream_trace_id());
String message2 = "{\"streamtraceid\":\"JSON\"}";
SessionRecordLog sessionRecordLog2 = JSONObject.parseObject(message2, SessionRecordLog.class);
System.out.println(sessionRecordLog2.getStream_trace_id());
JSONObject jsonObject = JSON.parseObject(message);
System.out.println("\n" + Arrays.toString(jsonObject.keySet().toArray()));
HttpServletRequest request = null;
if (request != null) {
String contextPath = request.getScheme() + "://" + request.getServerName() + ":" + request.getServerPort() + request.getContextPath();
}
System.out.println(System.currentTimeMillis() / 1000);
}
@Test
public void test2() {
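// Rough estimate of how many years a snowflake-style millisecond timestamp field can cover:
// the commented 41-bit strings match the standard snowflake width (~69 years),
// while the 39-bit strings used below yield roughly 17 years.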
// String minTimeStampStr = "00000000000000000000000000000000000000000";
String minTimeStampStr = "000000000000000000000000000000000000000";
long minTimeStamp = new BigInteger(minTimeStampStr, 2).longValue();
// String maxTimeStampStr = "11111111111111111111111111111111111111111";
String maxTimeStampStr = "111111111111111111111111111111111111111";
long maxTimeStamp = new BigInteger(maxTimeStampStr, 2).longValue();
long oneYearMills = 1L * 1000 * 60 * 60 * 24 * 365;
System.out.println((maxTimeStamp - minTimeStamp) / oneYearMills);
}
}


@@ -1,26 +0,0 @@
package cn.ac.iie.test.zookeeper;
import java.util.concurrent.TimeUnit;
public interface DistributedLock {
/**
* Acquire the lock, waiting until it becomes available.
*/
public void acquire() throws Exception;
/**
* Acquire the lock, waiting at most the given timeout.
*
* @param time maximum time to wait
* @param unit unit of the time parameter
* @return whether the lock was acquired
* @throws Exception if the lock cannot be requested
*/
public boolean acquire(long time, TimeUnit unit) throws Exception;
/**
* Release the lock.
*
* @throws Exception if the lock cannot be released
*/
public void release() throws Exception;
}
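// A minimal usage sketch for this interface (ZkDistributedLock is a hypothetical
// implementation named only for illustration; the deleted code does not provide one):
// DistributedLock lock = new ZkDistributedLock("/locks/session-log");
// if (lock.acquire(5, TimeUnit.SECONDS)) {
//     try {
//         // critical section guarded by the ZooKeeper-backed lock
//     } finally {
//         lock.release();
//     }
// }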


@@ -1,92 +0,0 @@
package cn.ac.iie.test.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.influxdb.InfluxDbUtils;
import org.apache.kafka.clients.producer.*;
import org.apache.log4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* Writes logs produced by the NTC system configuration to the national data center.
*
* @author Administrator
* @create 2018-08-13 15:11
*/
public class KafkaLogNtc {
private static Logger logger = Logger.getLogger(KafkaLogNtc.class);
/**
* Kafka producer used to send messages to Kafka.
*/
private static Producer<String, String> kafkaProducer;
/**
* Singleton adapter that delegates message sending to the Kafka producer.
*/
private static KafkaLogNtc kafkaLogNtc;
private KafkaLogNtc() {
initKafkaProducer();
}
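// Lazily creates the singleton; not synchronized, so it is only safe when called from a single thread (as in main below).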
public static KafkaLogNtc getInstance() {
if (kafkaLogNtc == null) {
kafkaLogNtc = new KafkaLogNtc();
}
return kafkaLogNtc;
}
public void sendMessage(List<String> list) {
final int[] errorSum = {0};
for (String value : list) {
kafkaProducer.send(new ProducerRecord<>("topic001", value), new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null) {
logger.error("写入" + FlowWriteConfig.RESULTS_OUTPUT_TOPIC + "出现异常", exception);
errorSum[0]++;
}
}
});
if (errorSum[0] > FlowWriteConfig.MAX_FAILURE_NUM) {
InfluxDbUtils.sendKafkaFail(list.size());
list.clear();
}
}
kafkaProducer.flush();
logger.warn("Log sent to National Center successfully!!!!!");
}
/**
* Initialize the Kafka message producer from the producer configuration; done only once.
*/
private void initKafkaProducer() {
Properties properties = new Properties();
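// Producer tuning below: acks=1 (leader acknowledgment only), 2 ms linger, 256 KB batches
// and snappy compression trade a little latency for throughput.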
properties.put("bootstrap.servers", "kafka1:9093");
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("acks", "1");
properties.put("linger.ms", "2");
properties.put("request.timeout.ms", 60000);
properties.put("batch.size", 262144);
properties.put("buffer.memory", 33554432);
properties.put("compression.type", "snappy");
kafkaProducer = new KafkaProducer<>(properties);
}
public static void main(String[] args) {
KafkaLogNtc kafkaLogNtc = KafkaLogNtc.getInstance();
List<String> list = new ArrayList<>();
list.add("a");
list.add("b");
list.add("c");
kafkaLogNtc.sendMessage(list);
}
}


@@ -1,16 +0,0 @@
package cn.ac.iie.test.zookeeper;
import com.zdjizhi.utils.IpLookup;
public class test {
private static IpLookup ipLookup = new IpLookup.Builder(false)
.loadDataFileV4("Kazakhstan.mmdb")
.loadDataFileV6("Kazakhstan.mmdb")
.build();
public static void main(String[] args) {
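// 256.5.5.5 is outside the valid IPv4 range; this call presumably exercises the lookup's handling of invalid input.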
System.out.println(ipLookup.cityLookupDetail("256.5.5.5"));
}
}