Revert "Merge branch 'tsg_galaxy_com_schema_v3.0.20191217' into 'master'"

This reverts merge request !2
李玺康
2020-02-25 15:12:56 +08:00
parent 2c448492c6
commit 94f14a0c59
39 changed files with 1441 additions and 1860 deletions

pom.xml
View File

@@ -14,7 +14,7 @@
<repository>
<id>nexus</id>
<name>Team Nexus Repository</name>
<url>http://192.168.40.125:8099/content/groups/public</url>
<url>http://192.168.10.125:8099/content/groups/public</url>
</repository>
</repositories>
@@ -67,7 +67,6 @@
<directory>properties</directory>
<includes>
<include>**/*.properties</include>
<!--<include>**/*.xml</include>-->
</includes>
<filtering>false</filtering>
</resource>
@@ -85,8 +84,6 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<kafka.version>1.0.0</kafka.version>
<storm.version>1.0.2</storm.version>
<hbase.version>2.2.1</hbase.version>
<hadoop.version>2.7.1</hadoop.version>
</properties>
<dependencies>
@@ -132,6 +129,12 @@
<version>${storm.version}</version>
</dependency>
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.8.1</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@@ -142,18 +145,13 @@
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.59</version>
</dependency>
<dependency>
<groupId>cglib</groupId>
<artifactId>cglib-nodep</artifactId>
<version>3.2.4</version>
<version>1.2.47</version>
</dependency>
<dependency>
<groupId>com.zdjizhi</groupId>
<artifactId>galaxy</artifactId>
<version>1.0.2</version>
<version>1.0.1</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -183,110 +181,6 @@
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>${hbase.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-server -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${hbase.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<version>5.3.2</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.2</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.4.1</version>
</dependency>
</dependencies>
</project>

View File

@@ -1,71 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/opt/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131702</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.logfile.size</name>
<value>10000000</value>
<description>The max size of each log file</description>
</property>
<property>
<name>hadoop.logfile.count</name>
<value>1</value>
<description>The max number of log files</description>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
<name>fs.hdfs.impl</name>
<value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
<description>The FileSystem for hdfs: uris.</description>
</property>
<property>
<name>io.compression.codecs</name>
<value>com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec</value>
</property>
<property>
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
</configuration>

View File

@@ -1,77 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase-1.4.9</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>192.168.40.119,192.168.40.122,192.168.40.123</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>60010</value>
</property>
<!-- Enable schema support, mapping to HBase namespaces -->
<property>
<name>phoenix.schema.isNamespaceMappingEnabled</name>
<value>true</value>
</property>
<property>
<name>phoenix.schema.mapSystemTablesToNamespace</name>
<value>true</value>
</property>
<property>
<name>hbase.client.keyvalue.maxsize</name>
<value>99428800</value>
</property>
<property>
<name>hbase.server.keyvalue.maxsize</name>
<value>99428800</value>
</property>
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>
<property>
<name>phoenix.query.timeoutMs</name>
<value>1800000</value>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>1200000</value>
</property>
<property>
<name>hbase.client.scanner.caching</name>
<value>1000</value>
</property>
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>1200000</value>
</property>
</configuration>
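For orientation, a minimal client-side sketch (illustrative only, not part of this commit) of opening an HBase connection against the ZooKeeper quorum configured above; only the quorum value comes from this file, everything else is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
public class HBaseConnectionSketch {
    public static void main(String[] args) throws Exception {
        // Quorum taken from hbase.zookeeper.quorum above; all other settings use HBase defaults.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.40.119,192.168.40.122,192.168.40.123");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            System.out.println("connected: " + !connection.isClosed());
        }
    }
}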

View File

@@ -1,116 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/ceiec/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/ceiec/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>192.168.40.119:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 (the host where nn1 runs) -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>192.168.40.119:8020</value>
</property>
<!-- HTTP address of nn1 (externally accessible address) -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>192.168.40.119:50070</value>
</property>
<!-- RPC address of nn2 (the host where nn2 runs) -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>192.168.40.122:8020</value>
</property>
<!-- HTTP address of nn2 (externally accessible address) -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>192.168.40.122:50070</value>
</property>
<!-- Location where the NameNode metadata (edits) is stored on the JournalNodes (usually co-deployed with ZooKeeper) -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://192.168.40.119:8485;192.168.40.122:8485;192.168.40.123:8485/ns1</value>
</property>
<!-- Local disk directory where the JournalNode stores its data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/ceiec/hadoop/journal</value>
</property>
<!-- Java class that HDFS clients use to reach the NameNode through a proxy; it determines which NameNode is currently Active -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing method for automatic failover; several methods exist (see the official docs). Here sshfence is used, which logs in remotely and kills the stale process -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- Passwordless SSH private key, required only when the sshfence mechanism is used -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- Timeout for the sshfence mechanism; like the property above, it can be omitted if a script-based failover method is used -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- Enables automatic failover; can be left unset if automatic failover is not used -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>

View File

@@ -0,0 +1,19 @@
#***************** Jedis connection settings *********************
# Redis server IP
redis.ip=192.168.40.123
# Redis server port
redis.port=6379
# Timeout (ms) for establishing a connection to the server
redis.timeout=3000
#************************ Jedis pool settings *******************
# Maximum number of active Jedis connections
redis.pool.maxActive=200
# Maximum number of idle Jedis connections
redis.pool.maxIdle=5
# Maximum time (ms) to wait for a connection when the pool is exhausted; the default -1 means wait forever.
# If the wait time is exceeded, a JedisConnectionException is thrown
redis.pool.maxWait=-1
# Whether to validate connections when borrowing from the pool
redis.pool.testOnBorrow=true
# Whether to validate connections when returning them to the pool
redis.pool.testOnReturn=true
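A minimal usage sketch (illustrative only; assumes Jedis 2.8.x on the classpath and that the file above is available as redis_config.properties) of how these keys map onto a Jedis connection pool:
import java.util.Properties;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
public class RedisPoolSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumes redis_config.properties is on the classpath.
        props.load(RedisPoolSketch.class.getClassLoader().getResourceAsStream("redis_config.properties"));
        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(Integer.parseInt(props.getProperty("redis.pool.maxActive")));
        poolConfig.setMaxIdle(Integer.parseInt(props.getProperty("redis.pool.maxIdle")));
        poolConfig.setMaxWaitMillis(Long.parseLong(props.getProperty("redis.pool.maxWait")));
        poolConfig.setTestOnBorrow(Boolean.parseBoolean(props.getProperty("redis.pool.testOnBorrow")));
        poolConfig.setTestOnReturn(Boolean.parseBoolean(props.getProperty("redis.pool.testOnReturn")));
        JedisPool pool = new JedisPool(poolConfig,
                props.getProperty("redis.ip"),
                Integer.parseInt(props.getProperty("redis.port")),
                Integer.parseInt(props.getProperty("redis.timeout")));
        // Borrow a connection, ping the server, and return it to the pool.
        try (Jedis jedis = pool.getResource()) {
            System.out.println(jedis.ping());
        }
        pool.close();
    }
}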

View File

@@ -1,59 +1,49 @@
# Kafka broker addresses
#bootstrap.servers=192.168.40.119:9092,192.168.40.122:9092,192.168.40.123:9092
bootstrap.servers=192.168.40.151:9092
#bootstrap.servers=10.4.35.7:9092,10.4.35.8:9092,10.4.35.9:9092
bootstrap.servers=192.168.6.200:9093,192.168.6.200:9094,192.168.6.200:9095
# ZooKeeper addresses
zookeeper.servers=192.168.40.151:2181
#zookeeper.servers=192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181
# HBase ZooKeeper addresses
#hbase.zookeeper.servers=192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181
hbase.zookeeper.servers=192.168.40.151:2181
# HBase table name
hbase.table.name=subscriber_info
zookeeper.servers=192.168.6.200:2181
#zookeeper.servers=192.168.40.207:2181
#latest/earliest
auto.offset.reset=latest
# Topic name on the Kafka broker
kafka.topic=SECURITY-EVENT-LOG
kafka.topic=SESSION-RECORD-LOG
#kafka.topic=Snowflake-test
# Consumer group used when reading the topic; it stores this spout's consumed offsets (it can be named after the topology) so the next read does not re-consume the same data
group.id=security-policy-200224
group.id=session-record-log-z
# Output topic
results.output.topic=SECURITY-EVENT-COMPLETED-LOG
#results.output.topic=SESSION-TEST-COMPLETED-LOG
results.output.topic=SESSION-RECORD-COMPLETED-LOG
# Storm topology workers
topology.workers=1
# Spout parallelism; recommended to match the number of Kafka partitions
spout.parallelism=1
spout.parallelism=3
# Parallelism of the enrichment bolt; a multiple of the worker count
datacenter.bolt.parallelism=1
datacenter.bolt.parallelism=3
# Parallelism for writing to Kafka (10)
kafka.bolt.parallelism=1
# Parallelism for writing to Kafka
kafka.bolt.parallelism=3
# Path to the IP geolocation library
#ip.library=/home/ceiec/topology/dat/
#ip.library=D:\\workerSpace\\K18-Phase2\\3.0.2019115\\log-stream-completion\\
ip.library=D:\\dat\\
ip.library=/dat/
# Number of records per Kafka batch
batch.insert.num=2000
# Location of the gateway schema
schema.http=http://192.168.40.151:9999/metadata/schema/v1/fields/security_event_log
batch.insert.num=5000
# Data center UID
data.center.id.num=15
# Tick tuple frequency (seconds)
topology.tick.tuple.freq.secs=5
# HBase cache refresh interval (seconds)
hbase.tick.tuple.freq.secs=60
# Caps the spout intake rate when bolt throughput is the bottleneck; in theory only effective when acking is enabled
topology.config.max.spout.pending=150000
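For reference, a hedged sketch of how the topology-level keys above are typically applied to an org.apache.storm.Config at submission time; the constant and setter names come from the Storm API, while the literal values simply mirror this file:
import org.apache.storm.Config;
public class TopologyConfigSketch {
    public static Config build() {
        Config conf = new Config();
        conf.setNumWorkers(1);                              // topology.workers
        conf.setMaxSpoutPending(150000);                    // topology.config.max.spout.pending
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 5);  // topology.tick.tuple.freq.secs
        return conf;
    }
}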

View File

@@ -0,0 +1,672 @@
package cn.ac.iie.bean;
import com.alibaba.fastjson.annotation.JSONField;
import com.alibaba.fastjson.support.spring.annotation.FastJsonFilter;
/**
* @author qidaijie
*/
public class SessionRecordLog {
private long uid;
private int policy_id;
private long action;
private int start_time;
private int end_time;
private long recv_time;
private String trans_proto;
private String app_proto;
private int addr_type;
private String server_ip;
private String client_ip;
private int server_port;
private int client_port;
private int service;
private int entrance_id;
private int device_id;
private int Link_id;
private String isp;
private int encap_type;
private int direction;
private int stream_dir;
private String cap_ip;
private String addr_list;
private String server_location;
private String client_location;
private String client_asn;
private String server_asn;
private String subscribe_id;
private long con_duration_ms;
private String url;
private String host;
private String domain;
private String category;
private String req_line;
private String res_line;
private String cookie;
private String referer;
private String user_agent;
private String content_len;
private String content_type;
private String set_cookie;
private String req_header;
private String resp_header;
private String req_body_key;
private String req_body;
private String res_body_key;
private String resp_body;
private String version;
private String sni;
private String san;
private String cn;
private int app_id;
private int protocol_id;
private long con_latency_ms;
private int pinningst;
private int intercept_state;
private long ssl_server_side_latency;
private long ssl_client_side_latency;
private String ssl_server_side_version;
private String ssl_client_side_version;
private int ssl_cert_verify;
private String stream_trace_id;
private String ssl_error;
private long c2s_pkt_num;
private long S2c_pkt_num;
private long c2s_byte_num;
private long s2c_byte_num;
private String nas_ip;
private String framed_ip;
private String account;
private int packet_type;
private int has_dup_traffic;
private String stream_error;
public SessionRecordLog() {
}
public long getUid() {
return uid;
}
public void setUid(long uid) {
this.uid = uid;
}
public int getPolicy_id() {
return policy_id;
}
public void setPolicy_id(int policy_id) {
this.policy_id = policy_id;
}
public long getAction() {
return action;
}
public void setAction(long action) {
this.action = action;
}
public int getStart_time() {
return start_time;
}
public void setStart_time(int start_time) {
this.start_time = start_time;
}
public int getEnd_time() {
return end_time;
}
public void setEnd_time(int end_time) {
this.end_time = end_time;
}
public String getSsl_error() {
return ssl_error;
}
public void setSsl_error(String ssl_error) {
this.ssl_error = ssl_error;
}
public String getApp_proto() {
return app_proto;
}
public void setApp_proto(String app_proto) {
this.app_proto = app_proto;
}
public long getRecv_time() {
return recv_time;
}
public void setRecv_time(long recv_time) {
this.recv_time = recv_time;
}
public String getTrans_proto() {
return trans_proto;
}
public void setTrans_proto(String trans_proto) {
this.trans_proto = trans_proto;
}
public int getAddr_type() {
return addr_type;
}
public void setAddr_type(int addr_type) {
this.addr_type = addr_type;
}
public String getServer_ip() {
return server_ip;
}
public void setServer_ip(String server_ip) {
this.server_ip = server_ip;
}
public String getClient_ip() {
return client_ip;
}
public void setClient_ip(String client_ip) {
this.client_ip = client_ip;
}
public int getServer_port() {
return server_port;
}
public void setServer_port(int server_port) {
this.server_port = server_port;
}
public int getClient_port() {
return client_port;
}
public void setClient_port(int client_port) {
this.client_port = client_port;
}
public int getService() {
return service;
}
public void setService(int service) {
this.service = service;
}
public int getEntrance_id() {
return entrance_id;
}
public void setEntrance_id(int entrance_id) {
this.entrance_id = entrance_id;
}
public int getDevice_id() {
return device_id;
}
public void setDevice_id(int device_id) {
this.device_id = device_id;
}
public int getLink_id() {
return Link_id;
}
public void setLink_id(int link_id) {
Link_id = link_id;
}
public String getIsp() {
return isp;
}
public void setIsp(String isp) {
this.isp = isp;
}
public int getEncap_type() {
return encap_type;
}
public void setEncap_type(int encap_type) {
this.encap_type = encap_type;
}
public int getDirection() {
return direction;
}
public void setDirection(int direction) {
this.direction = direction;
}
public int getStream_dir() {
return stream_dir;
}
public void setStream_dir(int stream_dir) {
this.stream_dir = stream_dir;
}
public String getCap_ip() {
return cap_ip;
}
public void setCap_ip(String cap_ip) {
this.cap_ip = cap_ip;
}
public String getAddr_list() {
return addr_list;
}
public void setAddr_list(String addr_list) {
this.addr_list = addr_list;
}
public String getServer_location() {
return server_location;
}
public void setServer_location(String server_location) {
this.server_location = server_location;
}
public String getClient_location() {
return client_location;
}
public void setClient_location(String client_location) {
this.client_location = client_location;
}
public String getClient_asn() {
return client_asn;
}
public void setClient_asn(String client_asn) {
this.client_asn = client_asn;
}
public String getServer_asn() {
return server_asn;
}
public void setServer_asn(String server_asn) {
this.server_asn = server_asn;
}
public String getSubscribe_id() {
return subscribe_id;
}
public void setSubscribe_id(String subscribe_id) {
this.subscribe_id = subscribe_id;
}
public long getCon_duration_ms() {
return con_duration_ms;
}
public void setCon_duration_ms(long con_duration_ms) {
this.con_duration_ms = con_duration_ms;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public String getDomain() {
return domain;
}
public void setDomain(String domain) {
this.domain = domain;
}
public String getCategory() {
return category;
}
public void setCategory(String category) {
this.category = category;
}
public String getReq_line() {
return req_line;
}
public void setReq_line(String req_line) {
this.req_line = req_line;
}
public String getRes_line() {
return res_line;
}
public void setRes_line(String res_line) {
this.res_line = res_line;
}
public String getCookie() {
return cookie;
}
public void setCookie(String cookie) {
this.cookie = cookie;
}
public String getReferer() {
return referer;
}
public void setReferer(String referer) {
this.referer = referer;
}
public String getUser_agent() {
return user_agent;
}
public void setUser_agent(String user_agent) {
this.user_agent = user_agent;
}
public String getContent_len() {
return content_len;
}
public void setContent_len(String content_len) {
this.content_len = content_len;
}
public String getContent_type() {
return content_type;
}
public void setContent_type(String content_type) {
this.content_type = content_type;
}
public String getSet_cookie() {
return set_cookie;
}
public void setSet_cookie(String set_cookie) {
this.set_cookie = set_cookie;
}
public String getReq_header() {
return req_header;
}
public void setReq_header(String req_header) {
this.req_header = req_header;
}
public String getResp_header() {
return resp_header;
}
public void setResp_header(String resp_header) {
this.resp_header = resp_header;
}
public String getReq_body_key() {
return req_body_key;
}
public void setReq_body_key(String req_body_key) {
this.req_body_key = req_body_key;
}
public String getReq_body() {
return req_body;
}
public void setReq_body(String req_body) {
this.req_body = req_body;
}
public String getRes_body_key() {
return res_body_key;
}
public void setRes_body_key(String res_body_key) {
this.res_body_key = res_body_key;
}
public String getResp_body() {
return resp_body;
}
public void setResp_body(String resp_body) {
this.resp_body = resp_body;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getSni() {
return sni;
}
public void setSni(String sni) {
this.sni = sni;
}
public String getSan() {
return san;
}
public void setSan(String san) {
this.san = san;
}
public String getCn() {
return cn;
}
public void setCn(String cn) {
this.cn = cn;
}
public int getApp_id() {
return app_id;
}
public void setApp_id(int app_id) {
this.app_id = app_id;
}
public int getProtocol_id() {
return protocol_id;
}
public void setProtocol_id(int protocol_id) {
this.protocol_id = protocol_id;
}
public int getIntercept_state() {
return intercept_state;
}
public void setIntercept_state(int intercept_state) {
this.intercept_state = intercept_state;
}
public long getSsl_server_side_latency() {
return ssl_server_side_latency;
}
public void setSsl_server_side_latency(long ssl_server_side_latency) {
this.ssl_server_side_latency = ssl_server_side_latency;
}
public long getSsl_client_side_latency() {
return ssl_client_side_latency;
}
public void setSsl_client_side_latency(long ssl_client_side_latency) {
this.ssl_client_side_latency = ssl_client_side_latency;
}
public String getSsl_server_side_version() {
return ssl_server_side_version;
}
public void setSsl_server_side_version(String ssl_server_side_version) {
this.ssl_server_side_version = ssl_server_side_version;
}
public String getSsl_client_side_version() {
return ssl_client_side_version;
}
public void setSsl_client_side_version(String ssl_client_side_version) {
this.ssl_client_side_version = ssl_client_side_version;
}
public int getSsl_cert_verify() {
return ssl_cert_verify;
}
public void setSsl_cert_verify(int ssl_cert_verify) {
this.ssl_cert_verify = ssl_cert_verify;
}
public String getStream_trace_id() {
return stream_trace_id;
}
public void setStream_trace_id(String stream_trace_id) {
this.stream_trace_id = stream_trace_id;
}
public long getCon_latency_ms() {
return con_latency_ms;
}
public void setCon_latency_ms(long con_latency_ms) {
this.con_latency_ms = con_latency_ms;
}
public int getPinningst() {
return pinningst;
}
public void setPinningst(int pinningst) {
this.pinningst = pinningst;
}
public long getC2s_pkt_num() {
return c2s_pkt_num;
}
public void setC2s_pkt_num(long c2s_pkt_num) {
this.c2s_pkt_num = c2s_pkt_num;
}
public long getS2c_pkt_num() {
return S2c_pkt_num;
}
public void setS2c_pkt_num(long s2c_pkt_num) {
S2c_pkt_num = s2c_pkt_num;
}
public long getC2s_byte_num() {
return c2s_byte_num;
}
public void setC2s_byte_num(long c2s_byte_num) {
this.c2s_byte_num = c2s_byte_num;
}
public long getS2c_byte_num() {
return s2c_byte_num;
}
public void setS2c_byte_num(long s2c_byte_num) {
this.s2c_byte_num = s2c_byte_num;
}
public String getNas_ip() {
return nas_ip;
}
public void setNas_ip(String nas_ip) {
this.nas_ip = nas_ip;
}
public String getFramed_ip() {
return framed_ip;
}
public void setFramed_ip(String framed_ip) {
this.framed_ip = framed_ip;
}
public String getAccount() {
return account;
}
public void setAccount(String account) {
this.account = account;
}
public int getPacket_type() {
return packet_type;
}
public void setPacket_type(int packet_type) {
this.packet_type = packet_type;
}
public int getHas_dup_traffic() {
return has_dup_traffic;
}
public void setHas_dup_traffic(int has_dup_traffic) {
this.has_dup_traffic = has_dup_traffic;
}
public String getStream_error() {
return stream_error;
}
public void setStream_error(String stream_error) {
this.stream_error = stream_error;
}
}
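A small usage sketch (illustrative only; the JSON payload is made up) showing how this bean round-trips through fastjson, which is how the enrichment bolt consumes and re-emits records:
import com.alibaba.fastjson.JSONObject;
import cn.ac.iie.bean.SessionRecordLog;
public class SessionRecordLogSketch {
    public static void main(String[] args) {
        // Hypothetical input message with only a few fields set.
        String message = "{\"server_ip\":\"1.2.3.4\",\"client_ip\":\"5.6.7.8\",\"host\":\"example.com\"}";
        SessionRecordLog log = JSONObject.parseObject(message, SessionRecordLog.class);
        log.setRecv_time(System.currentTimeMillis() / 1000);
        System.out.println(JSONObject.toJSONString(log));
    }
}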

View File

@@ -1,6 +1,5 @@
package cn.ac.iie.bolt.radius;
package cn.ac.iie.bolt;
import cn.ac.iie.common.FlowWriteConfig;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
@@ -13,17 +12,16 @@ import org.apache.storm.tuple.Values;
import java.util.Map;
import static cn.ac.iie.utils.general.TransFormUtils.dealCommonMessage;
import static cn.ac.iie.utils.general.TransFormUtils.getJsonMessage;
/**
* Connection-record log enrichment
*
* @author qidaijie
*/
public class RadiusCompletionBolt extends BaseBasicBolt {
private final static Logger logger = Logger.getLogger(RadiusCompletionBolt.class);
private static final long serialVersionUID = -3657802387129063952L;
public class ConnCompletionBolt extends BaseBasicBolt {
private static final long serialVersionUID = -1059151670138465894L;
private final static Logger logger = Logger.getLogger(ConnCompletionBolt.class);
@Override
public void prepare(Map stormConf, TopologyContext context) {
@@ -35,15 +33,13 @@ public class RadiusCompletionBolt extends BaseBasicBolt {
try {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
basicOutputCollector.emit(new Values(dealCommonMessage(message)));
basicOutputCollector.emit(new Values(getJsonMessage(message)));
}
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while receiving/parsing the message");
e.printStackTrace();
logger.error("Exception while receiving/parsing the message", e);
}
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("connLog"));

View File

@@ -24,11 +24,11 @@ import java.util.Map;
* @date 2018/8/14
*/
public class NtcLogSendBolt extends BaseBasicBolt {
private static final long serialVersionUID = -3663610927224396615L;
private static final long serialVersionUID = 3940515789830317517L;
private static Logger logger = Logger.getLogger(NtcLogSendBolt.class);
private List<String> list;
private KafkaLogNtc kafkaLogNtc;
// private static long successfulSum = 0;
private static long successfulSum = 0;
@Override
@@ -43,11 +43,11 @@ public class NtcLogSendBolt extends BaseBasicBolt {
if (TupleUtils.isTick(tuple)) {
if (list.size() != 0) {
kafkaLogNtc.sendMessage(list);
// successfulSum += list.size();
successfulSum += list.size();
list.clear();
}
// basicOutputCollector.emit(new Values(successfulSum));
// successfulSum = 0L;
basicOutputCollector.emit(new Values(successfulSum));
successfulSum = 0L;
} else {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
@@ -55,12 +55,12 @@ public class NtcLogSendBolt extends BaseBasicBolt {
}
if (list.size() == FlowWriteConfig.BATCH_INSERT_NUM) {
kafkaLogNtc.sendMessage(list);
// successfulSum += list.size();
successfulSum += list.size();
list.clear();
}
}
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while sending logs to Kafka");
logger.error("Exception while sending logs to Kafka", e);
e.printStackTrace();
}
}
@@ -74,7 +74,7 @@ public class NtcLogSendBolt extends BaseBasicBolt {
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
// outputFieldsDeclarer.declare(new Fields("suc"));
outputFieldsDeclarer.declare(new Fields("suc"));
}
}

View File

@@ -1,68 +0,0 @@
package cn.ac.iie.bolt.collect;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.TupleUtils;
import com.zdjizhi.utils.HBaseUtils;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import java.util.HashMap;
import java.util.Map;
import static cn.ac.iie.utils.general.TransFormUtils.dealCommonMessage;
/**
* Connection-record log enrichment
*
* @author qidaijie
*/
public class CollectCompletedBolt extends BaseBasicBolt {
private final static Logger logger = Logger.getLogger(CollectCompletedBolt.class);
private static final long serialVersionUID = 4682827168247333522L;
@Override
public void prepare(Map stormConf, TopologyContext context) {
}
@Override
public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
try {
if (TupleUtils.isTick(tuple)) {
HBaseUtils.change();
} else {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
basicOutputCollector.emit(new Values(dealCommonMessage(message)));
}
}
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while receiving/parsing the message");
e.printStackTrace();
}
}
@Override
public Map<String, Object> getComponentConfiguration() {
Map<String, Object> conf = new HashMap<String, Object>(16);
conf.put(org.apache.storm.Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS,
FlowWriteConfig.HBASE_TICK_TUPLE_FREQ_SECS);
return conf;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("connLog"));
}
}

View File

@@ -1,66 +0,0 @@
package cn.ac.iie.bolt.proxy;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.TupleUtils;
import com.zdjizhi.utils.HBaseUtils;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import java.util.HashMap;
import java.util.Map;
import static cn.ac.iie.utils.general.TransFormUtils.dealCommonMessage;
/**
* Connection-record log enrichment
*
* @author qidaijie
*/
public class ProxyCompletionBolt extends BaseBasicBolt {
private final static Logger logger = Logger.getLogger(ProxyCompletionBolt.class);
private static final long serialVersionUID = 6097654428594885032L;
@Override
public void prepare(Map stormConf, TopologyContext context) {
}
@Override
public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
try {
if (TupleUtils.isTick(tuple)) {
HBaseUtils.change();
} else {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
basicOutputCollector.emit(new Values(dealCommonMessage(message)));
}
}
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while receiving/parsing the message");
e.printStackTrace();
}
}
@Override
public Map<String, Object> getComponentConfiguration() {
Map<String, Object> conf = new HashMap<String, Object>(16);
conf.put(org.apache.storm.Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS,
FlowWriteConfig.HBASE_TICK_TUPLE_FREQ_SECS);
return conf;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("connLog"));
}
}

View File

@@ -1,68 +0,0 @@
package cn.ac.iie.bolt.security;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.TupleUtils;
import com.zdjizhi.utils.HBaseUtils;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import java.util.HashMap;
import java.util.Map;
import static cn.ac.iie.utils.general.TransFormUtils.dealCommonMessage;
/**
* Connection-record log enrichment
*
* @author qidaijie
*/
public class SecurityCompletionBolt extends BaseBasicBolt {
private final static Logger logger = Logger.getLogger(SecurityCompletionBolt.class);
private static final long serialVersionUID = -2380858260054733989L;
@Override
public void prepare(Map stormConf, TopologyContext context) {
}
@Override
public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
try {
if (TupleUtils.isTick(tuple)) {
HBaseUtils.change();
} else {
String message = tuple.getString(0);
if (StringUtil.isNotBlank(message)) {
basicOutputCollector.emit(new Values(dealCommonMessage(message)));
}
}
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while receiving/parsing the message");
e.printStackTrace();
}
}
@Override
public Map<String, Object> getComponentConfiguration() {
Map<String, Object> conf = new HashMap<String, Object>(16);
conf.put(org.apache.storm.Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS,
FlowWriteConfig.HBASE_TICK_TUPLE_FREQ_SECS);
return conf;
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("connLog"));
}
}

View File

@@ -8,9 +8,10 @@ import cn.ac.iie.utils.system.FlowWriteConfigurations;
*/
public class FlowWriteConfig {
public static final int IPV4_TYPE = 1;
public static final int IPV6_TYPE = 2;
public static final String DOMAIN_SPLITTER = ".";
public static final String LOG_STRING_SPLITTER = "\t";
public static final String SQL_STRING_SPLITTER = "#";
public static final String SEGMENTATION = ",";
/**
* System
*/
@@ -19,7 +20,6 @@ public class FlowWriteConfig {
public static final Integer TOPOLOGY_WORKERS = FlowWriteConfigurations.getIntProperty(0, "topology.workers");
public static final Integer KAFKA_BOLT_PARALLELISM = FlowWriteConfigurations.getIntProperty(0, "kafka.bolt.parallelism");
public static final Integer TOPOLOGY_TICK_TUPLE_FREQ_SECS = FlowWriteConfigurations.getIntProperty(0, "topology.tick.tuple.freq.secs");
public static final Integer HBASE_TICK_TUPLE_FREQ_SECS = FlowWriteConfigurations.getIntProperty(0, "hbase.tick.tuple.freq.secs");
public static final Integer TOPOLOGY_CONFIG_MAX_SPOUT_PENDING = FlowWriteConfigurations.getIntProperty(0, "topology.config.max.spout.pending");
public static final Integer TOPOLOGY_NUM_ACKS = FlowWriteConfigurations.getIntProperty(0, "topology.num.acks");
public static final Integer TOPOLOGY_SPOUT_SLEEP_TIME = FlowWriteConfigurations.getIntProperty(0, "topology.spout.sleep.time");
@@ -40,8 +40,6 @@ public class FlowWriteConfig {
*/
public static final String BOOTSTRAP_SERVERS = FlowWriteConfigurations.getStringProperty(0, "bootstrap.servers");
public static final String ZOOKEEPER_SERVERS = FlowWriteConfigurations.getStringProperty(0, "zookeeper.servers");
public static final String HBASE_ZOOKEEPER_SERVERS = FlowWriteConfigurations.getStringProperty(0, "hbase.zookeeper.servers");
public static final String HBASE_TABLE_NAME = FlowWriteConfigurations.getStringProperty(0, "hbase.table.name");
public static final String GROUP_ID = FlowWriteConfigurations.getStringProperty(0, "group.id");
public static final String RESULTS_OUTPUT_TOPIC = FlowWriteConfigurations.getStringProperty(0, "results.output.topic");
public static final String KAFKA_TOPIC = FlowWriteConfigurations.getStringProperty(0, "kafka.topic");
@@ -49,9 +47,17 @@ public class FlowWriteConfig {
public static final String IP_LIBRARY = FlowWriteConfigurations.getStringProperty(0, "ip.library");
/**
* http
/***
* Redis
*/
public static final String SCHEMA_HTTP = FlowWriteConfigurations.getStringProperty(0, "schema.http");
public static final String REDIS_IP = "redis.ip";
public static final String REDIS_PORT = "redis.port";
public static final String REDIS_TIMEOUT = "redis.timeout";
public static final String REDIS_POOL_MAXACTIVE = "redis.pool.maxActive";
public static final String REDIS_POOL_MAXIDLE = "redis.pool.maxIdle";
public static final String REDIS_POOL_MAXWAIT = "redis.pool.maxWait";
public static final String REDIS_POOL_TESTONBORROW = "redis.pool.testOnBorrow";
public static final String REDIS_POOL_TESTONRETURN = "redis.pool.testOnReturn";
}

View File

@@ -37,7 +37,6 @@ public class CustomizedKafkaSpout extends BaseRichSpout {
props.put("max.poll.records", 3000);
props.put("max.partition.fetch.bytes", 31457280);
props.put("auto.offset.reset", FlowWriteConfig.AUTO_OFFSET_RESET);
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
return props;

View File

@@ -1,13 +1,9 @@
package cn.ac.iie.topology;
import cn.ac.iie.bolt.ConnCompletionBolt;
import cn.ac.iie.bolt.NtcLogSendBolt;
import cn.ac.iie.bolt.collect.CollectCompletedBolt;
import cn.ac.iie.bolt.radius.RadiusCompletionBolt;
import cn.ac.iie.bolt.security.SecurityCompletionBolt;
import cn.ac.iie.bolt.proxy.ProxyCompletionBolt;
import cn.ac.iie.bolt.SummaryBolt;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.spout.CustomizedKafkaSpout;
import org.apache.log4j.Logger;
@@ -62,28 +58,9 @@ public class LogFlowWriteTopology {
private void buildTopology() {
builder = new TopologyBuilder();
builder.setSpout("LogFlowWriteSpout", new CustomizedKafkaSpout(), FlowWriteConfig.SPOUT_PARALLELISM);
switch (FlowWriteConfig.KAFKA_TOPIC) {
case "PROXY-EVENT-LOG":
builder.setBolt("ProxyCompletionBolt", new ProxyCompletionBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("ProxyCompletionBolt");
break;
case "RADIUS-RECORD-LOG":
builder.setBolt("RadiusCompletionBolt", new RadiusCompletionBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("RadiusCompletionBolt");
break;
case "CONNECTION-RECORD-LOG":
builder.setBolt("CollectCompletedBolt", new CollectCompletedBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("CollectCompletedBolt");
break;
case "SECURITY-EVENT-LOG":
builder.setBolt("SecurityCompletionBolt", new SecurityCompletionBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("SecurityCompletionBolt");
break;
default:
}
builder.setBolt("ConnCompletionBolt", new ConnCompletionBolt(), FlowWriteConfig.DATACENTER_BOLT_PARALLELISM).localOrShuffleGrouping("LogFlowWriteSpout");
builder.setBolt("NtcLogSendBolt", new NtcLogSendBolt(), FlowWriteConfig.KAFKA_BOLT_PARALLELISM).localOrShuffleGrouping("ConnCompletionBolt");
// builder.setBolt("SummaryBolt", new SummaryBolt(), 1).localOrShuffleGrouping("NtcLogSendBolt");
}
public static void main(String[] args) throws Exception {

View File

@@ -1,18 +1,19 @@
package cn.ac.iie.utils.general;
import cn.ac.iie.bean.SessionRecordLog;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.hbase.HBaseUtils;
import cn.ac.iie.utils.json.JsonParseUtil;
import cn.ac.iie.utils.redis.RedisPollUtils;
import cn.ac.iie.utils.system.SnowflakeId;
import cn.ac.iie.utils.zookeeper.DistributedLock;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import com.alibaba.fastjson.JSONObject;
import com.google.common.net.InternetDomainName;
import com.zdjizhi.utils.FormatUtils;
import com.zdjizhi.utils.IpLookup;
import com.zdjizhi.utils.StringUtil;
import org.apache.log4j.Logger;
import redis.clients.jedis.Jedis;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
@@ -21,179 +22,102 @@ import java.util.*;
* @author qidaijie
* @create 2018-08-13 15:11
*/
public class TransFormUtils {
private static Logger logger = Logger.getLogger(TransFormUtils.class);
//Map used to build the reflective schema class, kept in memory
private static HashMap<String, Class> map = JsonParseUtil.getMapFromhttp(FlowWriteConfig.SCHEMA_HTTP);
//Object generated from that map via reflection
private static Object mapObject = JsonParseUtil.generateObject(map);
//Job list derived from the schema
private static ArrayList<String[]> jobList = JsonParseUtil.getJobListFromHttp(FlowWriteConfig.SCHEMA_HTTP);
//Enrichment helper
private static FormatUtils build = new FormatUtils.Builder(false).build();
//IP geolocation lookup helper
private static Pattern WEB_PATTERN = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
private static IpLookup ipLookup = new IpLookup.Builder(false)
.loadDataFileV4(FlowWriteConfig.IP_LIBRARY + "Kazakhstan.mmdb")
.loadDataFileV6(FlowWriteConfig.IP_LIBRARY + "Kazakhstan.mmdb")
.loadAsnDataFileV4(FlowWriteConfig.IP_LIBRARY + "asn_v4.mmdb")
.loadAsnDataFileV6(FlowWriteConfig.IP_LIBRARY + "asn_v6.mmdb")
.build();
// private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
// private static SnowflakeId snowflakeId = new SnowflakeId();
/**
* Parse a log record and enrich it
*
* @param message raw log from the Kafka topic
* @param message raw log
* @return the enriched log
*/
public static String dealCommonMessage(String message) {
Object object = JSONObject.parseObject(message, mapObject.getClass());
// System.out.println("before enrichment ===> " + JSON.toJSONString(object));
public static String getJsonMessage(String message) {
SessionRecordLog sessionRecordLog = JSONObject.parseObject(message, SessionRecordLog.class);
String serverIp = sessionRecordLog.getServer_ip();
String clientIp = sessionRecordLog.getClient_ip();
try {
for (String[] strings : jobList) {
//Value of the source field used by the function
Object name = JsonParseUtil.getValue(object, strings[0]);
//Current value of the field to be filled in
Object appendTo = JsonParseUtil.getValue(object, strings[1]);
//Name of the enrichment function to apply
String function = strings[2];
//Value of the extra parameter, if any
Object param = null;
if (strings[3] != null){
param=JsonParseUtil.getValue(object, strings[3]);
}
if (function.equals("current_timestamp")) {
JsonParseUtil.setValue(object, strings[1], getCurrentTime());
} else if (function.equals("snowflake_id")) {
JsonParseUtil.setValue(object, strings[1], build.getSnowflakeId(FlowWriteConfig.ZOOKEEPER_SERVERS,FlowWriteConfig.KAFKA_TOPIC,FlowWriteConfig.DATA_CENTER_ID_NUM));
} else if (function.equals("geo_ip_detail")) {
JsonParseUtil.setValue(object, strings[1], getGeoIpDetail(name.toString()));
} else if (function.equals("geo_asn")) {
JsonParseUtil.setValue(object, strings[1], getGeoAsn(name.toString()));
} else if (function.equals("radius_match")) {
JsonParseUtil.setValue(object, strings[1], radiusMatch(name.toString()));
} else if (function.equals("geo_ip_country")) {
JsonParseUtil.setValue(object, strings[1], getGeoIpCountry(name.toString()));
} else if (function.equals("decode_of_base64") && param != null){
JsonParseUtil.setValue(object, strings[1], FormatUtils.base64Str(name.toString(),param.toString()));
} else if (name.equals("http_host") && function.equals("sub_domain")) {
if (appendTo == null || StringUtil.isBlank(appendTo.toString())) {
JsonParseUtil.setValue(object, strings[1], getTopDomain(null, name.toString()));
}
} else if (name.equals("ssl_sni") && strings[2].equals("sub_domain")) {
if (appendTo == null || StringUtil.isBlank(appendTo.toString())) {
JsonParseUtil.setValue(object, strings[1], getTopDomain(name.toString(), null));
}
}
}
return JSONObject.toJSONString(object);
// System.out.println("after enrichment ===> " + JSON.toJSONString(object));
sessionRecordLog.setUid(SnowflakeId.generateId());
sessionRecordLog.setServer_location(ipLookup.countryLookup(serverIp));
sessionRecordLog.setClient_location(ipLookup.cityLookupDetail(clientIp));
sessionRecordLog.setClient_asn(ipLookup.asnLookup(clientIp, true));
sessionRecordLog.setServer_asn(ipLookup.asnLookup(serverIp, true));
sessionRecordLog.setDomain(getTopDomain(sessionRecordLog.getSni(), sessionRecordLog.getHost()));
sessionRecordLog.setRecv_time(System.currentTimeMillis() / 1000);
// sessionRecordLog.setSubscribe_id(getSubscribeId(clientIp));
return JSONObject.toJSONString(sessionRecordLog);
} catch (Exception e) {
logger.error(FlowWriteConfig.KAFKA_TOPIC + " exception while parsing the log");
e.printStackTrace();
logger.error("Exception while parsing the log", e);
return "";
}
}
// @Test
// public void aaa() {
// String sni = "www.baidu.com";
// System.out.println(getTopDomain(sni, null));
// System.out.println(getTopDomain(null,sni));
//
// }
/**
* If an SNI is present, derive the domain from it; if a host is present, derive it from the host
*
* @param sni sni
* @param host host
* @return the top-level domain
*/
private static String getTopDomain(String sni, String host) {
if (StringUtil.isNotBlank(host)) {
return getDomainName(host);
} else if (StringUtil.isNotBlank(sni)) {
return getDomainName(sni);
if (StringUtil.isNotBlank(sni)) {
return getDomain(sni);
} else if (StringUtil.isNotBlank(host)) {
return getDomain(host);
} else {
return "";
}
}
/**
* Look up the subscriber ID for an IP
*
* @param key Sip
* @return SubscribeId
*/
private static String getSubscribeId(String key) {
String sub = "";
try (Jedis jedis = RedisPollUtils.getJedis()) {
if (jedis != null) {
sub = jedis.get(key);
}
} catch (Exception e) {
logger.error("Exception while fetching the subscriber ID from Redis", e);
}
return sub;
}
/**
* Extract the top-level domain from a URL
*
* @param host site URL
* @param url site URL
* @return the top-level domain
*/
private static String getDomainName(String host) {
String domain = "";
private static String getDomain(String url) {
try {
domain = InternetDomainName.from(host).topPrivateDomain().toString();
Matcher matcher = WEB_PATTERN.matcher(url);
if (matcher.find()) {
return matcher.group();
}
} catch (Exception e) {
logger.error("Exception while extracting the top-level domain from host: " + e.getMessage());
e.printStackTrace();
}
return domain;
return "";
}
/**
* Returns the current Unix timestamp (seconds)
*/
private static int getCurrentTime() {
return (int)(System.currentTimeMillis() / 1000);
public static void main(String[] args) {
String s = ipLookup.countryLookup("192.168.10.207");
System.out.println(s);
}
/**
* Look up location details for an IP
*
* @param ip
* @return
*/
private static String getGeoIpDetail(String ip) {
return ipLookup.cityLookupDetail(ip);
}
/**
* Look up ASN info for an IP
*
* @param ip
* @return
*/
private static String getGeoAsn(String ip) {
return ipLookup.asnLookup(ip, true);
}
/**
* Look up the country for an IP
*
* @param ip
* @return
*/
private static String getGeoIpCountry(String ip) {
return ipLookup.countryLookup(ip);
}
/**
* RADIUS account enrichment via HBase
*
* @param ip
* @return
*/
private static String radiusMatch(String ip) {
return HBaseUtils.getAccount(ip);
}
}
}

View File

@@ -1,138 +0,0 @@
package cn.ac.iie.utils.hbase;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Logger;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* HBase utility class
*
* @author qidaijie
*/
public class HBaseUtils {
private final static Logger logger = Logger.getLogger(HBaseUtils.class);
private static Map<String, String> subIdMap = new HashMap<>(333334);
// private static Map<String, String> subIdMap = new ConcurrentSkipListMap<>();
private static Connection connection;
private static Long time;
static {
// HBase configuration
Configuration configuration = HBaseConfiguration.create();
// Set the ZooKeeper quorum
configuration.set("hbase.zookeeper.quorum", FlowWriteConfig.HBASE_ZOOKEEPER_SERVERS);
configuration.set("hbase.client.retries.number", "3");
configuration.set("hbase.bulkload.retries.number", "3");
configuration.set("zookeeper.recovery.retry", "3");
try {
connection = ConnectionFactory.createConnection(configuration);
time = System.currentTimeMillis();
getAll();
} catch (IOException e) {
logger.error("Failed to obtain an HBase connection");
e.printStackTrace();
}
}
/**
* Refresh the cached mappings
*/
public static void change() {
Long nowTime = System.currentTimeMillis();
timestampsFilter(time - 1000, nowTime + 500);
}
/**
* Fetch the entries changed within a time range
*
* @param startTime start time
* @param endTime end time
*/
private static void timestampsFilter(Long startTime, Long endTime) {
Long begin = System.currentTimeMillis();
Table table = null;
ResultScanner scanner = null;
Scan scan2 = new Scan();
try {
table = connection.getTable(TableName.valueOf("sub:" + FlowWriteConfig.HBASE_TABLE_NAME));
scan2.setTimeRange(startTime, endTime);
scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
String key = Bytes.toString(CellUtil.cloneRow(cell));
String value = Bytes.toString(CellUtil.cloneValue(cell));
if (subIdMap.containsKey(key)) {
if (!value.equals(subIdMap.get(key))) {
subIdMap.put(key, value);
}
} else {
subIdMap.put(key, value);
}
}
}
Long end = System.currentTimeMillis();
logger.warn("Current map size: " + subIdMap.keySet().size());
logger.warn("Cache refresh took " + (end - begin) + " ms; start: " + begin + ", end: " + end);
time = endTime;
} catch (IOException e) {
e.printStackTrace();
} finally {
if (scanner != null) {
scanner.close();
}
if (table != null) {
try {
table.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
/**
* Load all key/value pairs
*/
private static void getAll() {
Long begin = System.currentTimeMillis();
try {
Table table = connection.getTable(TableName.valueOf("sub:" + FlowWriteConfig.HBASE_TABLE_NAME));
Scan scan2 = new Scan();
ResultScanner scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
subIdMap.put(Bytes.toString(CellUtil.cloneRow(cell)), Bytes.toString(CellUtil.cloneValue(cell)));
}
}
logger.warn("Map size after full load: " + subIdMap.size());
logger.warn("Full load took " + (System.currentTimeMillis() - begin) + " ms");
scanner.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Get the account for a client IP
*
* @param clientIp client_ip
* @return account
*/
public static String getAccount(String clientIp) {
return subIdMap.get(clientIp);
}
}

View File

@@ -1,51 +0,0 @@
package cn.ac.iie.utils.http;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
/**
* Utility for fetching the gateway schema over HTTP
*/
public class HttpClientUtil {
public static String requestByGetMethod(String s) {
CloseableHttpClient httpClient = HttpClients.createDefault();
StringBuilder entityStringBuilder = null;
try {
HttpGet get = new HttpGet(s);
CloseableHttpResponse httpResponse = null;
httpResponse = httpClient.execute(get);
try {
HttpEntity entity = httpResponse.getEntity();
entityStringBuilder = new StringBuilder();
if (null != entity) {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(httpResponse.getEntity().getContent(), "UTF-8"), 8 * 1024);
String line = null;
while ((line = bufferedReader.readLine()) != null) {
entityStringBuilder.append(line);
}
}
} finally {
httpResponse.close();
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
if (httpClient != null) {
httpClient.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
return entityStringBuilder.toString();
}
}

View File

@@ -45,22 +45,6 @@ public class InfluxDbUtils {
}
}
/**
* Record the number of lookup misses, i.e. keys missing from the in-memory map
*
* @param failure number of lookup misses
*/
public static void sendHBaseFailure(int failure) {
if (failure != 0) {
InfluxDB client = InfluxDBFactory.connect(FlowWriteConfig.INFLUX_IP, FlowWriteConfig.INFLUX_USERNAME, FlowWriteConfig.INFLUX_PASSWORD);
Point point1 = Point.measurement("sendHBaseFailure")
.tag("topic", FlowWriteConfig.KAFKA_TOPIC)
.field("failure", failure)
.build();
client.write("BusinessMonitor", "", point1);
}
}
/**
* 获取本机IP
*

View File

@@ -1,231 +0,0 @@
package cn.ac.iie.utils.json;
import cn.ac.iie.utils.http.HttpClientUtil;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import net.sf.cglib.beans.BeanGenerator;
import net.sf.cglib.beans.BeanMap;
import java.util.*;
/**
* JSON parsing utilities based on fastjson
*/
public class JsonParseUtil {
/**
* Given a type name, return the corresponding Class
*
* @param type
* @return
*/
public static Class getClassName(String type) {
Class clazz;
switch (type) {
case "int":
clazz = Integer.class;
break;
case "String":
clazz = String.class;
break;
case "long":
clazz = long.class;
break;
case "Integer":
clazz = Integer.class;
break;
case "double":
clazz = double.class;
break;
case "float":
clazz = float.class;
break;
case "char":
clazz = char.class;
break;
case "byte":
clazz = byte.class;
break;
case "boolean":
clazz = boolean.class;
break;
case "short":
clazz = short.class;
break;
default:
clazz = String.class;
}
return clazz;
}
/**
* Generate an object via reflection (cglib BeanGenerator)
*
* @param properties
* @return the generated object
*/
public static Object generateObject(Map properties) {
BeanGenerator generator = new BeanGenerator();
Set keySet = properties.keySet();
for (Iterator i = keySet.iterator(); i.hasNext(); ) {
String key = (String) i.next();
generator.addProperty(key, (Class) properties.get(key));
}
return generator.create();
}
/**
* Get a property value
*
* @param obj
* @param property
* @return the property value
*/
public static Object getValue(Object obj, String property) {
BeanMap beanMap = BeanMap.create(obj);
return beanMap.get(property);
}
/**
* Set a property value
*
* @param obj
* @param property
* @param value
*/
public static void setValue(Object obj, String property, Object value) {
BeanMap beanMap = BeanMap.create(obj);
beanMap.put(property, value);
}
/**
* Fetch the gateway schema from the given URL and build the map used to generate a schema object
*
* @param http
* @return a map used to reflectively generate the schema object
*/
public static HashMap<String, Class> getMapFromhttp(String http) {
HashMap<String, Class> map = new HashMap<>();
String schema = HttpClientUtil.requestByGetMethod(http);
Object data = JSON.parseObject(schema).get("data");
//Get fields as an array; each element holds a name, doc and type
JSONObject schemaJson = JSON.parseObject(data.toString());
JSONArray fields = (JSONArray) schemaJson.get("fields");
for (Object field : fields) {
String name = JSON.parseObject(field.toString()).get("name").toString();
String type = JSON.parseObject(field.toString()).get("type").toString();
// if(
// name.equals("dns_qr") ||
// name.equals("dns_opcode") ||
// name.equals("ssl_pinningst") ||
// name.equals("ssl_intercept_state") ||
// name.equals("ssl_cert_verify")
//
// ){
// type="Integer";
// }
//Assemble the map used to generate the entity class
map.put(name, getClassName(type));
}
return map;
}
/**
* Fetch the schema from the URL, parse it and return a job list (useList, toList, funcList)
*
* @param http
* @return
*/
public static ArrayList<String[]> getJobListFromHttp(String http) {
ArrayList<String[]> list = new ArrayList<>();
String schema = HttpClientUtil.requestByGetMethod(http);
//Parse the data element
Object data = JSON.parseObject(schema).get("data");
//Get fields as an array; each element holds a name, doc and type
JSONObject schemaJson = JSON.parseObject(data.toString());
JSONArray fields = (JSONArray) schemaJson.get("fields");
for (Object field : fields) {
Object doc = JSON.parseObject(field.toString()).get("doc");
String name = JSON.parseObject(field.toString()).get("name").toString();
if (doc != null) {
Object format = JSON.parseObject(doc.toString()).get("format");
if (format != null) {
String functions = null;
String appendTo = null;
String params = null;
Object functionsObj = JSON.parseObject(format.toString()).get("functions");
Object appendToObj = JSON.parseObject(format.toString()).get("appendTo");
Object paramObj = JSON.parseObject(format.toString()).get("param");
if (functionsObj != null) {
functions = functionsObj.toString();
}
if (appendToObj != null) {
appendTo = appendToObj.toString();
}
if (paramObj != null) {
params = paramObj.toString();
}
if (appendTo != null && params == null) {
String[] functionArray = functions.split(",");
String[] appendToArray = appendTo.split(",");
for (int i = 0; i < functionArray.length; i++) {
// useList.add(name);
// toList.add(appendToArray[i]);
// funcList.add(functionArray[i]);
list.add(new String[]{name, appendToArray[i], functionArray[i],null});
}
}else if (appendTo != null && params != null){
String[] functionArray = functions.split(",");
String[] appendToArray = appendTo.split(",");
String[] paramArray = params.split(",");
for (int i = 0; i < functionArray.length; i++) {
// useList.add(name);
// toList.add(appendToArray[i]);
// funcList.add(functionArray[i]);
list.add(new String[]{name, appendToArray[i], functionArray[i],paramArray[i]});
}
}
else {
// useList.add(name);
// funcList.add(functions.toString());
// toList.add(name);
list.add(new String[]{name, name, functions,params});
}
}
}
}
return list;
}
}
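A minimal usage sketch of the utility above (not part of the commit): the schema URL and the field name common_client_ip are assumptions for illustration; only the JsonParseUtil calls themselves come from this class.
import cn.ac.iie.utils.json.JsonParseUtil;
import java.util.HashMap;
public class JsonParseUtilExample {
public static void main(String[] args) {
//assumed gateway schema endpoint
String schemaUrl = "http://gateway.example.com/schema";
//field-name -> Class map parsed from the schema's "fields" array
HashMap<String, Class> fields = JsonParseUtil.getMapFromhttp(schemaUrl);
//CGLib-generated bean whose properties mirror the schema fields
Object record = JsonParseUtil.generateObject(fields);
//"common_client_ip" is an assumed field name
JsonParseUtil.setValue(record, "common_client_ip", "10.0.0.1");
System.out.println(JsonParseUtil.getValue(record, "common_client_ip"));
}
}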

View File

@@ -59,7 +59,7 @@ public class KafkaLogNtc {
}
}
kafkaProducer.flush();
logger.debug("Log sent to National Center successfully!!!!!");
logger.warn("Log sent to National Center successfully!!!!!");
}
/**
@@ -72,10 +72,10 @@ public class KafkaLogNtc {
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("acks", "1");
properties.put("linger.ms", "2");
properties.put("request.timeout.ms", 30000);
properties.put("request.timeout.ms", 60000);
properties.put("batch.size", 262144);
properties.put("buffer.memory", 33554432);
// properties.put("compression.type", "snappy");
properties.put("compression.type", "snappy");
kafkaProducer = new KafkaProducer<>(properties);
}

View File

@@ -0,0 +1,79 @@
package cn.ac.iie.utils.redis;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.log4j.Logger;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;
import redis.clients.jedis.JedisPoolConfig;
import java.io.IOException;
import java.util.LinkedHashSet;
import java.util.Properties;
import java.util.Set;
/**
* Redis cluster connection pool, intended for looking up the subscriber name that corresponds to an IP.
*
* @author my
* @date 2018-07-04
*/
public final class RedisClusterUtils {
private static final Logger logger = Logger.getLogger(RedisClusterUtils.class);
private static JedisCluster jedisCluster;
private static Properties props = new Properties();
static {
try {
String redisConfigFile = "redis_config.properties";
props.load(RedisClusterUtils.class.getClassLoader().getResourceAsStream(redisConfigFile));
} catch (IOException e) {
props = null;
logger.error("加载Redis配置文件失败", e);
}
}
/**
* Instances must not be created via new.
*/
private RedisClusterUtils() {
}
/**
* Lazily initializes the JedisCluster connection pool.
*/
private static JedisCluster getJedisCluster() {
if (jedisCluster == null) {
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXACTIVE)));
poolConfig.setMaxIdle(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXIDLE)));
poolConfig.setMaxWaitMillis(Long.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXWAIT)));
poolConfig.setTestOnReturn(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONRETURN)));
poolConfig.setTestOnBorrow(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONBORROW)));
Set<HostAndPort> nodes = new LinkedHashSet<HostAndPort>();
for (String port : props.getProperty(FlowWriteConfig.REDIS_PORT).split(FlowWriteConfig.SEGMENTATION)) {
for (String ip : props.getProperty(FlowWriteConfig.REDIS_IP).split(FlowWriteConfig.SEGMENTATION)) {
nodes.add(new HostAndPort(ip, Integer.parseInt(port)));
}
}
jedisCluster = new JedisCluster(nodes, poolConfig);
}
return jedisCluster;
}
/**
* Looks up the subscriber name.
*
* @param key service_ip
* @return Subscribe_id; an empty string when the first octet is listed in CHECK_IP_SCOPE
*/
public static String get(String key) {
String s = key.split("\\.")[0];
if (!FlowWriteConfig.CHECK_IP_SCOPE.contains(s)) {
jedisCluster = getJedisCluster();
return jedisCluster.get(key);
}
return "";
}
}
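A short usage sketch (not part of the commit), assuming redis_config.properties is on the classpath with the keys read above; the IP is illustrative.
import cn.ac.iie.utils.redis.RedisClusterUtils;
public class RedisClusterUtilsExample {
public static void main(String[] args) {
//returns the subscriber id stored under the service IP, or "" when the first octet is in CHECK_IP_SCOPE
System.out.println(RedisClusterUtils.get("10.12.34.56"));
}
}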

View File

@@ -0,0 +1,115 @@
package cn.ac.iie.utils.redis;
import cn.ac.iie.common.FlowWriteConfig;
import com.zdjizhi.utils.StringUtil;
import org.apache.commons.lang3.RandomUtils;
import org.apache.log4j.Logger;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.util.Properties;
/**
* @author qidaijie
*/
public class RedisPollUtils {
private static final Logger logger = Logger.getLogger(RedisPollUtils.class);
private static JedisPool jedisPool = null;
private static Properties props = new Properties();
private RedisPollUtils() {
}
static {
initialPool();
}
/**
* Initializes the Redis connection pool.
*/
private static void initialPool() {
try {
//load the connection pool configuration file
props.load(RedisPollUtils.class.getClassLoader().getResourceAsStream("redis_config.properties"));
// create the jedis pool configuration instance
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXACTIVE)));
poolConfig.setMaxIdle(Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXIDLE)));
poolConfig.setMaxWaitMillis(Long.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_MAXWAIT)));
poolConfig.setTestOnReturn(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONRETURN)));
poolConfig.setTestOnBorrow(Boolean.valueOf(props.getProperty(FlowWriteConfig.REDIS_POOL_TESTONBORROW)));
// instantiate the jedis pool from the configuration
jedisPool = new JedisPool(poolConfig, props.getProperty(FlowWriteConfig.REDIS_IP),
Integer.valueOf(props.getProperty(FlowWriteConfig.REDIS_PORT)));
} catch (Exception e) {
logger.error("Redis连接池初始化错误", e);
}
}
/**
* Obtains a Jedis instance from the pool.
*
* @return Jedis instance, or null if the pool is unavailable
*/
public static Jedis getJedis() {
Jedis jedis = null;
try {
if (jedisPool == null) {
initialPool();
}
jedis = jedisPool.getResource();
} catch (Exception e) {
logger.error("Redis连接池错误,无法获取连接", e);
}
return jedis;
}
// /**
// * @param key redis key
// * @return value
// */
// public static Integer getWorkerId(String key) {
// int workId = 0;
// int maxId = 32;
// try (Jedis jedis = RedisPollUtils.getJedis()) {
// if (jedis != null) {
// String work = jedis.get(key);
// if (StringUtil.isBlank(work)) {
// jedis.set(key, "0");
// } else {
// workId = Integer.parseInt(work);
// }
// if (workId < maxId) {
// jedis.set(key, String.valueOf(workId + 1));
// } else {
// workId = 0;
// jedis.set(key, "1");
// }
// }
// } catch (Exception e) {
// logger.error("通过Redis获取用户名出现异常", e);
// workId = RandomUtils.nextInt(0, 31);
// }
// return workId;
// }
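/**
* Reads the current worker id from Redis and advances the stored value by 2 for the next caller.
* Falls back to a random id in [0, 31) when Redis is unreachable or the key is missing
* (Integer.parseInt on a null value throws and is caught by the generic catch below).
*
* @param key redis key holding the last issued worker id
* @return the worker id for this process
*/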
public static Integer getWorkerId(String key) {
int workId = 0;
try (Jedis jedis = RedisPollUtils.getJedis()) {
if (jedis != null) {
workId = Integer.parseInt(jedis.get(key));
jedis.set(key, String.valueOf(workId + 2));
logger.error("\n工作id是" + workId + "\n");
}
} catch (Exception e) {
logger.error("通过Redis获取用户名出现异常", e);
workId = RandomUtils.nextInt(0, 31);
}
return workId;
}
}

View File

@@ -1,56 +0,0 @@
package cn.ac.iie.utils.system;
/**
* IP utility class.
*
* @author qidaijie
*/
public class IpUtils {
/**
* IPv4 regular expression.
*/
private static final String IPV4 = "^((\\d|[1-9]\\d|1\\d\\d|2([0-4]\\d|5[0-5]))\\.){4}$";
/**
* IPv6 regular expression.
*/
private static final String IPV6 = "^(([\\da-fA-F]{1,4}):){8}$";
/**
* Determines whether an address is IPv4 or IPv6.
*
* @param ip IP
* @return 1: v4, 2: v6, 3: abnormal
*/
public static int validIPAddress(String ip) {
return String.format("%s.", ip).matches(IPV4) ? 1 : String.format("%s:", ip).matches(IPV6) ? 2 : 3;
}
/**
* Converts a dotted IPv4 string to an integer.
* The ip is a dot-separated string of integers; as in any radix-to-decimal conversion, each octet is
* multiplied by its positional weight and the results are summed, the weight here being 256.
*
* @param ip IP
* @return ip(int)
*/
public static int ipChangeInt(String ip) {
//split the ip
String[] ipSplit = ip.split("\\.");
int result = 0;
for (int i = 0; i < 4; i++) {
Integer ipSubInteger = Integer.parseInt(ipSplit[i]);
//the regex already rules out negative values
if (ipSubInteger > 255) {
result = 0;
break;
}
result += (ipSubInteger << (24 - i * 8));
}
return result;
}
public static void main(String[] args) {
System.out.println(validIPAddress("192.254.254.254"));
System.out.println(ipChangeInt("254.254.254.254"));
}
}
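A small worked example of the weighted-sum conversion described above (not part of the commit); the address is illustrative.
import cn.ac.iie.utils.system.IpUtils;
public class IpUtilsExample {
public static void main(String[] args) {
//10*256^3 + 0*256^2 + 2*256 + 1 = 167772673
System.out.println(IpUtils.ipChangeInt("10.0.2.1"));
//1 = IPv4, 2 = IPv6, 3 = neither
System.out.println(IpUtils.validIPAddress("10.0.2.1"));
}
}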

View File

@@ -0,0 +1,190 @@
package cn.ac.iie.utils.system;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.zookeeper.DistributedLock;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import org.apache.log4j.Logger;
/**
* Snowflake ID generator.
*
* @author qidaijie
*/
public class SnowflakeId {
private static Logger logger = Logger.getLogger(SnowflakeId.class);
// ==============================Fields===========================================
/**
* Epoch start timestamp (2019-08-01 00:00:00 CST); supports roughly 17 years of IDs
*/
private final long twepoch = 1564588800000L;
/**
* Number of bits reserved for the worker id
*/
private final long workerIdBits = 6L;
/**
* Number of bits reserved for the data-center id
*/
private final long dataCenterIdBits = 4L;
/**
* Maximum supported worker id, which is 63 (this shift trick quickly yields the largest decimal value an n-bit binary number can hold)
*/
private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
/**
* Maximum supported data-center id, which is 15
*/
private final long maxDataCenterId = -1L ^ (-1L << dataCenterIdBits);
/**
* Number of bits the sequence occupies within the id
*/
private final long sequenceBits = 14L;
/**
* Worker id is shifted left by 14 bits
*/
private final long workerIdShift = sequenceBits;
/**
* Data-center id is shifted left by 20 bits (14+6)
*/
private final long dataCenterIdShift = sequenceBits + workerIdBits;
/**
* Timestamp is shifted left by 24 bits (14+6+4)
*/
private final long timestampLeftShift = sequenceBits + workerIdBits + dataCenterIdBits;
/**
* Mask for the generated sequence, 16383 here
*/
private final long sequenceMask = -1L ^ (-1L << sequenceBits);
/**
* Worker machine ID (0~63)
*/
private long workerId;
/**
* Data-center ID (0~15)
*/
private long dataCenterId;
/**
* Sequence within the current millisecond (0~16383)
*/
private long sequence = 0L;
/**
* Timestamp of the last generated ID
*/
private long lastTimestamp = -1L;
private static SnowflakeId idWorker;
private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
static {
idWorker = new SnowflakeId();
}
//==============================Constructors=====================================
/**
* Constructor; acquires a distributed lock and obtains the worker id from Zookeeper.
*/
private SnowflakeId() {
DistributedLock lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
int tmpWorkerId = zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC);
if (tmpWorkerId > maxWorkerId || tmpWorkerId < 0) {
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
int dataCenterId = FlowWriteConfig.DATA_CENTER_ID_NUM;
if (dataCenterId > maxDataCenterId || dataCenterId < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDataCenterId));
}
this.workerId = tmpWorkerId;
this.dataCenterId = dataCenterId;
}
// ==============================Methods==========================================
/**
* Returns the next ID (this method is thread-safe).
*
* @return SnowflakeId
*/
private synchronized long nextId() {
long timestamp = timeGen();
//if the current time is earlier than the timestamp of the last generated ID, the system clock has moved backwards and an exception should be thrown
if (timestamp < lastTimestamp) {
throw new RuntimeException(
String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
}
//if generated within the same millisecond, advance the in-millisecond sequence
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
//the in-millisecond sequence overflowed
if (sequence == 0) {
//block until the next millisecond to obtain a new timestamp
timestamp = tilNextMillis(lastTimestamp);
}
}
//the timestamp changed, so reset the in-millisecond sequence
else {
sequence = 0L;
}
//remember the timestamp of this ID generation
lastTimestamp = timestamp;
//shift the parts and OR them together into the 64-bit ID
return ((timestamp - twepoch) << timestampLeftShift)
| (dataCenterId << dataCenterIdShift)
| (workerId << workerIdShift)
| sequence;
}
/**
* Blocks until the next millisecond, i.e. until a newer timestamp is obtained.
*
* @param lastTimestamp timestamp of the last generated ID
* @return the current timestamp
*/
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
/**
* Returns the current time in milliseconds.
*
* @return current time (milliseconds)
*/
protected long timeGen() {
return System.currentTimeMillis();
}
/**
* Static entry point.
*
* @return the next generated ID
*/
public static Long generateId() {
return idWorker.nextId();
}
}
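A hedged sketch (not part of the commit) showing how an ID produced above decomposes under the 14/6/4-bit layout; it assumes the Zookeeper ensemble in FlowWriteConfig is reachable, since the static initializer obtains the worker id through it.
import cn.ac.iie.utils.system.SnowflakeId;
public class SnowflakeIdExample {
public static void main(String[] args) {
long id = SnowflakeId.generateId();
long sequence = id & 0x3FFF;           //low 14 bits
long workerId = (id >> 14) & 0x3F;     //next 6 bits
long dataCenterId = (id >> 20) & 0xF;  //next 4 bits
long millis = id >>> 24;               //offset in ms from twepoch (2019-08-01)
System.out.println(millis + " " + dataCenterId + " " + workerId + " " + sequence);
}
}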

View File

@@ -1,5 +1,7 @@
package cn.ac.iie.utils.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.SnowflakeId;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
@@ -13,7 +15,9 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
/**
* @author qidaijie
*/
public class DistributedLock implements Lock, Watcher {
private static Logger logger = Logger.getLogger(DistributedLock.class);
@@ -79,7 +83,7 @@ public class DistributedLock implements Lock, Watcher {
}
try {
if (this.tryLock()) {
logger.warn(Thread.currentThread().getName() + " " + lockName + " is being locked......");
System.out.println(Thread.currentThread().getName() + " " + lockName + "获得了锁");
} else {
// wait for the lock
waitForLock(waitLock, sessionTimeout);
@@ -94,7 +98,7 @@ public class DistributedLock implements Lock, Watcher {
try {
String splitStr = "_lock_";
if (lockName.contains(splitStr)) {
throw new LockException("locked name is error!!!");
throw new LockException("锁名有误");
}
// create an ephemeral sequential node
currentLock = zk.create(ROOT_LOCK + "/" + lockName + splitStr, new byte[0],
@@ -183,4 +187,33 @@ public class DistributedLock implements Lock, Watcher {
super(e);
}
}
public static void main(String[] args) {
ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
Runnable runnable = new Runnable() {
@Override
public void run() {
DistributedLock lock = null;
try {
lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
// System.out.println(SnowflakeId.generateId());
System.out.println(1);
Thread.sleep(3000);
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
if (lock != null) {
lock.unlock();
}
}
}
};
for (int i = 0; i < 10; i++) {
Thread t = new Thread(runnable);
t.start();
}
}
}

View File

@@ -0,0 +1,134 @@
package cn.ac.iie.utils.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.commons.lang3.RandomUtils;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
/**
* @author qidaijie
*/
public class ZookeeperUtils implements Watcher {
private static Logger logger = Logger.getLogger(ZookeeperUtils.class);
private ZooKeeper zookeeper;
private static final int SESSION_TIME_OUT = 20000;
private CountDownLatch countDownLatch = new CountDownLatch(1);
@Override
public void process(WatchedEvent event) {
if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
countDownLatch.countDown();
}
}
/**
* Updates the node content.
*
* @param path node path
* @return the worker id assigned to this caller
*/
public int modifyNode(String path) {
createNode("/Snowflake", null, ZooDefs.Ids.OPEN_ACL_UNSAFE);
createNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC, "0".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE);
int workerId;
try {
connectZookeeper();
Stat stat = zookeeper.exists(path, true);
workerId = Integer.parseInt(getNodeDate(path));
if (workerId > 55) {
workerId = 0;
zookeeper.setData(path, "1".getBytes(), stat.getVersion());
} else {
String result = String.valueOf(workerId + 1);
if (stat != null) {
zookeeper.setData(path, result.getBytes(), stat.getVersion());
} else {
logger.error("Node does not exist!,Can't modify");
}
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
workerId = RandomUtils.nextInt(56, 63);
} finally {
closeConn();
}
logger.error("工作ID是" + workerId);
return workerId;
}
/**
* Connects to zookeeper.
*
*/
private void connectZookeeper() {
try {
zookeeper = new ZooKeeper(FlowWriteConfig.ZOOKEEPER_SERVERS, SESSION_TIME_OUT, this);
countDownLatch.await();
} catch (IOException | InterruptedException e) {
e.printStackTrace();
}
}
/**
* Closes the connection.
*/
private void closeConn() {
try {
if (zookeeper != null) {
zookeeper.close();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Reads the node content.
*
* @param path node path
* @return the content, or null on exception
*/
private String getNodeDate(String path) {
String result = null;
Stat stat = new Stat();
try {
byte[] resByte = zookeeper.getData(path, true, stat);
result = new String(resByte);
} catch (KeeperException | InterruptedException e) {
logger.error("Get node information exception");
e.printStackTrace();
}
return result;
}
/**
* @param path path at which the node is created
* @param date byte[] data stored in the node
* @param acls access-control policy
*/
private void createNode(String path, byte[] date, List<ACL> acls) {
try {
connectZookeeper();
Stat exists = zookeeper.exists(path, true);
if (exists == null) {
zookeeper.create(path, date, acls, CreateMode.PERSISTENT);
} else {
logger.warn("Node already exists!,Don't need to create");
}
} catch (KeeperException | InterruptedException e) {
e.printStackTrace();
} finally {
closeConn();
}
}
}
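A usage sketch for the worker-id counter (not part of the commit), mirroring how the SnowflakeId constructor drives it; it assumes the Zookeeper servers in FlowWriteConfig are reachable and reuses the existing disLocks1 lock name.
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.zookeeper.DistributedLock;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
public class ZookeeperUtilsExample {
public static void main(String[] args) {
//take the distributed lock so concurrent workers never read the same counter value
DistributedLock lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
try {
System.out.println(new ZookeeperUtils().modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC));
} finally {
lock.unlock();
}
}
}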

View File

@@ -12,7 +12,7 @@ log4j.appender.file.Threshold=error
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
#Use a relative path and verify through testing that the log output lands under the application directory
log4j.appender.file.file=storm-topology.log
log4j.appender.file.file=galaxy-name.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n

View File

@@ -10,7 +10,7 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import cn.ac.iie.common.FlowWriteConfig;
import com.zdjizhi.utils.ZookeeperUtils;
import cn.ac.iie.utils.zookeeper.ZookeeperUtils;
import org.apache.log4j.Logger;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
@@ -183,7 +183,7 @@ public class DistributedLock implements Lock, Watcher {
try {
lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC,FlowWriteConfig.ZOOKEEPER_SERVERS);
zookeeperUtils.modifyNode("/Snowflake/" + FlowWriteConfig.KAFKA_TOPIC);
} finally {
if (lock != null) {
lock.unlock();

View File

@@ -1,93 +1,37 @@
package cn.ac.iie.test;
import com.google.common.net.InternetDomainName;
import com.zdjizhi.utils.StringUtil;
import javax.xml.bind.SchemaOutputResolver;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.*;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class DomainUtils {
private final static Set<String> PublicSuffixSet = new HashSet<String>(
Arrays.asList("com|edu|gov|int|mil|net|org|biz|info|pro|name|museum|coop|aero|xxx|idv|top|xyz|xin|vip|win|red|wang|co|mobi|travel|club|post|rec|asia"
.split("\\|")));
private static Pattern pattern = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
public static void main(String[] args) {
System.out.println(getTopDomain("agoo-report.m.taobao.com"));
// InternetDomainName.from("foo.co.uk").topPrivateDomain().toString();
String host = "www.aaa.co.uk";
// if (host.contains(":")){
// String s = host.split(":")[0];
// System.out.println(InternetDomainName.from(s));
// System.out.println(InternetDomainName.from(s).topPrivateDomain());
// }else {
// System.out.println(InternetDomainName.from(host).topDomainUnderRegistrySuffix());
System.out.println(InternetDomainName.from(host).topPrivateDomain());
// }
// System.out.println(InternetDomainName.from("shence.hupu.com").topPrivateDomain());
}
/**
* Extracts the top-level (registrable) domain of a host.
*
* @param host host name
* @return the registrable domain
*/
public static String getDomainName(String host) {
if (host.endsWith(".") || host.contains("/")) {
host = host.substring(0, host.length() - 1);
}
int index = 0;
String candidate = host;
for (; index >= 0; ) {
index = candidate.indexOf('.');
String subCandidate = candidate.substring(index + 1);
if (PublicSuffixSet.contains(subCandidate)) {
return candidate;
}
candidate = subCandidate;
}
return candidate;
}
public static String getSourceDomain(String host) {
if (host.endsWith(".")) {
host = host.substring(0, host.length() - 1);
}
String[] hostStr = host.split("\\.");
int length = hostStr.length;
if (hostStr.length >= 2) {
if (PublicSuffixSet.contains(hostStr[length - 2])) {
return hostStr[length - 3] + "." + hostStr[length - 2] + "." + hostStr[length - 1];
} else {
return hostStr[length - 2] + "." + hostStr[length - 1];
}
} else {
return host;
}
}
// Regular expression; the list of domain roots must be maintained by hand and is incomplete here
private static final String RE_TOP = "[\\w-]+\\.(com.cn|net.cn|gov.cn|org\\.nz|org.cn|com|net|org|gov|cc|biz|info|cn|co)\\b()*";
private static Pattern pattern = Pattern.compile(RE_TOP, Pattern.CASE_INSENSITIVE);
public static String getTopDomain(String url) {
String result = url;
try {
private static String getTopDomain(String url) {
// try {
//get the host and convert it to lower case
// String host = new URL(url).getHost().toLowerCase();//news.hexun.com
// Pattern pattern = Pattern.compile("[^\\\\.]+(\\.com\\.cn|\\.net\\.cn|\\.org\\.cn|\\.gov\\.cn|\\.com|\\.net|\\.cn|\\.org|\\.cc|\\.me|\\.tel|\\.mobi|\\.asia|\\.biz|\\.info|\\.name|\\.tv|\\.hk|\\.公司|\\.中国|\\.网络)");
Matcher matcher = pattern.matcher(url);
matcher.find();
result = matcher.group();
} catch (Exception e) {
System.out.println("[getTopDomain ERROR]====>");
e.printStackTrace();
}
return result;
if (matcher.find()){
return matcher.group();
}
// } catch (MalformedURLException e) {
// e.printStackTrace();
// }
return null;
}
}

View File

@@ -1,4 +1,3 @@
/*
package cn.ac.iie.test;
@@ -11,7 +10,7 @@ class RunnableDemo implements Runnable {
private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
static {
zookeeperUtils.connectZookeeper();
zookeeperUtils.connectZookeeper("192.168.40.207:2181");
}
@Override
@@ -48,4 +47,3 @@ public class TestThread {
}
}
*/

View File

@@ -1,27 +0,0 @@
package cn.ac.iie.test;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.json.JsonParseUtil;
import java.util.ArrayList;
import java.util.HashMap;
public class UtilTest {
private static HashMap<String, Class> map = JsonParseUtil.getMapFromhttp(FlowWriteConfig.SCHEMA_HTTP);
//generate a class instance via reflection
private static Object mapObject = JsonParseUtil.generateObject(map);
//fetch the task list
private static ArrayList<String[]> jobList = JsonParseUtil.getJobListFromHttp(FlowWriteConfig.SCHEMA_HTTP);
public static void main(String[] args) {
for (String[] strings : jobList) {
System.out.println(strings[0]);
System.out.println(strings[1]);
System.out.println(strings[2]);
}
}
}

View File

@@ -1 +0,0 @@
{"common_stream_dir":3,"common_address_type":4,"common_client_ip":"82.200.242.225","common_server_ip":"82.200.242.69","common_client_port":59387,"common_server_port":1812,"common_c2s_pkt_num":2,"common_s2c_pkt_num":1,"common_c2s_byte_num":507,"common_s2c_byte_num":151,"common_start_time":1575534194,"common_end_time":1575534195,"common_con_duration_ms":1000,"common_stream_trace_id":0,"common_l4_protocol":"IPv4_UDP","common_address_list":"59387-1812-82.200.242.225-82.200.242.69","radius_packet_type":1,"radius_account":"Kuanysh79143","radius_service_type":2,"radius_acct_session_id":"473332153","radius_framed_ip":"82.200.242.225","common_policy_id":0,"common_service":162,"common_entrance_id":0,"common_direction":0,"common_device_id":0,"common_encapsulation":14,"common_link_id":0,"common_sled_ip":"192.168.40.119","common_schema_type":"RADIUS"}

View File

@@ -1,22 +0,0 @@
package cn.ac.iie.test.bean;
public class Student {
private String name;
private Integer age;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Integer getAge() {
return age;
}
public void setAge(Integer age) {
this.age = age;
}
}

View File

@@ -1,147 +0,0 @@
package cn.ac.iie.test.hbase;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.system.IpUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Logger;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
public class HBaseTest {
private final static Logger logger = Logger.getLogger(HBaseTest.class);
// private static Map<Integer, String> subIdMap = new ConcurrentHashMap<Integer, String>(13333334);
private static Map<String, String> subIdMap = new HashMap<>(13333334);
private static Map<String, String> testMap = new ConcurrentSkipListMap<>();
private static Connection connection;
private static Long time;
static {
// manages the HBase configuration information
Configuration configuration = HBaseConfiguration.create();
// set the zookeeper quorum
// configuration.set("hbase.zookeeper.quorum", FlowWriteConfig.HBASE_ZOOKEEPER_SERVERS);
// configuration.set("hbase.zookeeper.quorum", "192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181");
configuration.set("hbase.zookeeper.quorum", "192.168.40.186:2182");
configuration.set("hbase.client.retries.number", "3");
configuration.set("hbase.bulkload.retries.number", "3");
configuration.set("zookeeper.recovery.retry", "3");
try {
connection = ConnectionFactory.createConnection(configuration);
time = System.currentTimeMillis();
} catch (IOException e) {
logger.error("获取HBase连接失败" + e);
e.printStackTrace();
}
}
@Test
public void change() {
Long begin = System.currentTimeMillis();
getAll();
System.out.println(System.currentTimeMillis() - begin);
}
/**
* Fetches the rows that changed within a time range.
*
* @param startTime start time
* @param endTime end time
*/
private static void timestampsFilter(Long startTime, Long endTime) {
Long begin = System.currentTimeMillis();
Table table = null;
ResultScanner scanner = null;
Scan scan2 = new Scan();
try {
table = connection.getTable(TableName.valueOf("sub:" + FlowWriteConfig.HBASE_TABLE_NAME));
scan2.setTimeRange(startTime, endTime);
scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
// int key = Integer.parseInt(Bytes.toString(CellUtil.cloneRow(cell)));
String key = Bytes.toString(CellUtil.cloneRow(cell));
String value = Bytes.toString(CellUtil.cloneValue(cell));
if (subIdMap.containsKey(key)) {
if (!value.equals(subIdMap.get(key))) {
subIdMap.put(key, value);
}
} else {
subIdMap.put(key, value);
}
}
}
Long end = System.currentTimeMillis();
logger.warn("当前集合长度" + subIdMap.keySet().size());
logger.warn("更新缓存耗时:" + (end - begin) + "开始时间:" + begin + "结束时间:" + end);
time = endTime;
} catch (IOException e) {
e.printStackTrace();
} finally {
if (scanner != null) {
scanner.close();
}
if (table != null) {
try {
table.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
/**
* Fetches all key/value pairs.
*/
private static void getAll() {
Long begin = System.currentTimeMillis();
try {
Table table = connection.getTable(TableName.valueOf("sub:" + FlowWriteConfig.HBASE_TABLE_NAME));
Scan scan2 = new Scan();
ResultScanner scanner = table.getScanner(scan2);
for (Result result : scanner) {
Cell[] cells = result.rawCells();
for (Cell cell : cells) {
// subIdMap.put(Integer.valueOf(Bytes.toString(CellUtil.cloneRow(cell))), Bytes.toString(CellUtil.cloneValue(cell)));
// subIdMap.put(Bytes.toString(CellUtil.cloneRow(cell)), Bytes.toString(CellUtil.cloneValue(cell)));
testMap.put(Bytes.toString(CellUtil.cloneRow(cell)), Bytes.toString(CellUtil.cloneValue(cell)));
}
}
// logger.warn("获取全量后集合长度:" + subIdMap.size());
logger.warn("获取全量后集合长度:" + testMap.size());
logger.warn("获取全量耗时:" + (System.currentTimeMillis() - begin));
scanner.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Looks up the account.
*
* @param clientIp client_ip
* @return account
*/
public static String getAccount(String clientIp) {
int ipType = cn.ac.iie.utils.system.IpUtils.validIPAddress(clientIp);
String account = "";
if (ipType == FlowWriteConfig.IPV4_TYPE) {
account = subIdMap.get(IpUtils.ipChangeInt(clientIp));
} else if (ipType == FlowWriteConfig.IPV6_TYPE) {
account = subIdMap.get(clientIp);
}
return account;
}
}

View File

@@ -1,63 +0,0 @@
package cn.ac.iie.test.hbase;
import cn.ac.iie.common.FlowWriteConfig;
import cn.ac.iie.utils.general.EncryptionUtils;
import org.apache.log4j.Logger;
import org.slf4j.LoggerFactory;
public class IpUtils {
private static Logger logger = Logger.getLogger(IpUtils.class);
public static void main(String[] args) {
System.out.println(System.currentTimeMillis());
System.out.println(System.currentTimeMillis() - 60000);
}
/**
* Converts a dotted IPv4 string to an integer.
* The ip is a dot-separated string of integers; as in any radix-to-decimal conversion, each octet is
* multiplied by its positional weight and the results are summed, the weight here being 256.
*
* @param ip dotted IPv4 string
* @return the IP as an int
*/
public static int ip2Int(String ip) {
String[] ipStrs = ip.split("\\.");//split the ip
int result = 0;
for (int i = 0; i < 4; i++) {
Integer ipSubInteger = Integer.parseInt(ipStrs[i]);
if (ipSubInteger > 255) {//the regex already rules out negative values
result = 0;
break;
}
result += (ipSubInteger << (24 - i * 8));
}
return result;
}
/**
* Converts an integer back to a dotted IPv4 string.
*
* @param ip IP as an int
* @return dotted IPv4 string
*/
public static String int2Ip(int ip) {
StringBuilder builder = new StringBuilder(String.valueOf(ip >>> 24));
builder.append(".");
builder.append(String.valueOf((ip & 0X00FFFFFF) >>> 16));
builder.append(".");
builder.append(String.valueOf((ip & 0X0000FFFF) >>> 8));
builder.append(".");
builder.append(String.valueOf(ip & 0X000000FF));
return builder.toString();
}
public static int validIPAddress(String ip) {
String ipv4 = "^((\\d|[1-9]\\d|1\\d\\d|2([0-4]\\d|5[0-5]))\\.){4}$";
//eight groups of 1-4 hex digits, each followed by ':'
String ipv6 = "^(([\\da-fA-F]{1,4}):){8}$";
return String.format("%s.", ip).matches(ipv4) ? 1 : String.format("%s:", ip).matches(ipv6) ? 2 : 3;
}
}

View File

@@ -1,53 +1,46 @@
package cn.ac.iie.test;
import cn.ac.iie.bean.SessionRecordLog;
import cn.ac.iie.common.FlowWriteConfig;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.google.common.net.InternetDomainName;
import com.zdjizhi.utils.*;
import org.apache.log4j.Logger;
import org.apache.storm.shade.com.google.common.collect.Lists;
import com.zdjizhi.utils.IpLookup;
import org.junit.Test;
import java.io.File;
import java.util.ArrayList;
import javax.servlet.http.HttpServletRequest;
import java.math.BigInteger;
import java.util.Arrays;
public class test {
private static Logger logger = Logger.getLogger(test.class);
public static void main(String[] args) {
String message = "{\"str_ea_m-t-r-a-ceid\":\"JSON\"}";
SessionRecordLog sessionRecordLog = JSONObject.parseObject(message, SessionRecordLog.class);
System.out.println(sessionRecordLog.getStream_trace_id());
static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();
String message2 = "{\"streamtraceid\":\"JSON\"}";
SessionRecordLog sessionRecordLog2 = JSONObject.parseObject(message2, SessionRecordLog.class);
System.out.println(sessionRecordLog2.getStream_trace_id());
JSONObject jsonObject = JSON.parseObject(message);
System.out.println("\n" + Arrays.toString(jsonObject.keySet().toArray()));
HttpServletRequest request = null;
if (request != null) {
String contextPath = request.getScheme() + "://" + request.getServerName() + ":" + request.getServerPort() + request.getContextPath();
}
System.out.println(System.currentTimeMillis() / 1000);
}
//
@Test
public void test() throws InterruptedException {
File file = new File("D:\\123\\test.txt");
String zookeeperIp ="192.168.40.224:2181";
String kafkaTopic ="CONNECTION-RECORD-LOG";
System.out.println(zookeeperUtils.modifyNode("/Snowflake/" + kafkaTopic, zookeeperIp));
System.out.println(zookeeperUtils.modifyNode("/Snowflake/" + kafkaTopic, zookeeperIp));
// ArrayList<Long> list = Lists.newArrayList();
// for (int i = 1; i <= 500; i++) {
// ZooKeeperLock lock = new ZooKeeperLock(zookeeperIp, "/locks", "disLocks1");
// if (lock.lock()) {
// int tmpWorkerId = zookeeperUtils.modifyNode("/Snowflake/" + kafkaTopic, zookeeperIp);
// Long generateId = SnowflakeId.generateId(tmpWorkerId, 12);
// System.err.println(generateId);
// list.add(generateId);
// lock.unlock();
// }
// if(i%5==0) {
//// fileWrite(list, file);
// Thread.sleep(1000);
// }
// }
// System.err.println("第2个进程结束");
// FormatUtils build = new FormatUtils.Builder(false).build();
// long snowflakeId = build.getSnowflakeId("192.168.40.224:2181", "CONNECTION-RECORD-LOG", 12);
// System.err.println(snowflakeId);
public void test2() {
// String minTimeStampStr = "00000000000000000000000000000000000000000";
String minTimeStampStr = "000000000000000000000000000000000000000";
long minTimeStamp = new BigInteger(minTimeStampStr, 2).longValue();
// String maxTimeStampStr = "11111111111111111111111111111111111111111";
String maxTimeStampStr = "111111111111111111111111111111111111111";
long maxTimeStamp = new BigInteger(maxTimeStampStr, 2).longValue();
long oneYearMills = 1L * 1000 * 60 * 60 * 24 * 365;
System.out.println((maxTimeStamp - minTimeStamp) / oneYearMills);
}
}

View File

@@ -1,182 +1,26 @@
package cn.ac.iie.test.zookeeper;
import cn.ac.iie.common.FlowWriteConfig;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
public class DistributedLock implements Lock, Watcher {
private ZooKeeper zk;
private String root = "/locks";//root node
private String lockName;//identifier of the contended resource
private String waitNode;//the previous lock to wait on
private String myZnode;//the current lock node
private CountDownLatch latch;//counter
private CountDownLatch connectedSignal = new CountDownLatch(1);
private int sessionTimeout = 2000;
public static void main(String[] args) {
DistributedLock lock = new DistributedLock(FlowWriteConfig.ZOOKEEPER_SERVERS, "disLocks1");
lock.lock();
System.out.println(1);
if (lock!=null){
lock.unlock();
}
}
public interface DistributedLock {
/**
* Creates the distributed lock; make sure the zookeeper service configured in config is reachable before use.
* Acquires the lock, waiting if it is not immediately available.
*/
public void acquire() throws Exception;
/**
* Acquires the lock, waiting until the timeout expires.
*
* @param config 192.168.1.127:2181
* @param lockName identifier of the contended resource; must not contain the word _lock_
* @param unit unit of the time parameter
* @throws Exception
* @return whether the lock was acquired
*/
public DistributedLock(String config, String lockName) {
this.lockName = lockName;
// open a connection to the server
try {
zk = new ZooKeeper(config, sessionTimeout, this);
connectedSignal.await();
Stat stat = zk.exists(root, false);//the Watcher is not triggered here
if (stat == null) {
// create the root node
zk.create(root, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
}
} catch (IOException e) {
throw new LockException(e);
} catch (KeeperException e) {
throw new LockException(e);
} catch (InterruptedException e) {
throw new LockException(e);
}
}
public boolean acquire(long time, TimeUnit unit) throws Exception;
/**
* Watcher for the zookeeper node.
* Releases the lock.
*
* @throws Exception
*/
public void process(WatchedEvent event) {
//used while establishing the connection
if (event.getState() == Event.KeeperState.SyncConnected) {
connectedSignal.countDown();
return;
}
//signal used when another thread releases its lock
if (this.latch != null) {
this.latch.countDown();
}
}
public void lock() {
try {
if (this.tryLock()) {
System.out.println("Thread " + Thread.currentThread().getId() + " " + myZnode + " get lock true");
return;
} else {
waitForLock(waitNode, sessionTimeout);//wait for the lock
}
} catch (KeeperException e) {
throw new LockException(e);
} catch (InterruptedException e) {
throw new LockException(e);
}
}
public boolean tryLock() {
try {
String splitStr = "_lock_";
if (lockName.contains(splitStr))
throw new LockException("lockName can not contains \\u000B");
//create an ephemeral child node
myZnode = zk.create(root + "/" + lockName + splitStr, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
System.out.println(myZnode + " is created ");
//list all child nodes
List<String> subNodes = zk.getChildren(root, false);
//collect all locks that share this lockName
List<String> lockObjNodes = new ArrayList<String>();
for (String node : subNodes) {
String _node = node.split(splitStr)[0];
if (_node.equals(lockName)) {
lockObjNodes.add(node);
}
}
Collections.sort(lockObjNodes);
if (myZnode.equals(root + "/" + lockObjNodes.get(0))) {
//if ours is the smallest node, the lock has been acquired
System.out.println(myZnode + "==" + lockObjNodes.get(0));
return true;
}
//otherwise find the node immediately smaller than ours
String subMyZnode = myZnode.substring(myZnode.lastIndexOf("/") + 1);
waitNode = lockObjNodes.get(Collections.binarySearch(lockObjNodes, subMyZnode) - 1);//locate the preceding child node
} catch (KeeperException e) {
throw new LockException(e);
} catch (InterruptedException e) {
throw new LockException(e);
}
return false;
}
public boolean tryLock(long time, TimeUnit unit) {
try {
if (this.tryLock()) {
return true;
}
return waitForLock(waitNode, time);
} catch (Exception e) {
e.printStackTrace();
}
return false;
}
private boolean waitForLock(String lower, long waitTime) throws InterruptedException, KeeperException {
Stat stat = zk.exists(root + "/" + lower, true);//also registers the watcher
//check whether the node just below ours exists; if not, there is no need to wait for the lock; the watcher is registered at the same time
if (stat != null) {
System.out.println("Thread " + Thread.currentThread().getId() + " waiting for " + root + "/" + lower);
this.latch = new CountDownLatch(1);
this.latch.await(waitTime, TimeUnit.MILLISECONDS);//wait here until the other thread releases the lock
this.latch = null;
}
return true;
}
public void unlock() {
try {
System.out.println("unlock " + myZnode);
zk.delete(myZnode, -1);
myZnode = null;
zk.close();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (KeeperException e) {
e.printStackTrace();
}
}
public void lockInterruptibly() throws InterruptedException {
this.lock();
}
public Condition newCondition() {
return null;
}
public class LockException extends RuntimeException {
private static final long serialVersionUID = 1L;
public LockException(String e) {
super(e);
}
public LockException(Exception e) {
super(e);
}
}
}
public void release() throws Exception;
}

View File

@@ -1,8 +0,0 @@
package cn.ac.iie.test.zookeeper;
public class RandomTest {
public static void main(String[] args) {
}
}