Compare commits
13 Commits
ip-learnin ... ip-learnin

| Author | SHA1 | Date |
|---|---|---|
| | e4d7737725 | |
| | 5cb7327f7b | |
| | e0f5b20ab6 | |
| | e7ff669d4c | |
| | 4ed79bfe79 | |
| | 0d5e4e9be2 | |
| | 40e76754d0 | |
| | b7a156b0b8 | |
| | 233cf20d50 | |
| | b13fc2bce1 | |
| | cbeba6372b | |
| | 1750549c7d | |
| | 5a039bb492 | |
@@ -13,7 +13,8 @@ public class ApplicationConfig {
public static final Integer ARANGODB_TTL = ConfigUtils.getIntProperty( "arangoDB.ttl");
public static final Integer ARANGODB_BATCH = ConfigUtils.getIntProperty( "arangoDB.batch");
public static final Integer UPDATE_ARANGO_BATCH =ConfigUtils.getIntProperty("update.arango.batch");
public static final Integer UPDATE_ARANGO_BATCH = ConfigUtils.getIntProperty("update.arango.batch");
public static final Long ARANGODB_READ_LIMIT = ConfigUtils.getLongProperty("arangoDB.read.limit");
public static final Integer THREAD_POOL_NUMBER = ConfigUtils.getIntProperty( "thread.pool.number");
public static final Integer THREAD_AWAIT_TERMINATION_TIME = ConfigUtils.getIntProperty( "thread.await.termination.time");

@@ -21,19 +22,11 @@ public class ApplicationConfig {
public static final Long READ_CLICKHOUSE_MAX_TIME = ConfigUtils.getLongProperty("read.clickhouse.max.time");
public static final Long READ_CLICKHOUSE_MIN_TIME = ConfigUtils.getLongProperty("read.clickhouse.min.time");

public static final Integer CLICKHOUSE_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("clickhouse.time.limit.type");
public static final Integer TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("time.limit.type");
public static final Integer UPDATE_INTERVAL = ConfigUtils.getIntProperty("update.interval");

public static final Integer DISTINCT_CLIENT_IP_NUM = ConfigUtils.getIntProperty("distinct.client.ip.num");
public static final Integer RECENT_COUNT_HOUR = ConfigUtils.getIntProperty("recent.count.hour");

public static final String TOP_DOMAIN_FILE_NAME = ConfigUtils.getStringProperty("top.domain.file.name");

public static final String ARANGODB_READ_LIMIT = ConfigUtils.getStringProperty("arangoDB.read.limit");

public static final Integer ARANGO_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("arango.time.limit.type");

public static final Long READ_ARANGO_MAX_TIME = ConfigUtils.getLongProperty("read.arango.max.time");
public static final Long READ_ARANGO_MIN_TIME = ConfigUtils.getLongProperty("read.arango.min.time");

}
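The hunks above swap the String-typed `ARANGODB_READ_LIMIT` for a `Long` and collapse the separate ClickHouse/Arango time-limit switches into a single `TIME_LIMIT_TYPE`. All of these constants are resolved once, at class-load time, through `ConfigUtils`. A minimal sketch of how such typed getters are typically backed — the getter bodies here are assumptions inferred from the call sites; only the load-from-classpath pattern is confirmed by the `ConfigUtils` hunk further down:

```java
import java.util.Properties;

// Hypothetical stand-in for cn.ac.iie.utils.ConfigUtils, for illustration only.
public class ConfigUtilsSketch {
    private static Properties propCommon = new Properties();

    static {
        try {
            // application.properties is loaded once from the classpath
            propCommon.load(ConfigUtilsSketch.class.getClassLoader()
                    .getResourceAsStream("application.properties"));
        } catch (Exception e) {
            propCommon = null;
        }
    }

    public static String getStringProperty(String key) {
        return propCommon.getProperty(key);
    }

    public static Integer getIntProperty(String key) {
        return Integer.parseInt(propCommon.getProperty(key).trim());
    }

    public static Long getLongProperty(String key) {
        return Long.parseLong(propCommon.getProperty(key).trim());
    }
}
```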
@@ -1,7 +1,7 @@
package cn.ac.iie.dao;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.service.ingestion.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;

@@ -10,38 +10,35 @@ import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
* 获取arangoDB历史数据
*
* @author wlh
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);

static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();

private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();

private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();

<T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
Class<T> type) {
try {
LOG.info("开始更新" + table);
LOG.info("开始更新"+table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
historyMap.put(i, new ConcurrentHashMap<>());
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
map.put(i,new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
// long[] timeRange = getTimeRange(table);

@@ -49,51 +46,18 @@ public class BaseArangoData {
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info("读取" + table + " arangoDB 共耗时:" + (last - start));
} catch (Exception e) {
LOG.info("读取"+table+" arangoDB 共耗时:"+(last-start));
}catch (Exception e){
e.printStackTrace();
}
}

private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME;
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME;
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.info(sql + "\n查询最大最小时间用时:" + (lastTime - startTime));
return new long[]{minTime, maxTime};

}

private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;

@@ -111,19 +75,15 @@ public class BaseArangoData {
return cnt;
}

private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
}

private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER + 1;
long offsetNum = threadNumber * sepNum;
if (sepNum >= ApplicationConfig.ARANGODB_READ_LIMIT * 10000){
sepNum = ApplicationConfig.ARANGODB_READ_LIMIT * 10000;
}
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}

}
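The substantive change in this file is how the full-collection scan is split across workers: the `getTimeRange`/`getQuerySql(long[] …)` pair partitioned by equal `FIRST_FOUND_TIME` sub-ranges, which yields badly skewed slices when timestamps cluster, while the `getQuerySql(Long cnt, …)` variant partitions by document count with AQL `LIMIT offset, count` (the log strings above read "开始更新" = "start updating" and "读取 … 共耗时" = "total read time"). A self-contained sketch of the count-based slicing, with placeholder values standing in for the config constants:

```java
// Illustrates the count-based partitioning used above: each worker i reads an
// equal slice of the collection via AQL "LIMIT offset, count". The constants
// are stand-ins for ApplicationConfig values and getCountTotal(table).
public class AqlPartitionDemo {
    public static void main(String[] args) {
        int threadPoolNumber = 4;      // ApplicationConfig.THREAD_POOL_NUMBER
        long countTotal = 1_000_003L;  // result of getCountTotal(table)
        String table = "R_LOCATE_FQDN2IP";

        long sepNum = countTotal / threadPoolNumber + 1; // slice size, rounded up
        for (int i = 0; i < threadPoolNumber; i++) {
            long offset = i * sepNum;
            // Same shape as getQuerySql(Long, int, String); unlike the old
            // FIRST_FOUND_TIME range filter, every slice has the same size
            // even when timestamps are heavily clustered.
            String aql = "FOR doc IN " + table
                    + " limit " + offset + "," + sepNum + " RETURN doc";
            System.out.println(aql);
        }
    }
}
```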
@@ -15,7 +15,7 @@ import java.util.HashMap;
import java.util.function.Function;
import java.util.function.Supplier;

import static cn.ac.iie.service.read.ReadClickhouseData.putMapByHashcode;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;

/**
* 读取clickhouse数据,封装到map

@@ -24,25 +24,23 @@ import static cn.ac.iie.service.read.ReadClickhouseData.putMapByHashcode;
public class BaseClickhouseData {
private static final Logger LOG = LoggerFactory.getLogger(BaseClickhouseData.class);

private static ClickhouseConnect manger = ClickhouseConnect.getInstance();
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String,ArrayList<BaseDocument>>> newVertexSubscriberMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnAddressIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationIpVisitFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationSubsciberLocateIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnSameFqdnMap = new HashMap<>();

private static ClickhouseConnect manger = ClickhouseConnect.getInstance();
private DruidPooledConnection connection;
private Statement statement;

<T extends BaseDocument> void baseDocumentFromClickhouse(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
Function<ResultSet,T> formatResultFunc) {
long start = System.currentTimeMillis();
initializeMap(newMap);
String sql = getSqlSupplier.get();
LOG.info(sql);
try {
connection = manger.getConnection();
statement = connection.createStatement();

@@ -56,7 +54,7 @@ public class BaseClickhouseData {
}
}
long last = System.currentTimeMillis();
LOG.info("读取"+i+"条数据,运行时间:" + (last - start));
LOG.info(sql + "\n读取"+i+"条数据,运行时间:" + (last - start));
}catch (Exception e){
e.printStackTrace();
}finally {

@@ -67,7 +65,7 @@ public class BaseClickhouseData {
private <T extends BaseDocument> void initializeMap(HashMap<Integer, HashMap<String,ArrayList<T>>> map){
try {
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
map.put(i, new HashMap<>(16));
map.put(i, new HashMap<>());
}
}catch (Exception e){
e.printStackTrace();
@@ -1,13 +1,14 @@
package cn.ac.iie.dao;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.update.Document;
import cn.ac.iie.service.update.relationship.LocateFqdn2Ip;
import cn.ac.iie.service.update.relationship.SameFqdn2Fqdn;
import cn.ac.iie.service.update.relationship.LocateSubscriber2Ip;
import cn.ac.iie.service.update.relationship.VisitIp2Fqdn;
import cn.ac.iie.service.update.vertex.Fqdn;
import cn.ac.iie.service.update.vertex.Ip;
import cn.ac.iie.service.update.vertex.Subscriber;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;

@@ -35,36 +36,37 @@ public class UpdateGraphData {
private static final Logger LOG = LoggerFactory.getLogger(UpdateGraphData.class);
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();

private static BaseArangoData baseArangoData = new BaseArangoData();
private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();

public void updateArango(){
long start = System.currentTimeMillis();
try {
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN",
Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);

updateDocument(newVertexIpMap,historyVertexIpMap,"IP",
Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql, ReadClickhouseData::getVertexIpDocument);
// updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
// ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);

updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP",
LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql, ReadClickhouseData::getRelationFqdnAddressIpDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);

updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
VisitIp2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipIpVisitFqdnSql, ReadClickhouseData::getRelationIpVisitFqdnDocument);
updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);

updateDocument(newRelationFqdnSameFqdnMap,historyRelationFqdnSameFqdnMap,"R_SAME_ORIGIN_FQDN2FQDN",
SameFqdn2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnSameFqdnSql, ReadClickhouseData::getRelationshipFqdnSameFqdnDocument);
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);

// updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
// VisitIp2Fqdn.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);

updateDocument(newRelationSubsciberLocateIpMap,historyRelationSubsciberLocateIpMap,"R_LOCATE_SUBSCRIBER2IP",
LocateSubscriber2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);

long last = System.currentTimeMillis();
LOG.info("更新图数据库时间共计:"+(last - start));
LOG.info("iplearning application运行完毕,用时:"+(last - start));
}catch (Exception e){
e.printStackTrace();
}finally {

@@ -73,13 +75,15 @@ public class UpdateGraphData {
}
}

private <T extends BaseDocument> void updateDocument(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
String collection,
Class<? extends Document<T>> taskType,
Class<T> docmentType,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc) {
Function<ResultSet,T> formatResultFunc
) {
try {

baseArangoData.readHistoryData(collection,historyMap,docmentType);
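Only the first statement of `updateDocument` is visible in this hunk. A plausible shape of the rest of the method, inside `UpdateGraphData` with the field names used above — the ClickHouse read and the per-shard task dispatch are assumptions inferred from the maps, constructors, and `CountDownLatch` usage elsewhere in this diff, not the author's confirmed code:

```java
private <T extends BaseDocument> void updateDocument(
        HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
        ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
        String collection,
        Class<? extends Document<T>> taskType,
        Class<T> documentType,
        Supplier<String> getSqlSupplier,
        Function<ResultSet, T> formatResultFunc) throws Exception {
    // 1. Pull the current state of the collection out of ArangoDB (visible above).
    baseArangoData.readHistoryData(collection, historyMap, documentType);
    // 2. Pull the new interval out of ClickHouse into newMap (assumed).
    baseClickhouseData.baseDocumentFromClickhouse(newMap, getSqlSupplier, formatResultFunc);
    // 3. One merge task per hash shard, joined via a CountDownLatch (assumed);
    //    the constructor signature matches the task classes shown below.
    CountDownLatch latch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
    for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
        Document<T> task = taskType
                .getConstructor(HashMap.class, ArangoDBConnect.class, String.class,
                        ConcurrentHashMap.class, CountDownLatch.class)
                .newInstance(newMap.get(i), arangoManger, collection,
                        historyMap.get(i), latch);
        pool.executor(task);
    }
    latch.await();
}
```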
@@ -1,7 +1,6 @@
package cn.ac.iie.service.read;
package cn.ac.iie.service.ingestion;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.TopDomainUtils;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;

@@ -23,10 +22,10 @@ public class ReadClickhouseData {
private static Pattern pattern = Pattern.compile("^[\\d]*$");
private static final Logger LOG = LoggerFactory.getLogger(ReadClickhouseData.class);

private static long[] timeLimit = getTimeLimit();
private static long maxTime = timeLimit[0];
private static long minTime = timeLimit[1];

public static final Integer DISTINCT_CLIENT_IP_NUM = ApplicationConfig.DISTINCT_CLIENT_IP_NUM;
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR;
public static final HashSet<String> PROTOCOL_SET;

@@ -38,11 +37,10 @@ public class ReadClickhouseData {
PROTOCOL_SET.add("DNS");
}

public static BaseDocument getVertexFqdnDocument(ResultSet resultSet){
public static BaseDocument getVertexFqdnDocument(ResultSet resultSet) {
BaseDocument newDoc = null;
try {
String fqdnOrReferer = resultSet.getString("FQDN");
String fqdnName = TopDomainUtils.getDomainFromUrl(fqdnOrReferer);
String fqdnName = resultSet.getString("FQDN");
if (isDomain(fqdnName)) {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");

@@ -52,13 +50,13 @@ public class ReadClickhouseData {
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}
}catch (Exception e){
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;
}

public static BaseDocument getVertexIpDocument(ResultSet resultSet){
public static BaseDocument getVertexIpDocument(ResultSet resultSet) {
BaseDocument newDoc = new BaseDocument();
try {
String ip = resultSet.getString("IP");

@@ -67,6 +65,13 @@ public class ReadClickhouseData {
long sessionCount = resultSet.getLong("SESSION_COUNT");
long bytesSum = resultSet.getLong("BYTES_SUM");
String ipType = resultSet.getString("ip_type");
String[] commonLinkInfos = (String[]) resultSet.getArray("common_link_info").getArray();
String commonLinkInfo;
if (commonLinkInfos.length > 1 && !commonLinkInfos[1].equals("")){
commonLinkInfo = commonLinkInfos[1];
}else {
commonLinkInfo = commonLinkInfos[0];
}
newDoc.setKey(ip);
newDoc.addAttribute("IP", ip);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);

@@ -85,20 +90,15 @@ public class ReadClickhouseData {
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
default:
newDoc.addAttribute("SERVER_SESSION_COUNT", 0L);
newDoc.addAttribute("SERVER_BYTES_SUM", 0L);
newDoc.addAttribute("CLIENT_SESSION_COUNT", 0L);
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
}
// newDoc.addAttribute("COMMON_LINK_INFO", "");
}catch (Exception e){
newDoc.addAttribute("COMMON_LINK_INFO", commonLinkInfo);
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;
}

public static BaseDocument getVertexSubscriberDocument(ResultSet resultSet){
public static BaseDocument getVertexSubscriberDocument(ResultSet resultSet) {
BaseDocument newDoc = new BaseDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");

@@ -107,13 +107,14 @@ public class ReadClickhouseData {
newDoc.setKey(subscriberId);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}catch (Exception e){
newDoc.addAttribute("SUBSCRIBER_ID",subscriberId);
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;
}

public static BaseEdgeDocument getRelationshipSubsciberLocateIpDocument(ResultSet resultSet){
public static BaseEdgeDocument getRelationshipSubsciberLocateIpDocument(ResultSet resultSet) {
BaseEdgeDocument newDoc = new BaseEdgeDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");

@@ -129,14 +130,14 @@ public class ReadClickhouseData {
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("COUNT_TOTAL", countTotal);
}catch (Exception e){
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;

}

public static BaseEdgeDocument getRelationFqdnAddressIpDocument(ResultSet resultSet){
public static BaseEdgeDocument getRelationFqdnAddressIpDocument(ResultSet resultSet) {
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");

@@ -145,6 +146,7 @@ public class ReadClickhouseData {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String schemaType = resultSet.getString("schema_type");
String[] distCipRecents = (String[]) resultSet.getArray("DIST_CIP_RECENT").getArray();
long[] clientIpTs = new long[distCipRecents.length];
for (int i = 0; i < clientIpTs.length; i++) {

@@ -158,43 +160,18 @@ public class ReadClickhouseData {
newDoc.setTo("IP/" + vIp);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("DIST_CIP", distCipRecents);
newDoc.addAttribute("DIST_CIP_TS", clientIpTs);

newDoc.addAttribute("PROTOCOL_TYPE", schemaType);
checkSchemaProperty(newDoc, schemaType, countTotal);
}
}catch (Exception e){
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;
}

public static BaseEdgeDocument getRelationshipFqdnSameFqdnDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String domainFqdn = resultSet.getString("domainFqdn");
String referer = resultSet.getString("referer");
String refererFqdn = TopDomainUtils.getDomainFromUrl(referer);
if (isDomain(refererFqdn) && isDomain(domainFqdn)){
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String key = domainFqdn + "-" + refererFqdn;
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("FQDN/" + domainFqdn);
newDoc.setTo("FQDN/" + refererFqdn);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}

public static BaseEdgeDocument getRelationIpVisitFqdnDocument(ResultSet resultSet){
public static BaseEdgeDocument getRelationIpVisitFqdnDocument(ResultSet resultSet) {
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");

@@ -204,16 +181,18 @@ public class ReadClickhouseData {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String schemaType = resultSet.getString("schema_type");

newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("IP/" + vIp);
newDoc.setTo("FQDN/" + vFqdn);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("PROTOCOL_TYPE", schemaType);
checkSchemaProperty(newDoc, schemaType, countTotal);
}
}catch (Exception e){
} catch (Exception e) {
e.printStackTrace();
}
return newDoc;

@@ -222,8 +201,8 @@ public class ReadClickhouseData {
public static <T extends BaseDocument> void putMapByHashcode(T newDoc, HashMap<Integer, HashMap<String, ArrayList<T>>> map) {
if (newDoc != null) {
String key = newDoc.getKey();
int i = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
HashMap<String, ArrayList<T>> documentHashMap = map.getOrDefault(i, new HashMap<>());
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
HashMap<String, ArrayList<T>> documentHashMap = map.getOrDefault(hashCode, new HashMap<>());
ArrayList<T> documentArrayList = documentHashMap.getOrDefault(key, new ArrayList<>());
documentArrayList.add(newDoc);
documentHashMap.put(key, documentArrayList);

@@ -262,17 +241,16 @@ public class ReadClickhouseData {
return false;
}

private static void checkSchemaProperty(BaseEdgeDocument newDoc, String schema, long countTotal) {
long[] recentCnt = new long[RECENT_COUNT_HOUR];
recentCnt[0] = countTotal;
for (String protocol: PROTOCOL_SET){
String protocolRecent = protocol +"_CNT_RECENT";
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
String protocolTotal = protocol + "_CNT_TOTAL";
if (protocol.equals(schema)){
if (protocol.equals(schema)) {
newDoc.addAttribute(protocolTotal, countTotal);
newDoc.addAttribute(protocolRecent, recentCnt);
}else {
} else {
newDoc.addAttribute(protocolTotal, 0L);
newDoc.addAttribute(protocolRecent, new long[RECENT_COUNT_HOUR]);
}

@@ -280,32 +258,32 @@ public class ReadClickhouseData {
}

public static String getVertexFqdnSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime;
String mediaDomainSql = "SELECT s1_domain AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_domain != '' GROUP BY s1_domain";
String refererSql = "SELECT s1_referer AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_referer != '' GROUP BY s1_referer";
return "SELECT * FROM((" + mediaDomainSql + ") UNION ALL (" + refererSql + "))";
String where = "common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
return "SELECT FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME FROM ((" + sslSql + ") UNION ALL (" + httpSql + ")) GROUP BY FQDN HAVING FQDN != ''";
}

public static String getVertexIpSql() {
String where = " recv_time >= " + minTime + " AND recv_time < " + maxTime;
String clientIpSql = "SELECT s1_s_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'client' as ip_type FROM media_expire_patch where " + where + " group by IP";
String serverIpSql = "SELECT s1_d_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'server' as ip_type FROM media_expire_patch where " + where + " group by IP";
// String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_schema_type != 'BASE'";
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime ;
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_c2s) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_s2c) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
}

public static String getRelationshipFqdnAddressIpSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_d_ip != '' ";
return "SELECT s1_domain AS FQDN,s1_d_ip AS common_server_ip,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(s1_s_ip) AS DIST_CIP_RECENT FROM media_expire_patch WHERE "+where+" GROUP BY s1_d_ip,s1_domain";
}

public static String getRelationshipFqdnSameFqdnSql(){
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_referer != '' ";
return "SELECT s1_domain AS domainFqdn,s1_referer AS referer,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL FROM media_expire_patch where "+where+" GROUP BY s1_domain,s1_referer";
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
}

public static String getRelationshipIpVisitFqdnSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime+" AND s1_s_ip != '' AND s1_domain != '' ";
return "SELECT s1_s_ip AS common_client_ip,s1_domain AS FQDN,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL FROM media_expire_patch WHERE "+where+" GROUP BY s1_s_ip,s1_domain";
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
}

public static String getVertexSubscriberSql() {

@@ -321,7 +299,7 @@ public class ReadClickhouseData {
private static long[] getTimeLimit() {
long maxTime = 0L;
long minTime = 0L;
switch (ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE) {
switch (ApplicationConfig.TIME_LIMIT_TYPE) {
case 0:
maxTime = currentHour;
minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL;
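`putMapByHashcode` (whose loop variable is renamed `i` to `hashCode` in the hunk above) is what keeps the merge phase lock-free: every document is routed to one of `THREAD_POOL_NUMBER` buckets by its key's hash, and `ReadHistoryArangoData` applies the same modulus, so the new-data shard and the history shard for a given key always land on the same thread. A runnable sketch of the routing, plus one caveat about the arithmetic as written:

```java
import java.util.ArrayList;
import java.util.HashMap;

// Demonstrates the hash-sharding used by putMapByHashcode, with a plain
// String standing in for the document key.
public class ShardDemo {
    static final int THREAD_POOL_NUMBER = 4; // stand-in for the config value

    public static void main(String[] args) {
        HashMap<Integer, HashMap<String, ArrayList<String>>> map = new HashMap<>();
        for (int i = 0; i < THREAD_POOL_NUMBER; i++) {
            map.put(i, new HashMap<>());
        }
        String key = "example.com";
        // Same arithmetic as the hunk above. Corner case: if hashCode() returns
        // Integer.MIN_VALUE, Math.abs stays negative, so
        // Math.floorMod(key.hashCode(), N) would be the safer form.
        int shard = Math.abs(key.hashCode()) % THREAD_POOL_NUMBER;
        map.get(shard).computeIfAbsent(key, k -> new ArrayList<>()).add("doc");
        System.out.println(key + " -> shard " + shard);
    }
}
```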
@@ -1,4 +1,4 @@
package cn.ac.iie.service.read;
package cn.ac.iie.service.ingestion;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;

@@ -8,29 +8,32 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

import static cn.ac.iie.service.read.ReadClickhouseData.RECENT_COUNT_HOUR;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;

/**
* @author wlh
* 多线程全量读取arangoDb历史数据,封装到map
*/
@SuppressWarnings("unchecked")
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);

private ArangoDBConnect arangoConnect;
private String query;
private ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map;
private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
private Class<T> type;
private String table;
private CountDownLatch countDownLatch;

public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
String query,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
Class<T> type,
String table,
CountDownLatch countDownLatch) {

@@ -48,23 +51,33 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
ArrayList<T> list = new ArrayList<>();
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc);
// deleteDistinctClientIpByTime(doc);
list.add(doc);
break;
default:
}
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
arangoConnect.overwrite(list,table);
long l = System.currentTimeMillis();
LOG.info(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
}
}catch (Exception e){
e.printStackTrace();
}finally {
} catch (Exception e) {
LOG.error(Arrays.toString(e.getStackTrace()));
} finally {
countDownLatch.countDown();
LOG.info("本线程读取完毕,剩余线程数量:"+countDownLatch.getCount());
LOG.info("本线程读取完毕,剩余线程数量:" + countDownLatch.getCount());
}
}

@@ -73,7 +86,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[0]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;

@@ -82,4 +95,28 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
}
}

private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
Collections.reverse(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
String[] distCipArr = new String[index];
long[] disCipTsArr = new long[index];
if (index != 0 && distCip.size() + 1 == distCipTs.size()){
for (int i = 0; i < index; i++) {
distCipArr[i] = distCip.get(i);
disCipTsArr[i] = distCipTs.get(i);
}
}
doc.updateAttribute("DIST_CIP", distCipArr);
doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
}

}
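The `updateProtocolDocument` fragment above maintains a sliding hourly window per protocol: each `<PROTOCOL>_CNT_RECENT` attribute (which ArangoDB deserializes as an `ArrayList<Long>`, hence the `toArray` call) is shifted one slot to the right so index k always holds "the count k hours ago", the oldest hour falls off the end, and slot 0 is zeroed for the new hour. The finally-block log reads "this thread finished reading; remaining thread count". A self-contained version of the shift:

```java
import java.util.Arrays;

// Mirrors what updateProtocolDocument does to each <PROTOCOL>_CNT_RECENT array.
public class RecentWindowDemo {
    static final int RECENT_COUNT_HOUR = 6; // stand-in for the config value

    static Long[] shift(Long[] src) {
        Long[] dst = new Long[RECENT_COUNT_HOUR];
        // Same copy as the hunk above: drop the oldest (last) slot, move
        // everything right by one, and open a fresh slot for the new hour.
        System.arraycopy(src, 0, dst, 1, src.length - 1);
        dst[0] = 0L;
        return dst;
    }

    public static void main(String[] args) {
        Long[] hourly = {5L, 3L, 9L, 0L, 2L, 7L}; // counts for the last 6 hours
        System.out.println(Arrays.toString(shift(hourly)));
        // -> [0, 5, 3, 9, 0, 2]  (the 7 from the oldest hour ages out)
    }
}
```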
@@ -124,5 +124,10 @@ public class Document<T extends BaseDocument> extends Thread{
lastDoc.addAttribute(attribute,firstSumAttribute+lastSumAttribute);
}

protected void replaceAttribute(T firstDoc,T lastDoc,String attribute){
Object attributeObj = firstDoc.getAttribute(attribute);
lastDoc.addAttribute(attribute,attributeObj);
}

}
@@ -1,6 +1,6 @@
package cn.ac.iie.service.update;

import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;

@@ -24,6 +24,11 @@ public class Relationship extends Document<BaseEdgeDocument> {
super.updateFunction(newEdgeDocument,historyEdgeDocument);
}

@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}

protected void updateProcotol(BaseEdgeDocument historyEdgeDocument, String schema, BaseEdgeDocument newEdgeDocument){
String recentSchema = schema +"_CNT_RECENT";
String totalSchema = schema + "_CNT_TOTAL";

@@ -44,11 +49,6 @@ public class Relationship extends Document<BaseEdgeDocument> {
}
}

@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}

protected void mergeProtocol(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
String schema = lastDoc.getAttribute("PROTOCOL_TYPE").toString();
if (ReadClickhouseData.PROTOCOL_SET.contains(schema)){
@@ -22,19 +22,4 @@ public class Vertex extends Document<BaseDocument> {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}

@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
}

@Override
protected void mergeFunction(BaseDocument lastDoc,BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}

@Override
public void run() {
super.run();
}

}
@@ -1,5 +1,6 @@
package cn.ac.iie.service.update.relationship;

import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;

@@ -8,8 +9,8 @@ import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

import static cn.ac.iie.service.read.ReadClickhouseData.DISTINCT_CLIENT_IP_NUM;
import static cn.ac.iie.service.read.ReadClickhouseData.currentHour;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.DISTINCT_CLIENT_IP_NUM;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.currentHour;

public class LocateFqdn2Ip extends Relationship {

@@ -25,7 +26,7 @@ public class LocateFqdn2Ip extends Relationship {
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
super.mergeFunction(lastDoc, newDocument);
mergeDistinctClientIp(lastDoc, newDocument);
putSumAttribute(lastDoc, newDocument,"CNT_TOTAL");
mergeProtocol(lastDoc, newDocument);
}

private void mergeDistinctClientIp(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){

@@ -45,8 +46,10 @@ public class LocateFqdn2Ip extends Relationship {
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
for (String schema:ReadClickhouseData.PROTOCOL_SET){
updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
}
updateDistinctClientIp(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument, historyEdgeDocument,"CNT_TOTAL");
}

private void updateDistinctClientIp(BaseEdgeDocument newEdgeDocument,BaseEdgeDocument edgeDocument){
@@ -1,34 +0,0 @@
package cn.ac.iie.service.update.relationship;

import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

public class SameFqdn2Fqdn extends Relationship {

public SameFqdn2Fqdn(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}

@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
}

@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
}

}
@@ -1,11 +1,13 @@
package cn.ac.iie.service.update.relationship;

import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

@@ -21,12 +23,14 @@ public class VisitIp2Fqdn extends Relationship {
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
for (String schema: ReadClickhouseData.PROTOCOL_SET){
updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
}
}

@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
mergeProtocol(lastDoc,newDocument);
}
}
@@ -16,26 +16,20 @@ public class Ip extends Vertex {
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
}

@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
updateIpByType(newDocument, historyDocument);
super.replaceAttribute(newDocument,historyDocument,"COMMON_LINK_INFO");
}

@Override
protected void mergeFunction(BaseDocument lastDoc, BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
mergeIpByType(lastDoc, newDocument);
}

private void mergeIpByType(BaseDocument lastDoc, BaseDocument newDocument) {
putSumAttribute(lastDoc,newDocument,"CLIENT_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"CLIENT_BYTES_SUM");
putSumAttribute(lastDoc,newDocument,"SERVER_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"SERVER_BYTES_SUM");
updateIpByType(lastDoc, newDocument);
}

private void updateIpByType(BaseDocument newDocument, BaseDocument historyDocument) {
@@ -2,15 +2,9 @@ package cn.ac.iie.test;

import cn.ac.iie.dao.UpdateGraphData;

/**
* iplearning程序入口
* @author wlh
*/
public class IpLearningApplicationTest {

public static void main(String[] args) {

UpdateGraphData updateGraphData = new UpdateGraphData();
updateGraphData.updateArango();
@@ -16,6 +16,7 @@ import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class ArangoDBConnect {

@@ -23,10 +24,10 @@ public class ArangoDBConnect {
private static ArangoDB arangoDB = null;
private static ArangoDBConnect conn = null;
static {
getArangoDatabase();
getArangoDB();
}

private static void getArangoDatabase(){
private static void getArangoDB(){
arangoDB = new ArangoDB.Builder()
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
.host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)

@@ -52,45 +53,26 @@ public class ArangoDBConnect {
arangoDB.shutdown();
}
}catch (Exception e){
e.printStackTrace();
LOG.error(e.getMessage());
}
}

public <T> ArangoCursor<T> executorQuery(String query,Class<T> type){
ArangoDatabase database = getDatabase();
Map<String, Object> bindVars = new MapBuilder().get();
AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL);
AqlQueryOptions options = new AqlQueryOptions()
.ttl(ApplicationConfig.ARANGODB_TTL);
try {
return database.query(query, bindVars, options, type);
}catch (Exception e){
e.printStackTrace();
LOG.error(e.getMessage());
return null;
}finally {
bindVars.clear();
}
}

@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("更新失败");
e.printStackTrace();
}finally {
docInsert.clear();
docInsert.clear();
}
}

public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
public <T> void overwrite(List<T> docOverwrite, String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);

@@ -101,16 +83,14 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.debug("写入arangoDB异常:"+errorEntity.getErrorMessage());
LOG.error("写入arangoDB异常:"+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
System.out.println("更新失败:"+e.toString());
LOG.error("更新失败:"+e.toString());
}finally {
docOverwrite.clear();
}
}

}
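Two notes on this file. First, the deprecated `insertAndUpdate` clears `docInsert` twice in its finally block, which looks like the second call was meant to clear `docUpdate`. Second, `overwrite` (now taking a `List`) performs an upsert-style bulk write; the construction of `documentCreateOptions` is not visible in the hunk, but in the 4.x/5.x arangodb-java-driver this is typically expressed as `insertDocuments` with `DocumentCreateOptions.overwrite(true)`. A hedged sketch built around the visible lines (the error strings above read "写入arangoDB异常" = "error writing to arangoDB" and "更新失败" = "update failed"):

```java
import com.arangodb.ArangoCollection;
import com.arangodb.ArangoDatabase;
import com.arangodb.entity.DocumentCreateEntity;
import com.arangodb.entity.ErrorEntity;
import com.arangodb.entity.MultiDocumentEntity;
import com.arangodb.model.DocumentCreateOptions;

import java.util.List;

// Sketch only: the DocumentCreateOptions line is an assumption; the
// insertDocuments call and error iteration mirror the hunk above.
public class OverwriteSketch {
    public <T> void overwrite(ArangoDatabase database, List<T> docOverwrite, String collectionName) {
        try {
            ArangoCollection collection = database.collection(collectionName);
            if (!docOverwrite.isEmpty()) {
                DocumentCreateOptions documentCreateOptions =
                        new DocumentCreateOptions().overwrite(true); // replace on _key conflict
                MultiDocumentEntity<DocumentCreateEntity<T>> result =
                        collection.insertDocuments(docOverwrite, documentCreateOptions);
                for (ErrorEntity errorEntity : result.getErrors()) {
                    System.err.println("write error: " + errorEntity.getErrorMessage());
                }
            }
        } finally {
            docOverwrite.clear(); // same reuse pattern as the original
        }
    }
}
```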
@@ -2,6 +2,8 @@ package cn.ac.iie.utils;

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidPooledConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.ResultSet;

@@ -10,6 +12,7 @@ import java.sql.Statement;
import java.util.Properties;

public class ClickhouseConnect {
private static final Logger LOG = LoggerFactory.getLogger(ClickhouseConnect.class);
private static DruidDataSource dataSource = null;
private static ClickhouseConnect dbConnect = null;
private static Properties props = new Properties();

@@ -43,7 +46,7 @@ public class ClickhouseConnect {
dataSource.setKeepAlive(true);
}
} catch (Exception e) {
e.printStackTrace();
LOG.error(e.getMessage());

}
}

@@ -85,7 +88,7 @@ public class ClickhouseConnect {
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
LOG.error(e.getMessage());
}

}

@@ -98,7 +101,7 @@ public class ClickhouseConnect {
pstm = connection.createStatement();
return pstm.executeQuery(query);
}catch (Exception e){
e.printStackTrace();
LOG.error(e.getMessage());
return null;
}
}
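The recurring change in these utility classes replaces `e.printStackTrace()` with `LOG.error(e.getMessage())`, which keeps output in the log but silently drops the stack trace (and logs "null" for exceptions without a message). slf4j's `Logger` has an overload that keeps both; a minimal illustration:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Shows the difference between the pattern adopted in this diff and the
// throwable-aware slf4j overload.
public class LoggingDemo {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingDemo.class);

    public static void main(String[] args) {
        try {
            throw new IllegalStateException("connection lost");
        } catch (Exception e) {
            LOG.error(e.getMessage());    // message only, as in the diff
            LOG.error("query failed", e); // message plus full stack trace
        }
    }
}
```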
@@ -1,8 +1,12 @@
package cn.ac.iie.utils;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;

public class ConfigUtils {
private static final Logger LOG = LoggerFactory.getLogger(ConfigUtils.class);
private static Properties propCommon = new Properties();

public static String getStringProperty(String key) {

@@ -25,12 +29,12 @@ public class ConfigUtils {
static {
try {
propCommon.load(ConfigUtils.class.getClassLoader().getResourceAsStream("application.properties"));
System.out.println("application.properties加载成功");
LOG.info("application.properties加载成功");

} catch (Exception e) {
propCommon = null;
System.err.println("配置加载失败");
LOG.error("配置加载失败");
}
}
}
@@ -2,14 +2,13 @@ package cn.ac.iie.utils;

import cn.ac.iie.config.ApplicationConfig;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.*;

/**
* 线程池管理
* @author wlh
*/
public class ExecutorThreadPool {
private static final Logger LOG = LoggerFactory.getLogger(ExecutorThreadPool.class);
private static ExecutorService pool = null ;
private static ExecutorThreadPool poolExecutor = null;

@@ -20,13 +19,9 @@ public class ExecutorThreadPool {
private static void getThreadPool(){
ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("iplearning-application-pool-%d").build();

//Common Thread Pool
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER*2,
0L, TimeUnit.MILLISECONDS,
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER,
0L, TimeUnit.SECONDS,
new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());

// pool = Executors.newFixedThreadPool(ApplicationConfig.THREAD_POOL_NUMBER);
}

public static ExecutorThreadPool getInstance(){

@@ -44,7 +39,7 @@ public class ExecutorThreadPool {
public void awaitThreadTask(){
try {
while (!pool.awaitTermination(ApplicationConfig.THREAD_AWAIT_TERMINATION_TIME, TimeUnit.SECONDS)) {
System.out.println("线程池没有关闭");
LOG.warn("线程池没有关闭");
}
} catch (InterruptedException e) {
e.printStackTrace();
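The pool change here is sound: a `ThreadPoolExecutor` only grows beyond `corePoolSize` after its work queue is full, so with a 1024-slot `LinkedBlockingQueue` the extra N threads of the old `(N, 2N)` configuration almost never existed; fixing core == max makes the size explicit and predictable (the warning string above reads "thread pool has not shut down"). Self-contained equivalent of the new configuration:

```java
import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Demonstrates the fixed-size pool adopted in this hunk.
public class PoolDemo {
    public static void main(String[] args) {
        int n = 4; // stand-in for ApplicationConfig.THREAD_POOL_NUMBER
        ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
                .setNameFormat("iplearning-application-pool-%d").build();
        ExecutorService pool = new ThreadPoolExecutor(
                n, n,                     // fixed size, like the new code
                0L, TimeUnit.SECONDS,     // keep-alive is irrelevant when core == max
                new LinkedBlockingQueue<>(1024),
                namedThreadFactory,
                new ThreadPoolExecutor.AbortPolicy()); // reject when the queue is full
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}
```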
@@ -1,161 +0,0 @@
package cn.ac.iie.utils;

import cn.ac.iie.config.ApplicationConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.HashMap;

public class TopDomainUtils {
private static Logger logger = LoggerFactory.getLogger(TopDomainUtils.class);

public static String getSecDomain(String urlDomain, HashMap<String, HashMap<String, String>> maps) {
String[] split = urlDomain.split("\\.");
String secDomain = null;
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
HashMap<String, String> fullTop = maps.get("full");
if (!(innerMap.containsKey(split[i]))) {
StringBuilder strSec = new StringBuilder();
for (int j = i; j < split.length; j++) {
strSec.append(split[j]).append(".");
}
secDomain = strSec.substring(0, strSec.length() - 1);
if (fullTop.containsKey(getTopDomainFromSecDomain(secDomain))) {
break;
} else {
while (!fullTop.containsKey(getTopDomainFromSecDomain(secDomain)) && getTopDomainFromSecDomain(secDomain).contains(".")) {
secDomain = getTopDomainFromSecDomain(secDomain);
}
break;
}
}
}
return secDomain;
}

private static String getTopDomainFromSecDomain(String secDomain) {
String quFirstDian = secDomain;
if (secDomain.contains(".")) {
quFirstDian = secDomain.substring(secDomain.indexOf(".")).substring(1);
}
return quFirstDian;
}

private static File getTopDomainFile(){
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
File file = null;
if (url!=null){
file = new File(url.getFile());
}
if (file != null && file.isFile() && file.exists()){
return file;
}
return null;
}

public static HashMap<String, HashMap<String, String>> readTopDomainFile() {
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
assert url != null;
HashMap<String, HashMap<String, String>> maps = makeHashMap(url.getFile());
try {
String encoding = "UTF-8";
File file = new File(url.getFile());
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
HashMap<String, String> fullTop = maps.get("full");
fullTop.put(lineTxt, lineTxt);
maps.put("full", fullTop);
String[] split = lineTxt.split("\\.");
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
innerMap.put(split[i], split[i]);
maps.put("map_id_" + mapsIndex, innerMap);
}
}
read.close();
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>readTopDomainFile get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return maps;
}

private static int getMaxLength(String filePath) {
int lengthDomain = 0;
try {
String encoding = "UTF-8";
File file = new File(filePath);
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
String[] split = lineTxt.split("\\.");
if (split.length > lengthDomain) {
lengthDomain = split.length;
}
}
read.close();
} else {
logger.error("TopDomainUtils>>getMaxLength filePath is wrong--->{" + filePath + "}<---");
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>getMaxLength get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return lengthDomain;
}

private static HashMap<String, HashMap<String, String>> makeHashMap(String filePath) {
int maxLength = getMaxLength(filePath);
HashMap<String, HashMap<String, String>> maps = new HashMap<>();
for (int i = 0; i < maxLength; i++) {
maps.put("map_id_" + i, new HashMap<String, String>());
}
maps.put("full", new HashMap<String, String>());
return maps;
}

/**
 * General helper: given a URL, return its domain. The returned domain never carries a port, so a ':' in it always means an IPv6 address.
 * @param oriUrl
 * @return
 */
public static String getDomainFromUrl(String oriUrl) {
// Split on '?' first to drop query-string noise
String url = oriUrl.split("[?]")[0];
// Strip the http:// or https:// prefix
url = url.replaceAll("https://", "").replaceAll("http://", "");
String domain;

// Extract the domain
if (url.split("/")[0].split(":").length <= 2) {
// Splitting on ':' yields one or two parts, so this is IPv4
domain = url
// Index 0 after splitting on '/' contains the domain
.split("/")[0]
// For IPv4, index 0 after stripping the port on ':' is the final domain
.split(":")[0];
} else {
// More than two parts after splitting on ':' means IPv6; IPv6 addresses carry no port (for now), so splitting on '/' is enough
domain = url.split("/")[0];
}
return domain;

}
}

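The getSecDomain walk in the deleted file above probes labels from the TLD side against one lookup map per label position plus a "full" suffix set until the candidate's parent is a known public suffix. A simpler sketch of the same idea with a single suffix set (the suffix entries here are illustrative; the real list comes from topDomain.txt):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RegistrableDomain {
    // Illustrative suffix entries; the production list is loaded from topDomain.txt
    private static final Set<String> SUFFIXES = new HashSet<>(Arrays.asList("com", "cn", "com.cn"));

    // Return the longest known suffix plus one extra label, mirroring getSecDomain above
    public static String getSecDomain(String fqdn) {
        String[] labels = fqdn.split("\\.");
        int suffixStart = labels.length;
        for (int i = labels.length - 1; i >= 0; i--) {
            String candidate = String.join(".", Arrays.copyOfRange(labels, i, labels.length));
            if (SUFFIXES.contains(candidate)) {
                suffixStart = i; // remember the longest matching suffix
            }
        }
        if (suffixStart == labels.length || suffixStart == 0) {
            return null; // no known suffix, or the name is itself only a suffix
        }
        return String.join(".", Arrays.copyOfRange(labels, suffixStart - 1, labels.length));
    }

    public static void main(String[] args) {
        System.out.println(getSecDomain("news.example.com.cn")); // example.com.cn
        System.out.println(getSecDomain("www.example.com"));     // example.com
    }
}
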
@@ -1,33 +1,25 @@
#arangoDB connection settings
arangoDB.host=192.168.40.182
arangoDB.host=192.168.44.12
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=ip-learning-test
arangoDB.DB.name=tsg_galaxy_v3_test
arangoDB.batch=100000
arangoDB.ttl=3600

arangoDB.read.limit=10
update.arango.batch=10000

thread.pool.number=10
thread.pool.number=40
thread.await.termination.time=10

#ClickHouse read window mode: 0 = past hour, 1 = explicit time range
clickhouse.time.limit.type=0
read.clickhouse.max.time=1571245230
read.clickhouse.min.time=1571245220

#ArangoDB read window mode: 0 = normal read, 1 = explicit time range
arango.time.limit.type=0
read.arango.max.time=1571245220
read.arango.min.time=1571245210
time.limit.type=1
read.clickhouse.max.time=1603421554
read.clickhouse.min.time=1603354682

update.interval=3600
distinct.client.ip.num=10000
distinct.client.ip.num=100
recent.count.hour=24

top.domain.file.name=topDomain.txt

arangoDB.read.limit=

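The time.limit.type switch above picks between a sliding one-hour window and a fixed epoch-second range. A small sketch of how the window could resolve under each mode (property names as in the file above; the resolution logic is an assumption based on the surrounding code):

public class TimeWindow {
    // Returns {minTime, maxTime} in epoch seconds for the ClickHouse read
    static long[] resolveWindow(int timeLimitType, long fixedMin, long fixedMax) {
        if (timeLimitType == 0) {
            // Mode 0: the hour that just ended, aligned to hour boundaries
            long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
            return new long[]{currentHour - 3600, currentHour};
        }
        // Mode 1: the explicit range from read.clickhouse.min/max.time
        return new long[]{fixedMin, fixedMax};
    }

    public static void main(String[] args) {
        long[] w = resolveWindow(1, 1603354682L, 1603421554L);
        System.out.println(w[0] + " .. " + w[1]);
    }
}
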
@@ -1,8 +1,7 @@
drivers=ru.yandex.clickhouse.ClickHouseDriver
db.id=192.168.40.193:8123/tsg_galaxy_zx?socket_timeout=3600000
#db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
mdb.user=default
mdb.password=111111
db.id=192.168.44.10:8123/tsg_galaxy_v3?socket_timeout=3600000&compress=0
mdb.password=ceiec2019
initialsize=1
minidle=1
maxactive=50

@@ -4,20 +4,19 @@ log4j.logger.org.apache.http.wire=OFF

#Log4j
log4j.rootLogger=info,console,file
# Console log output
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n

# File log output
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=info
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Path: a relative path is resolved against the directory the application is launched from
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout

File diff suppressed because it is too large
@@ -11,6 +11,7 @@ import java.util.List;

public class TestList {
public static void main(String[] args) {
/*
ArangoDBConnect arangoConnect = ArangoDBConnect.getInstance();
ArangoCursor<BaseEdgeDocument> documents = arangoConnect.executorQuery("FOR doc IN R_LOCATE_FQDN2IP filter doc.FIRST_FOUND_TIME >= 1596080839 and doc.FIRST_FOUND_TIME <= 1596395473 RETURN doc", BaseEdgeDocument.class);
List<BaseEdgeDocument> baseEdgeDocuments = documents.asListRemaining();
@@ -18,8 +19,8 @@ public class TestList {
doc.updateAttribute("PROTOCOL_TYPE","123");
}

*/

/*
ArrayList<Integer> integers = new ArrayList<>();
integers.add(10);
integers.add(8);
@@ -39,7 +40,9 @@ public class TestList {
integers.add(5);
Collections.sort(integers);
System.out.println(integers);
Collections.reverse(integers);
System.out.println(integers);
System.out.println(integers.indexOf(5));
*/

}
}

@@ -1,27 +1,11 @@
package cn.ac.iie;

import cn.ac.iie.dao.BaseArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;

public class readHistoryDataTest {
public static void main(String[] args) {
ArangoDBConnect instance = ArangoDBConnect.getInstance();
// ArangoCursor<Long> baseDocuments = instance.executorQuery("RETURN LENGTH(R_LOCATE_FQDN2IP)", Long.class);
// while (baseDocuments.hasNext()){
// Long next = baseDocuments.next();
// System.out.println(next.toString());
// }
// instance.clean();
BaseArangoData baseArangoData = new BaseArangoData();

String sql = "FOR doc IN FQDN filter doc.FIRST_FOUND_TIME >= 1595423493 and doc.FIRST_FOUND_TIME <= 1595809766 limit 763,10 RETURN doc";
ArangoCursor<BaseDocument> baseDocuments = instance.executorQuery(sql, BaseDocument.class);
while (baseDocuments.hasNext()){
BaseDocument next = baseDocuments.next();
System.out.println(next.toString());
}
instance.clean();
}
}

@@ -6,7 +6,6 @@ import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -20,22 +19,12 @@ import java.util.concurrent.CountDownLatch;
 */
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);

public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();

private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();

private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();

public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
public <T extends BaseDocument> ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> readHistoryData(String table, Class<T> type) {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap = new ConcurrentHashMap<>();
try {
LOG.warn("Start updating " + table);
long start = System.currentTimeMillis();
@@ -43,9 +32,9 @@ public class BaseArangoData {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
@@ -55,49 +44,34 @@ public class BaseArangoData {
} catch (Exception e) {
e.printStackTrace();
}
return historyMap;
}

private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;
String sql = "RETURN LENGTH("+table+")";
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
while (longs.hasNext()){
cnt = longs.next();
}
} else {
LOG.warn("ArangoDB time range query returned nothing");
}catch (Exception e){
LOG.error(sql + " execution failed");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.warn(sql + "\nTime spent querying max/min time: " + (lastTime - startTime));
return new long[]{minTime, maxTime};

long last = System.currentTimeMillis();
LOG.warn(sql + " result: " + cnt + " elapsed: " + (last - start));
return cnt;
}

private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
long offsetNum = threadNumber * sepNum;
if (sepNum >= ApplicationConfig.ARANGODB_READ_LIMIT() * 10000){
sepNum = ApplicationConfig.ARANGODB_READ_LIMIT() * 10000;
}
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}

}

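getQuerySql above switches from FIRST_FOUND_TIME range slicing to offset/limit pagination over the collection count. A minimal sketch of the split, assuming the count, thread count, and the sepNum cap of arangoDB.read.limit * 10000; note that when the cap kicks in, threads keep their original offsets, so documents past offset + cap within a slice are not read:

public class QuerySplit {
    // Mirrors the AQL paging in getQuerySql (names and values illustrative)
    static String querySql(long countTotal, int threadNumber, int threads, long readLimit, String table) {
        long sepNum = countTotal / threads + 1;       // slice size per thread
        long offsetNum = threadNumber * sepNum;       // slice start, from the uncapped size
        if (sepNum >= readLimit * 10000) {
            sepNum = readLimit * 10000;               // cap: each thread reads at most this many
        }
        return "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
    }

    public static void main(String[] args) {
        for (int i = 0; i < 4; i++) {
            System.out.println(querySql(1_000_000L, i, 4, 10L, "R_LOCATE_FQDN2IP"));
        }
    }
}
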
@@ -19,12 +19,13 @@ import java.util.concurrent.CountDownLatch;
 * @author wlh
 * Reads the full ArangoDB history with multiple threads and packs it into a map
 */
@SuppressWarnings("unchecked")
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
private static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();

public static final HashSet<String> PROTOCOL_SET;
private static final HashSet<String> PROTOCOL_SET;

static {
PROTOCOL_SET = new HashSet<>();
@@ -66,7 +67,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
// updateProtocolDocument(doc);
updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc);
break;
default:
@@ -92,7 +93,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[0]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
@@ -104,6 +105,11 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);

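The *_CNT_RECENT handling above right-shifts each hourly counter array by one slot per run and zeroes slot 0, so index i always holds the count from i hours ago. A standalone sketch of that shift (array length RECENT_COUNT_HOUR, here 24):

import java.util.Arrays;

public class RecentCounterShift {
    static Long[] shiftHourly(Long[] src, int recentCountHour) {
        Long[] dst = new Long[recentCountHour];
        Arrays.fill(dst, 0L);
        // Drop the oldest slot and move everything one hour back
        System.arraycopy(src, 0, dst, 1, Math.min(src.length, recentCountHour) - 1);
        dst[0] = 0L; // the hour that is starting now
        return dst;
    }

    public static void main(String[] args) {
        Long[] counts = new Long[24];
        Arrays.fill(counts, 0L);
        counts[0] = 42L; // last hour's count
        Long[] shifted = shiftHourly(counts, 24);
        System.out.println(shifted[1]); // 42 has moved to "one hour ago"
    }
}
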
@@ -27,14 +27,27 @@ public class ArangoDBConnect {
}

private static void getArangoDatabase(){
arangoDB = new ArangoDB.Builder()
ArangoDB.Builder host = getArangoHost();
arangoDB = host
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER())
.host(ApplicationConfig.ARANGODB_HOST(), ApplicationConfig.ARANGODB_PORT())
.acquireHostList(true)
.user(ApplicationConfig.ARANGODB_USER())
.password(ApplicationConfig.ARANGODB_PASSWORD())
.build();
}

private static ArangoDB.Builder getArangoHost(){
String hostList = ApplicationConfig.ARANGODB_HOST();
String[] split = hostList.split(",");
ArangoDB.Builder host = new ArangoDB.Builder();
for (String hostStr : split) {
host.host(hostStr, ApplicationConfig.ARANGODB_PORT());
LOG.warn("arangoDB host {} added", hostStr);
}
LOG.warn("arangoDB host list resolved successfully");
return host;
}

public static synchronized ArangoDBConnect getInstance(){
if (null == conn){
conn = new ArangoDBConnect();
@@ -70,26 +83,6 @@ public class ArangoDBConnect {
}
}

@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("Update failed");
e.printStackTrace();
}finally {
docInsert.clear();
docInsert.clear();
}
}

public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
ArangoDatabase database = getDatabase();
try {
@@ -101,11 +94,11 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.warn("ArangoDB write error: "+errorEntity.getErrorMessage());
LOG.debug("ArangoDB write error: "+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
System.out.println("Update failed: "+e.toString());
LOG.error("ArangoDB update failed: "+e.toString());
}finally {
docOverwrite.clear();
}

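The overwrite path above inserts a batch with a pre-built documentCreateOptions and only logs per-document errors. The options object itself is not shown in this hunk; a plausible construction with the ArangoDB Java driver (an assumption, since the original code is elided) would turn the insert into an upsert by _key:

import com.arangodb.model.DocumentCreateOptions;

public class OverwriteOptions {
    // Assumption: the documentCreateOptions used by overwrite(...) is built roughly like this
    static DocumentCreateOptions build() {
        return new DocumentCreateOptions()
                .overwrite(true)      // replace an existing document with the same _key
                .waitForSync(false);  // don't block on disk sync during bulk loads
    }
}
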
@@ -1,25 +0,0 @@
package cn.ac.iie.utils;

public class TopDomainUtils {

/**
 * General helper: given a URL, return its domain. The returned domain never carries a port, so a ':' in it always means an IPv6 address.
 * @param oriUrl
 * @return
 */
public static String getDomainFromUrl(String oriUrl) {
String url = oriUrl.split("[?]")[0];
url = url.replaceAll("https://", "").replaceAll("http://", "");
String domain;

if (url.split("/")[0].split(":").length <= 2) {
domain = url
.split("/")[0]
.split(":")[0];
} else {
domain = url.split("/")[0];
}
return domain;

}
}
@@ -1,46 +1,40 @@
#Spark job settings
spark.sql.shuffle.partitions=5
spark.sql.shuffle.partitions=10
spark.executor.memory=4g
spark.app.name=test
spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer
#spark.serializer=org.apache.spark.serializer.JavaSerializer
master=local[*]
#Spark ClickHouse reader settings
#spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.193:8123/tsg_galaxy_zx
spark.read.clickhouse.url=jdbc:clickhouse://192.168.44.10:8123/tsg_galaxy_v3
spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
spark.read.clickhouse.user=default
spark.read.clickhouse.password=111111
spark.read.clickhouse.numPartitions=144
spark.read.clickhouse.password=ceiec2019
spark.read.clickhouse.numPartitions=5
spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=recv_time
spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
clickhouse.socket.timeout=300000
#arangoDB settings
arangoDB.host=192.168.40.182
arangoDB.host=192.168.40.123,192.168.40.223,192.168.40.222
#arangoDB.host=192.168.40.223
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
#arangoDB.DB.name=insert_iplearn_index
arangoDB.DB.name=ip-learning-test-0
arangoDB.password=ceiec2019
arangoDB.DB.name=tsg_galaxy_v3
arangoDB.ttl=3600

thread.pool.number=5
thread.pool.number=10

#ClickHouse read window mode: 0 = past hour; 1 = explicit time range
clickhouse.time.limit.type=1
read.clickhouse.max.time=1571241640
read.clickhouse.min.time=1571241600
read.clickhouse.max.time=1600916194
read.clickhouse.min.time=1599197648

#ArangoDB read window mode: 0 = normal read; 1 = explicit time range
arango.time.limit.type=0
read.arango.max.time=1571245320
read.arango.min.time=1571245200

arangoDB.read.limit=
arangoDB.read.sepNum=10
update.arango.batch=10000

distinct.client.ip.num=10000
recent.count.hour=24

update.interval=10800
update.interval=3600

@@ -36,12 +36,7 @@ object ApplicationConfig {
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")

val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")

val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")

val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val ARANGODB_READ_LIMIT: Long = config.getLong("arangoDB.read.sepNum")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")

@@ -2,7 +2,6 @@ package cn.ac.iie.dao

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.utils.SparkSessionUtil.spark
import cn.ac.iie.utils.TopDomainUtils
import org.apache.spark.sql.DataFrame
import org.slf4j.LoggerFactory

@@ -12,7 +11,7 @@ object BaseClickhouseData {
val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
private val timeLimit: (Long, Long) = getTimeLimit

private def initClickhouseData(sql:String): Unit ={
private def initClickhouseData(sql:String): DataFrame ={

val dataFrame: DataFrame = spark.read.format("jdbc")
.option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
@@ -29,16 +28,18 @@ object BaseClickhouseData {
.load()
dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable")

dataFrame
}

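After this change every reader pushes its aggregation into ClickHouse by wrapping the SQL in a parenthesized subquery aliased as dbtable and handing it to the JDBC source, instead of staging rows into a global temp view. A minimal sketch of the same pattern via Spark's Java API (URL and credentials are the illustrative values from the properties above):

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class JdbcSubqueryRead {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local[*]").appName("test").getOrCreate();
        // The "dbtable" option accepts a subquery; ClickHouse runs the GROUP BY, Spark only receives the result
        String dbtable = "(SELECT common_subscriber_id, MAX(common_recv_time) AS LAST_FOUND_TIME "
                + "FROM radius_record_log GROUP BY common_subscriber_id) as dbtable";
        Dataset<Row> df = spark.read().format("jdbc")
                .option("url", "jdbc:clickhouse://192.168.44.10:8123/tsg_galaxy_v3")
                .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                .option("user", "default")
                .option("password", "ceiec2019")
                .option("dbtable", dbtable)
                .load();
        df.printSchema();
    }
}
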
def loadConnectionDataFromCk(): Unit ={
val where = "recv_time >= " + timeLimit._2 + " AND recv_time < " + timeLimit._1
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1 + " AND common_schema_type != 'BASE'"
val sql =
s"""
|(SELECT
| s1_domain,s1_referer,s1_s_ip,s1_d_ip,recv_time,media_len
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
|FROM
| media_expire_patch
| connection_record_log
|WHERE $where) as dbtable
""".stripMargin

@@ -49,8 +50,8 @@ object BaseClickhouseData {
private def loadRadiusDataFromCk(): Unit ={
val where =
s"""
| common_start_time >= ${timeLimit._2}
| AND common_start_time < ${timeLimit._1}
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
@@ -59,7 +60,7 @@ object BaseClickhouseData {
val sql =
s"""
|(SELECT
| common_subscriber_id,radius_framed_ip,common_start_time
| common_subscriber_id,radius_framed_ip,common_recv_time
|FROM
| tsg_galaxy_v3.radius_record_log
|WHERE
@@ -69,31 +70,28 @@ object BaseClickhouseData {
initClickhouseData(sql)
}

def getDomain(url:String): String ={
TopDomainUtils.getDomainFromUrl(url)
}

def getVertexFqdnDf: DataFrame ={
loadConnectionDataFromCk()
spark.udf.register("getDomain",TopDomainUtils.getDomainFromUrl _)
val sql =
"""
|SELECT
| FQDN,MAX(LAST_FOUND_TIME) AS LAST_FOUND_TIME,MIN(FIRST_FOUND_TIME) AS FIRST_FOUND_TIME
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| s1_domain AS FQDN,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| GROUP BY s1_domain
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| getDomain(s1_referer) AS FQDN,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| GROUP BY getDomain(s1_referer)
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
@@ -107,79 +105,170 @@ object BaseClickhouseData {
vertexFqdnDf
}

def main(args: Array[String]): Unit = {
val df = getRelationFqdnLocateIpDf
df.show(10)
}

def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
"""
|SELECT
| *
|FROM
| (
| (
| SELECT
| s1_s_ip AS IP,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| MAX(recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(media_len) as BYTES_SUM,
| 'client' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| UNION ALL
| (
| SELECT
| s1_d_ip AS IP,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| MAX(recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(media_len) as BYTES_SUM,
| 'server' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| )
s"""
|(SELECT * FROM
|((SELECT common_client_ip AS IP,MIN(common_end_time) AS FIRST_FOUND_TIME,
|MAX(common_end_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
|'client' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_client_ip)
|UNION ALL
|(SELECT common_server_ip AS IP,
|MIN(common_end_time) AS FIRST_FOUND_TIME,
|MAX(common_end_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
|'server' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_server_ip))) as dbtable
""".stripMargin
LOG.warn(sql)
val vertexIpDf = spark.sql(sql)
vertexIpDf.printSchema()
vertexIpDf
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

/*
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()

val sql =
val sslSql =
"""
|SELECT
| s1_domain AS FQDN,
| s1_d_ip AS common_server_ip,
| MAX(recv_time) AS LAST_FOUND_TIME,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(s1_s_ip) AS DIST_CIP_RECENT
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| s1_domain != ''
| common_schema_type = 'SSL'
|GROUP BY
| s1_domain,s1_d_ip
| ssl_sni,common_server_ip
""".stripMargin

val httpSql =
"""
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'HTTP'
|GROUP BY
| http_host,common_server_ip
""".stripMargin
val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"

LOG.warn(sql)
val relationFqdnLocateIpDf = spark.sql(sql)
relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf
}
*/

def getRelationFqdnLocateIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getRelationSubidLocateIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
|FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
|) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getVertexSubidDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getVertexFramedIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

private def getTimeLimit: (Long,Long) ={
var maxTime = 0L

@@ -20,6 +20,17 @@ object MergeDataFrame {
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}

def mergeVertexFrameIp: RDD[Row] ={
val values = BaseClickhouseData.getVertexFramedIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val ip = row.getAs[String]("radius_framed_ip")
(ip, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"Read R_LOCATE_SUBSCRIBER2IP from clickhouse: ${values.count()} rows")
values
}

def mergeVertexIp(): RDD[Row]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
@@ -27,7 +38,8 @@ object MergeDataFrame {
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list")
collect_list("ip_type").alias("ip_type_list"),
last("common_link_info").alias("common_link_info")
)
val values = frame.rdd.map(row => (row.get(0), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
@@ -35,30 +47,61 @@ object MergeDataFrame {
}

def mergeRelationFqdnLocateIp(): RDD[Row] ={
BaseClickhouseData.getRelationFqdnLocateIpDf
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.filter(row => isDomain(row.getAs[String]("FQDN")))
.rdd.map(row => {
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
val values = frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-"+serverIp)
(key,row)
val key = fqdn.concat("-" + serverIp)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"Read R_LOCATE_FQDN2IP from clickhouse: ${values.count()} rows")
values

}

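mergeRelationFqdnLocateIp now aggregates per (FQDN, server IP) in Spark and then keys each row as "FQDN-serverIp" so the partitioner routes it to the same partition as its history entry. A Java-API sketch of that keying step, using Spark's built-in HashPartitioner in place of the project's CustomPartitioner:

import org.apache.spark.HashPartitioner;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import scala.Tuple2;

public class KeyedPartitioning {
    // Route each aggregated edge row to a stable partition derived from its key
    static JavaRDD<Row> partitionByKey(Dataset<Row> df, int partitions) {
        JavaPairRDD<String, Row> keyed = df.javaRDD().mapToPair(row -> {
            String key = row.getAs("FQDN") + "-" + row.<String>getAs("common_server_ip");
            return new Tuple2<>(key, row);
        });
        // HashPartitioner stands in for the project's CustomPartitioner
        return keyed.partitionBy(new HashPartitioner(partitions)).values();
    }
}
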
def mergeRelationSubidLocateIp(): RDD[Row] ={
val values = BaseClickhouseData.getRelationSubidLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val key = commonSubscriberId.concat("-" + ip)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"Read R_LOCATE_SUBSCRIBER2IP from clickhouse: ${values.count()} rows")
values
}

def mergeVertexSubid(): RDD[Row] ={
val values = BaseClickhouseData.getVertexSubidDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
(commonSubscriberId, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"Read SUBSCRIBER from clickhouse: ${values.count()} rows")
values
}

private def isDomain(fqdn: String): Boolean = {
try {
if (fqdn == null || fqdn.length == 0) {
return false
}
if (fqdn.contains(":")) {
val s = fqdn.split(":")(0)
if (s.contains(":")){
return false
}
}
val fqdnArr = fqdn.split("\\.")
if (fqdnArr.length < 4 || fqdnArr.length > 4){

val fqdnArr = fqdn.split(":")(0).split("\\.")

if (fqdnArr.length != 4){
return true
}
for (f <- fqdnArr) {

@@ -26,6 +26,10 @@ object UpdateDocHandler {
hisDoc.addAttribute(attributeName,newAttribute+hisAttritube)
}

def replaceAttribute(hisDoc: BaseDocument,newAttribute:String,attributeName:String): Unit ={
hisDoc.addAttribute(attributeName,newAttribute)
}

def separateAttributeByIpType(ipTypeList:ofRef[String],
sessionCountList:ofRef[AnyRef],
bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
@@ -94,7 +98,12 @@ object UpdateDocHandler {
}

def mergeDistinctIp(distCipRecent:ofRef[String]): Array[String] ={
distCipRecent.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
distCipRecent.flatMap(str => {
str.replaceAll("\\[","")
.replaceAll("\\]","")
.replaceAll("\\'","")
.split(",")
}).distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}

def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={

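The mergeDistinctIp change above reflects the new input shape: DIST_CIP_RECENT now arrives as ClickHouse groupUniqArray output rendered as a string like ['1.2.3.4','5.6.7.8'], so each entry is stripped of brackets and quotes and split before deduplication. A standalone Java sketch of the same parse:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

public class DistinctIpParse {
    // Parse toString(groupUniqArray(...)) values and merge the entries, keeping at most `limit` IPs
    static String[] mergeDistinctIp(String[] distCipRecent, int limit) {
        Set<String> merged = new LinkedHashSet<>();
        for (String str : distCipRecent) {
            String cleaned = str.replaceAll("\\[", "").replaceAll("\\]", "").replaceAll("'", "");
            for (String ip : cleaned.split(",")) {
                if (!ip.isEmpty()) {
                    merged.add(ip);
                }
                if (merged.size() >= limit) {
                    return merged.toArray(new String[0]);
                }
            }
        }
        return merged.toArray(new String[0]);
    }

    public static void main(String[] args) {
        String[] raw = {"['1.2.3.4','5.6.7.8']", "['5.6.7.8','9.9.9.9']"};
        System.out.println(Arrays.toString(mergeDistinctIp(raw, 100)));
    }
}
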
@@ -5,7 +5,6 @@ import java.util.concurrent.ConcurrentHashMap

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import cn.ac.iie.service.transform.MergeDataFrame._
import cn.ac.iie.service.update.UpdateDocHandler._
import cn.ac.iie.utils.{ArangoDBConnect, ExecutorThreadPool, SparkSessionUtil}
@@ -26,9 +25,12 @@ object UpdateDocument {

def update(): Unit = {
try {
updateDocument("FQDN", historyVertexFqdnMap, getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn)
updateDocument("IP", historyVertexIpMap, getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
updateDocument("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
// updateDocument("FQDN", getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn)
// updateDocument("IP", getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
updateDocument("SUBSCRIBER",getVertexSubidRow,classOf[BaseDocument],mergeVertexSubid)
insertFrameIp()
updateDocument("R_LOCATE_SUBSCRIBER2IP",getRelationSubidLocateIpRow,classOf[BaseEdgeDocument],mergeRelationSubidLocateIp)
} catch {
case e: Exception => e.printStackTrace()
} finally {
@@ -38,13 +40,33 @@ object UpdateDocument {
}
}

private def insertFrameIp(): Unit ={
mergeVertexFrameIp.foreachPartition(iter => {
val resultDocumentList = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val document = getVertexFrameipRow(row)
resultDocumentList.add(document)
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"Updated IP: " + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"Updated IP: " + i)
}
})
}

private def updateDocument[T <: BaseDocument](collName: String,
historyMap: ConcurrentHashMap[Integer, ConcurrentHashMap[String, T]],
getDocumentRow: (Row, ConcurrentHashMap[String, T]) => T,
clazz: Class[T],
getNewDataRdd: () => RDD[Row]
): Unit = {
baseArangoData.readHistoryData(collName, historyMap, clazz)
val historyMap = baseArangoData.readHistoryData(collName, clazz)
val hisBc = spark.sparkContext.broadcast(historyMap)
try {
val start = System.currentTimeMillis()

@@ -95,6 +117,56 @@ object UpdateDocument {
document
}

private def getRelationSubidLocateIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseEdgeDocument]): BaseEdgeDocument ={
val subId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")

val key = subId.concat("-"+ip)
var document = dictionaryMap.getOrDefault(key,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
} else {
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("SUBSCRIBER/" + subId)
document.setTo("IP/" + ip)
document.addAttribute("SUBSCRIBER",subId)
document.addAttribute("IP",ip)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}

document
}

private def getVertexFrameipRow(row: Row): BaseDocument ={
val ip = row.getAs[String]("radius_framed_ip")
val document = new BaseDocument()
document.setKey(ip)
document.addAttribute("IP",ip)
document
}

private def getVertexSubidRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument ={
val subId = row.getAs[String]("common_subscriber_id")
val subLastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val subFirstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
var document = dictionaryMap.getOrDefault(subId,null)
if (document != null){
updateMaxAttribute(document,subLastFoundTime,"LAST_FOUND_TIME")
} else {
document = new BaseDocument()
document.setKey(subId)
document.addAttribute("SUBSCRIBER",subId)
document.addAttribute("FIRST_FOUND_TIME",subFirstFoundTime)
document.addAttribute("LAST_FOUND_TIME",subLastFoundTime)
}

document
}

private def getVertexIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = {
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
@@ -102,6 +174,7 @@ object UpdateDocument {
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val linkInfo = row.getAs[String]("common_link_info")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList, sessionCountList, bytesSumList)

var document = dictionaryMap.getOrDefault(ip, null)
@@ -111,6 +184,7 @@ object UpdateDocument {
updateSumAttribute(document, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(document, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(document, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
replaceAttribute(document,linkInfo,"COMMON_LINK_INFO")
} else {
document = new BaseDocument
document.setKey(ip)
@@ -121,7 +195,7 @@ object UpdateDocument {
document.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO", "")
document.addAttribute("COMMON_LINK_INFO", linkInfo)
}
document
}
@@ -131,16 +205,18 @@ object UpdateDocument {
val serverIp = row.getAs[String]("common_server_ip")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val countTotal = row.getAs[Long]("COUNT_TOTAL")
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = row.getAs[ofRef[String]]("DIST_CIP_RECENT")

val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)

val key = fqdn.concat("-" + serverIp)
var document = dictionaryMap.getOrDefault(key, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
updateSumAttribute(document,countTotal,"CNT_TOTAL")
updateProtocolAttritube(document, sepAttritubeMap)
updateDistinctIp(document, distinctIp)
} else {
document = new BaseEdgeDocument()
@@ -149,7 +225,7 @@ object UpdateDocument {
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
document.addAttribute("CNT_TOTAL",countTotal)
putProtocolAttritube(document, sepAttritubeMap)
putDistinctIp(document, distinctIp)
}
document

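All of the get*Row builders above follow one pattern: look the key up in the broadcast history map, mutate the existing document if found, otherwise create a fresh one; either way the returned document is batched into an overwrite. A condensed Java sketch of that read-merge-write step (types and attribute names follow the hunk above; the merge shown covers only LAST_FOUND_TIME):

import com.arangodb.entity.BaseEdgeDocument;
import java.util.concurrent.ConcurrentHashMap;

public class UpsertMerge {
    // Merge one aggregated row into the history map entry, or create a new edge document
    static BaseEdgeDocument mergeEdge(ConcurrentHashMap<String, BaseEdgeDocument> history,
                                      String fqdn, String serverIp,
                                      long firstFoundTime, long lastFoundTime) {
        String key = fqdn.concat("-" + serverIp);
        BaseEdgeDocument document = history.getOrDefault(key, null);
        if (document != null) {
            // Existing edge: only push LAST_FOUND_TIME forward
            long his = Long.parseLong(document.getAttribute("LAST_FOUND_TIME").toString());
            document.updateAttribute("LAST_FOUND_TIME", Math.max(his, lastFoundTime));
        } else {
            // New edge: set endpoints and both timestamps
            document = new BaseEdgeDocument();
            document.setKey(key);
            document.setFrom("FQDN/" + fqdn);
            document.setTo("IP/" + serverIp);
            document.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
            document.addAttribute("LAST_FOUND_TIME", lastFoundTime);
        }
        return document;
    }
}
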
@@ -1,35 +1,35 @@
package cn.ac.iie.service.update

import java.util
import java.util.ArrayList
import java.util.concurrent.ConcurrentHashMap

import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}

import scala.collection.mutable.WrappedArray.ofRef

object UpdateDocumentTest {
def main(args: Array[String]): Unit = {
val baseArangoData = new BaseArangoData()
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])

val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
while (value.hasMoreElements) {
val integer: Integer = value.nextElement()
val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
val unit = map.keys()
while (unit.hasMoreElements) {
val key = unit.nextElement()
val edgeDocument = map.get(key)
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
println(longs.toString + "---" + strings.toString)
}
}
}

}
//package cn.ac.iie.service.update
//
//import java.util
//import java.util.ArrayList
//import java.util.concurrent.ConcurrentHashMap
//
//import cn.ac.iie.dao.BaseArangoData
//import cn.ac.iie.dao.BaseArangoData._
//import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
//
//import scala.collection.mutable.WrappedArray.ofRef
//
//object UpdateDocumentTest {
// def main(args: Array[String]): Unit = {
// val baseArangoData = new BaseArangoData()
// baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
//
// val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
// while (value.hasMoreElements) {
// val integer: Integer = value.nextElement()
// val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
// val unit = map.keys()
// while (unit.hasMoreElements) {
// val key = unit.nextElement()
// val edgeDocument = map.get(key)
// // val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// // val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
// println(longs.toString + "---" + strings.toString)
// }
// }
// }
//
//}