Compare commits
13 Commits
master ... ip-learnin

| Author | SHA1 | Date |
|---|---|---|
| | 60d688f289 | |
| | 57fd13d053 | |
| | 1017d8fe6c | |
| | 17ec1eff72 | |
| | 67c2dfd4d2 | |
| | 33c0d826ab | |
| | 6265bb5e90 | |
| | a1589b4905 | |
| | 40e76754d0 | |
| | b7a156b0b8 | |
| | 233cf20d50 | |
| | b13fc2bce1 | |
| | cbeba6372b | |
@@ -1,93 +0,0 @@
-package cn.ac.iie.dao;
-
-import cn.ac.iie.config.ApplicationConfig;
-import cn.ac.iie.service.ingestion.ReadHistoryArangoData;
-import cn.ac.iie.utils.ArangoDBConnect;
-import cn.ac.iie.utils.ExecutorThreadPool;
-import com.arangodb.ArangoCursor;
-import com.arangodb.entity.BaseDocument;
-import com.arangodb.entity.BaseEdgeDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * Fetches historical data from ArangoDB
- */
-public class BaseArangoData {
-    private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
-
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
-
-    private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
-
-    private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
-
-    <T extends BaseDocument> void readHistoryData(String table,
-                                                  ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
-                                                  Class<T> type) {
-        try {
-            LOG.info("Starting update of " + table);
-            long start = System.currentTimeMillis();
-            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
-                map.put(i,new ConcurrentHashMap<>());
-            }
-            CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
-            long[] timeRange = getTimeRange(table);
-            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
-                String sql = getQuerySql(timeRange, i, table);
-                ReadHistoryArangoData<T> readHistoryArangoData =
-                        new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
-                threadPool.executor(readHistoryArangoData);
-            }
-            countDownLatch.await();
-            long last = System.currentTimeMillis();
-            LOG.info("Reading " + table + " from arangoDB took: " + (last-start));
-        }catch (Exception e){
-            e.printStackTrace();
-        }
-    }
-
-    private long[] getTimeRange(String table){
-        long minTime = 0L;
-        long maxTime = 0L;
-        long startTime = System.currentTimeMillis();
-        String sql = "LET doc = (FOR doc IN "+table+" RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
-        ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
-        try {
-            if (timeDoc != null){
-                while (timeDoc.hasNext()) {
-                    BaseDocument doc = timeDoc.next();
-                    maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
-                    minTime = Long.parseLong(doc.getAttribute("min_time").toString());
-                }
-                long lastTime = System.currentTimeMillis();
-                LOG.info(sql + "\nQuerying max/min time took: " + (lastTime - startTime));
-            }else {
-                LOG.warn("ArangoDB time range query returned empty");
-            }
-        }catch (Exception e){
-            e.printStackTrace();
-        }
-        return new long[]{minTime, maxTime};
-
-    }
-
-    private String getQuerySql(long[] timeRange,int threadNumber,String table){
-        long minTime = timeRange[0];
-        long maxTime = timeRange[1];
-        long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
-        long maxThreadTime = minTime + (threadNumber + 1)* diffTime;
-        long minThreadTime = minTime + threadNumber * diffTime;
-        return "FOR doc IN "+table+" filter doc.FIRST_FOUND_TIME >= "+minThreadTime+" and doc.FIRST_FOUND_TIME <= "+maxThreadTime+" " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
-    }
-
-}
@@ -35,11 +35,10 @@ public class BaseClickhouseData {
     private DruidPooledConnection connection;
     private Statement statement;

-    <T extends BaseDocument> void baseDocumentFromClickhouse(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
-                                                             Supplier<String> getSqlSupplier,
+    <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> baseDocumentFromClickhouse(Supplier<String> getSqlSupplier,
                                                              Function<ResultSet,T> formatResultFunc) {
         long start = System.currentTimeMillis();
-        initializeMap(newMap);
+        HashMap<Integer, HashMap<String, ArrayList<T>>> newMap = initializeMap();
         String sql = getSqlSupplier.get();
         try {
             connection = manger.getConnection();
@@ -60,18 +59,23 @@ public class BaseClickhouseData {
         }finally {
             manger.clear(statement,connection);
         }
+        return newMap;
     }

-    private <T extends BaseDocument> void initializeMap(HashMap<Integer, HashMap<String,ArrayList<T>>> map){
+    private <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> initializeMap(){
         try {
+            HashMap<Integer, HashMap<String, ArrayList<T>>> newDataMap = new HashMap<>();
             for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
-                map.put(i, new HashMap<>());
+                newDataMap.put(i, new HashMap<>());
             }
+            return newDataMap;
         }catch (Exception e){
             e.printStackTrace();
-            LOG.error("Failed to initialize data");
+            LOG.error("Data initialization failed " + e.toString());
+            return null;
         }
     }



 }
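
Note on the hunk above: `initializeMap()` no longer mutates a caller-supplied map; it now builds and returns one `HashMap` bucket per worker thread, keyed `0..THREAD_POOL_NUMBER-1`. Below is a minimal, self-contained sketch of that bucket layout. The `THREAD_POOL_NUMBER` constant, the `put` helper, and the sample key are stand-ins, and the modulo-hash routing mirrors the scheme used in the removed `ReadHistoryArangoData`, not anything shown in this hunk.

```java
import java.util.ArrayList;
import java.util.HashMap;

public class BucketMapSketch {
    // Stand-in for ApplicationConfig.THREAD_POOL_NUMBER.
    static final int THREAD_POOL_NUMBER = 4;

    // Build one HashMap per worker thread, keyed 0..THREAD_POOL_NUMBER-1,
    // mirroring what the refactored initializeMap() returns.
    static <T> HashMap<Integer, HashMap<String, ArrayList<T>>> initializeMap() {
        HashMap<Integer, HashMap<String, ArrayList<T>>> map = new HashMap<>();
        for (int i = 0; i < THREAD_POOL_NUMBER; i++) {
            map.put(i, new HashMap<>());
        }
        return map;
    }

    // Route a document into its bucket by key hash, so each worker thread
    // later processes one disjoint partition of the keys.
    static <T> void put(HashMap<Integer, HashMap<String, ArrayList<T>>> map, String key, T doc) {
        int bucket = Math.abs(key.hashCode()) % THREAD_POOL_NUMBER;
        map.get(bucket).computeIfAbsent(key, k -> new ArrayList<>()).add(doc);
    }

    public static void main(String[] args) {
        HashMap<Integer, HashMap<String, ArrayList<String>>> map = initializeMap();
        put(map, "example.com-1.2.3.4", "edge-doc");
        System.out.println(map);
    }
}
```
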
@@ -4,15 +4,8 @@ import cn.ac.iie.config.ApplicationConfig;
 import cn.ac.iie.service.ingestion.ReadClickhouseData;
 import cn.ac.iie.service.update.Document;
 import cn.ac.iie.service.update.relationship.LocateFqdn2Ip;
-import cn.ac.iie.service.update.relationship.LocateSubscriber2Ip;
-import cn.ac.iie.service.update.relationship.VisitIp2Fqdn;
-import cn.ac.iie.service.update.vertex.Fqdn;
-import cn.ac.iie.service.update.vertex.Ip;
-import cn.ac.iie.service.update.vertex.Subscriber;
-import cn.ac.iie.utils.ArangoDBConnect;
 import cn.ac.iie.utils.ExecutorThreadPool;
 import com.arangodb.entity.BaseDocument;
-import com.arangodb.entity.BaseEdgeDocument;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -20,14 +13,10 @@ import java.lang.reflect.Constructor;
 import java.sql.ResultSet;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.function.Function;
 import java.util.function.Supplier;

-import static cn.ac.iie.dao.BaseArangoData.*;
-import static cn.ac.iie.dao.BaseClickhouseData.*;
-
 /**
  * Business class that updates the graph database
  * @author wlh
@@ -35,74 +24,46 @@ import static cn.ac.iie.dao.BaseClickhouseData.*;
 public class UpdateGraphData {
     private static final Logger LOG = LoggerFactory.getLogger(UpdateGraphData.class);
     private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
-    private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
-    private static BaseArangoData baseArangoData = new BaseArangoData();
     private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();


     public void updateArango(){
         long start = System.currentTimeMillis();
         try {
-            updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
-                    ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
-
-            updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
-                    ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
-
-            updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
-                    ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);
-
-            updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
-                    ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);
-
-            // updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
-            //         VisitIp2Fqdn.class,BaseEdgeDocument.class,
-            //         ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);
-
-            updateDocument(newRelationSubsciberLocateIpMap,historyRelationSubsciberLocateIpMap,"R_LOCATE_SUBSCRIBER2IP",
-                    LocateSubscriber2Ip.class,BaseEdgeDocument.class,
-                    ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);
+            updateDocument("ip_learning.r_locate_fqdn2ip_local", LocateFqdn2Ip.class,
+                    ReadClickhouseData::getRelationshipFqdnAddressIpSqlYsp,ReadClickhouseData::getRelationFqdnAddressIpDocument);

             long last = System.currentTimeMillis();
             LOG.info("iplearning application finished, elapsed: " + (last - start));
         }catch (Exception e){
             e.printStackTrace();
         }finally {
-            arangoManger.clean();
             pool.shutdown();
         }
     }


-    private <T extends BaseDocument> void updateDocument(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
-                                                         ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
-                                                         String collection,
+    private <T extends BaseDocument> void updateDocument(String collection,
                                                          Class<? extends Document<T>> taskType,
-                                                         Class<T> docmentType,
                                                          Supplier<String> getSqlSupplier,
                                                          Function<ResultSet,T> formatResultFunc
     ) {
         try {

-            baseArangoData.readHistoryData(collection,historyMap,docmentType);
             LOG.info(collection + " reading clickhouse and assembling the result set");
-            baseClickhouseData.baseDocumentFromClickhouse(newMap, getSqlSupplier,formatResultFunc);
+            HashMap<Integer, HashMap<String, ArrayList<T>>> newMap =
+                    baseClickhouseData.baseDocumentFromClickhouse(getSqlSupplier, formatResultFunc);

             LOG.info(collection + " starting update");
             long start = System.currentTimeMillis();
             CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
             for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
                 HashMap<String, ArrayList<T>> tmpNewMap = newMap.get(i);
-                ConcurrentHashMap<String, T> tmpHisMap = historyMap.get(i);
                 Constructor constructor = taskType.getConstructor(
                         HashMap.class,
-                        ArangoDBConnect.class,
                         String.class,
-                        ConcurrentHashMap.class,
                         CountDownLatch.class);
-                Document docTask = (Document)constructor.newInstance(tmpNewMap, arangoManger, collection, tmpHisMap, countDownLatch);
+                Document docTask = (Document)constructor.newInstance(tmpNewMap, collection, countDownLatch);
                 pool.executor(docTask);
             }
             countDownLatch.await();
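
Note on the hunk above: `updateDocument` now takes only the target table name, the task class, and the two ClickHouse suppliers, and the per-thread worker is still built reflectively through the slimmed three-argument constructor `(HashMap, String, CountDownLatch)`. Below is a hedged, self-contained sketch of that reflective dispatch; the `Task` class, thread count, and table name are stand-ins — the real code uses the `Document` hierarchy and submits tasks to `ExecutorThreadPool` instead of starting threads directly.

```java
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.CountDownLatch;

public class ReflectiveTaskSketch {

    // Stand-in for the Document task hierarchy: a public constructor taking
    // (newMap, collection, latch), matching getConstructor(HashMap.class,
    // String.class, CountDownLatch.class) in the hunk above.
    public static class Task extends Thread {
        private final CountDownLatch latch;

        public Task(HashMap<String, ArrayList<String>> newMap, String collection, CountDownLatch latch) {
            this.latch = latch;
        }

        @Override
        public void run() {
            try {
                // ... process this thread's partition of the new documents ...
            } finally {
                latch.countDown();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        int threads = 4; // stand-in for ApplicationConfig.THREAD_POOL_NUMBER
        CountDownLatch latch = new CountDownLatch(threads);
        Class<? extends Task> taskType = Task.class;
        for (int i = 0; i < threads; i++) {
            Constructor<? extends Task> ctor =
                    taskType.getConstructor(HashMap.class, String.class, CountDownLatch.class);
            Task task = ctor.newInstance(new HashMap<String, ArrayList<String>>(),
                    "ip_learning.r_locate_fqdn2ip_local", latch);
            task.start(); // the real code hands the task to the thread pool
        }
        latch.await(); // wait until every partition has been written
    }
}
```
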
@@ -110,11 +71,6 @@ public class UpdateGraphData {
             LOG.info(collection + " update finished, total time: " + (last-start));
         }catch (Exception e){
             e.printStackTrace();
-        }finally {
-            newMap.clear();
-            historyMap.clear();
         }
     }
-
-
 }
@@ -66,9 +66,11 @@ public class ReadClickhouseData {
             long bytesSum = resultSet.getLong("BYTES_SUM");
             String ipType = resultSet.getString("ip_type");
             String[] commonLinkInfos = (String[]) resultSet.getArray("common_link_info").getArray();
-            String commonLinkInfo = "";
-            if (commonLinkInfos.length > 1){
+            String commonLinkInfo;
+            if (commonLinkInfos.length > 1 && !commonLinkInfos[1].equals("")){
                 commonLinkInfo = commonLinkInfos[1];
+            }else {
+                commonLinkInfo = commonLinkInfos[0];
             }
             newDoc.setKey(ip);
             newDoc.addAttribute("IP", ip);
@@ -144,7 +146,8 @@ public class ReadClickhouseData {
             long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
             long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
             long countTotal = resultSet.getLong("COUNT_TOTAL");
-            String schemaType = resultSet.getString("schema_type");
+            // String schemaType = resultSet.getString("schema_type");
+            String schemaType = "";
             String[] distCipRecents = (String[]) resultSet.getArray("DIST_CIP_RECENT").getArray();
             long[] clientIpTs = new long[distCipRecents.length];
             for (int i = 0; i < clientIpTs.length; i++) {
@@ -154,8 +157,8 @@ public class ReadClickhouseData {
             String key = vFqdn + "-" + vIp;
             newDoc = new BaseEdgeDocument();
             newDoc.setKey(key);
-            newDoc.setFrom("FQDN/" + vFqdn);
-            newDoc.setTo("IP/" + vIp);
+            newDoc.setFrom(vFqdn);
+            newDoc.setTo(vIp);
             newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
             newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
             newDoc.addAttribute("DIST_CIP", distCipRecents);
@@ -183,8 +186,8 @@ public class ReadClickhouseData {

             newDoc = new BaseEdgeDocument();
             newDoc.setKey(key);
-            newDoc.setFrom("IP/" + vIp);
-            newDoc.setTo("FQDN/" + vFqdn);
+            newDoc.setFrom(vIp);
+            newDoc.setTo(vFqdn);
             newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
             newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
             newDoc.addAttribute("PROTOCOL_TYPE", schemaType);
@@ -212,12 +215,8 @@ public class ReadClickhouseData {
         if (fqdn == null || fqdn.length() == 0){
             return false;
         }
-        if (fqdn.contains(":")){
-            String s = fqdn.split(":")[0];
-            if (s.contains(":")){
-                return false;
-            }
-        }
+        fqdn = fqdn.split(":")[0];
         String[] fqdnArr = fqdn.split("\\.");
         if (fqdnArr.length < 4 || fqdnArr.length > 4) {
             return true;
@@ -263,9 +262,9 @@ public class ReadClickhouseData {
     }

     public static String getVertexIpSql() {
-        String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
-        String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
-        String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
+        String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_schema_type != 'BASE'";
+        String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_c2s) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
+        String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_s2c) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
         return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
     }

@@ -276,6 +275,11 @@ public class ReadClickhouseData {
         return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
     }

+    public static String getRelationshipFqdnAddressIpSqlYsp() {
+        String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_d_ip != '' ";
+        return "SELECT s1_domain AS FQDN,s1_d_ip AS common_server_ip,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(s1_s_ip) AS DIST_CIP_RECENT FROM media_expire_patch WHERE "+where+" GROUP BY s1_d_ip,s1_domain";
+    }
+
     public static String getRelationshipIpVisitFqdnSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
         String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
@@ -1,116 +0,0 @@
-package cn.ac.iie.service.ingestion;
-
-import cn.ac.iie.config.ApplicationConfig;
-import cn.ac.iie.utils.ArangoDBConnect;
-import com.arangodb.ArangoCursor;
-import com.arangodb.entity.BaseDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-
-import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;
-
-/**
- * @author wlh
- * Reads the full ArangoDB history on multiple threads and packs it into a map
- */
-@SuppressWarnings("unchecked")
-public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
-    private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
-
-    private ArangoDBConnect arangoConnect;
-    private String query;
-    private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
-    private Class<T> type;
-    private String table;
-    private CountDownLatch countDownLatch;
-
-    public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
-                                 String query,
-                                 ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
-                                 Class<T> type,
-                                 String table,
-                                 CountDownLatch countDownLatch) {
-        this.arangoConnect = arangoConnect;
-        this.query = query;
-        this.map = map;
-        this.type = type;
-        this.table = table;
-        this.countDownLatch = countDownLatch;
-    }
-
-    @Override
-    public void run() {
-        try {
-            long s = System.currentTimeMillis();
-            ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
-            if (docs != null) {
-                ArrayList<T> list = new ArrayList<>();
-                List<T> baseDocuments = docs.asListRemaining();
-                int i = 0;
-                for (T doc : baseDocuments) {
-                    String key = doc.getKey();
-                    switch (table) {
-                        case "R_LOCATE_FQDN2IP":
-                            updateProtocolDocument(doc);
-                            deleteDistinctClientIpByTime(doc);
-                            list.add(doc);
-                            break;
-                        default:
-                    }
-                    int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
-                    ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
-                    tmpMap.put(key, doc);
-                    i++;
-                }
-                arangoConnect.overwrite(list,table);
-                long l = System.currentTimeMillis();
-                LOG.info(query + "\nRead " + i + " records, run time: " + (l - s));
-            }
-        } catch (Exception e) {
-            e.printStackTrace();
-        } finally {
-            countDownLatch.countDown();
-            LOG.info("This thread finished reading, threads remaining: " + countDownLatch.getCount());
-        }
-    }
-
-    private void updateProtocolDocument(T doc) {
-        if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
-            for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
-                String protocolRecent = protocol + "_CNT_RECENT";
-                ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
-                Long[] cntRecentsSrc = cntRecent.toArray(new Long[0]);
-                Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
-                System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
-                cntRecentsDst[0] = 0L;
-                doc.addAttribute(protocolRecent, cntRecentsDst);
-            }
-        }
-    }
-
-    private void deleteDistinctClientIpByTime(T doc) {
-        ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
-        ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
-        distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
-        Collections.sort(distCipTs);
-        Collections.reverse(distCipTs);
-        int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
-        String[] distCipArr = new String[index];
-        long[] disCipTsArr = new long[index];
-        if (index != 0 && distCip.size() + 1 == distCipTs.size()){
-            for (int i = 0; i < index; i++) {
-                distCipArr[i] = distCip.get(i);
-                disCipTsArr[i] = distCipTs.get(i);
-            }
-        }
-        doc.updateAttribute("DIST_CIP", distCipArr);
-        doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
-    }
-
-}
@@ -1,34 +1,32 @@
 package cn.ac.iie.service.update;

 import cn.ac.iie.config.ApplicationConfig;
-import cn.ac.iie.utils.ArangoDBConnect;
+import cn.ac.iie.utils.ClickhouseConnect;
+import com.alibaba.druid.pool.DruidPooledConnection;
 import com.arangodb.entity.BaseDocument;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.sql.PreparedStatement;
+import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class Document<T extends BaseDocument> extends Thread{
     private static final Logger LOG = LoggerFactory.getLogger(Document.class);
     private HashMap<String, ArrayList<T>> newDocumentMap;
-    private ArangoDBConnect arangoManger;
     private String collectionName;
-    private ConcurrentHashMap<String, T> historyDocumentMap;
     private CountDownLatch countDownLatch;

+    private ClickhouseConnect manger = ClickhouseConnect.getInstance();

     Document(HashMap<String, ArrayList<T>> newDocumentMap,
-             ArangoDBConnect arangoManger,
              String collectionName,
-             ConcurrentHashMap<String, T> historyDocumentMap,
              CountDownLatch countDownLatch) {
         this.newDocumentMap = newDocumentMap;
-        this.arangoManger = arangoManger;
         this.collectionName = collectionName;
-        this.historyDocumentMap = historyDocumentMap;
         this.countDownLatch = countDownLatch;
     }

@@ -36,28 +34,32 @@ public class Document<T extends BaseDocument> extends Thread{
     @Override
     public void run() {
         long start = System.currentTimeMillis();
-        LOG.info("Newly read " + newDocumentMap.size() + " records, history " + historyDocumentMap.size() + " records");
+        LOG.info("Read " + newDocumentMap.size() + " records");
         try {
             Set<String> keySet = newDocumentMap.keySet();
-            ArrayList<T> resultDocumentList = new ArrayList<>();
+            DruidPooledConnection connection = manger.getConnection();
+            String sql = "INSERT INTO "+collectionName+" VALUES(?,?,?,?,?,?,?,?,?)";
+            PreparedStatement pstm = connection.prepareStatement(sql);
             int i = 0;
             for (String key : keySet) {
                 ArrayList<T> newDocumentSchemaList = newDocumentMap.getOrDefault(key, null);
                 if (newDocumentSchemaList != null) {
                     T newDocument = mergeDocument(newDocumentSchemaList);
+                    pstm = setPstm(pstm, newDocument);
                     i += 1;
-                    T historyDocument = historyDocumentMap.getOrDefault(key, null);
-                    updateDocument(newDocument,historyDocument,resultDocumentList);
+                    pstm.addBatch();
                     if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
-                        arangoManger.overwrite(resultDocumentList, collectionName);
-                        LOG.info("Updated " + collectionName + ": " + i);
+                        pstm.executeBatch();
+                        connection.commit();
+                        LOG.warn("Rows written to clickhouse: " + i);
                         i = 0;
                     }
                 }
             }
             if (i != 0) {
-                arangoManger.overwrite(resultDocumentList, collectionName);
-                LOG.info("Updated " + collectionName + ": " + i);
+                pstm.executeBatch();
+                connection.commit();
+                LOG.warn("Rows written to clickhouse: " + i);
             }
         } catch (Exception e) {
             e.printStackTrace();
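
Note on the hunk above: `Document.run()` now streams merged documents into ClickHouse through a parameterized `INSERT` with JDBC batching (`addBatch`/`executeBatch`/`commit`) rather than overwriting ArangoDB documents. Below is a generic sketch of that batching pattern over plain JDBC; the URL, credentials, table, column list, and flush threshold are placeholders — the project obtains its connection from a Druid pool and fills the statement via `setPstm`.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.List;

public class BatchInsertSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL/credentials; the project reads db.id, mdb.user and
        // mdb.password from its clickhouse properties and uses a Druid pool.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:clickhouse://127.0.0.1:8123/ip_learning", "default", "");
             PreparedStatement pstm = conn.prepareStatement(
                     "INSERT INTO r_locate_fqdn2ip_local VALUES(?,?,?)")) {
            conn.setAutoCommit(false);
            List<String[]> rows = List.of(
                    new String[]{"example.com", "1.2.3.4", "1571241600"},
                    new String[]{"example.org", "5.6.7.8", "1571241601"});
            int pending = 0;
            for (String[] row : rows) {
                pstm.setString(1, row[0]);                // edge "from": FQDN
                pstm.setString(2, row[1]);                // edge "to": IP
                pstm.setLong(3, Long.parseLong(row[2]));  // FIRST_FOUND_TIME
                pstm.addBatch();
                if (++pending >= 100000) {                // flush threshold, like UPDATE_ARANGO_BATCH
                    pstm.executeBatch();
                    conn.commit();
                    pending = 0;
                }
            }
            if (pending != 0) {                           // flush the final partial batch
                pstm.executeBatch();
                conn.commit();
            }
        }
    }
}
```
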
@@ -69,18 +71,8 @@ public class Document<T extends BaseDocument> extends Thread{
         }
     }

-    private void updateDocument(T newDocument, T historyDocument, ArrayList<T> resultDocumentList) {
-        if (historyDocument != null){
-            updateFunction(newDocument,historyDocument);
-            resultDocumentList.add(historyDocument);
-        }else {
-            resultDocumentList.add(newDocument);
-        }
-    }
-
-    protected void updateFunction(T newDocument, T historyDocument) {
-        Object lastFoundTime = newDocument.getAttribute("LAST_FOUND_TIME");
-        historyDocument.addAttribute("LAST_FOUND_TIME",lastFoundTime);
+    protected PreparedStatement setPstm(PreparedStatement pstm,T newDocument) throws SQLException {
+        return pstm;
     }

     private T mergeDocument(ArrayList<T> newDocumentSchemaList){
@@ -1,27 +1,18 @@
 package cn.ac.iie.service.update;

 import cn.ac.iie.service.ingestion.ReadClickhouseData;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseEdgeDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class Relationship extends Document<BaseEdgeDocument> {

     public Relationship(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
-                        ArangoDBConnect arangoManger,
                         String collectionName,
-                        ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
                         CountDownLatch countDownLatch) {
-        super(newDocumentHashMap,arangoManger,collectionName,historyDocumentMap,countDownLatch);
-    }
-
-    @Override
-    protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument){
-        super.updateFunction(newEdgeDocument,historyEdgeDocument);
+        super(newDocumentHashMap,collectionName,countDownLatch);
     }

     @Override
@@ -29,26 +20,6 @@ public class Relationship extends Document<BaseEdgeDocument> {
         super.mergeFunction(lastDoc, newDocument);
     }

-    protected void updateProcotol(BaseEdgeDocument historyEdgeDocument, String schema, BaseEdgeDocument newEdgeDocument){
-        String recentSchema = schema +"_CNT_RECENT";
-        String totalSchema = schema + "_CNT_TOTAL";
-        long countTotal = Long.parseLong(newEdgeDocument.getAttribute(totalSchema).toString());
-        if (countTotal > 0L){
-            long updateCountTotal = Long.parseLong(historyEdgeDocument.getAttribute(totalSchema).toString());
-
-            Long[] cntRecent = (Long[]) historyEdgeDocument.getAttribute(recentSchema);
-            cntRecent[0] = countTotal;
-
-            historyEdgeDocument.addAttribute(recentSchema, cntRecent);
-            historyEdgeDocument.addAttribute(totalSchema, countTotal + updateCountTotal);
-            String hisProtocolType = historyEdgeDocument.getAttribute("PROTOCOL_TYPE").toString();
-            if (!hisProtocolType.contains(schema)){
-                hisProtocolType = hisProtocolType + "," + schema;
-                historyEdgeDocument.addAttribute("PROTOCOL_TYPE",hisProtocolType);
-            }
-        }
-    }
-
     protected void mergeProtocol(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
         String schema = lastDoc.getAttribute("PROTOCOL_TYPE").toString();
         if (ReadClickhouseData.PROTOCOL_SET.contains(schema)){
@@ -1,11 +1,9 @@
 package cn.ac.iie.service.update;

-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 /**
@@ -15,11 +13,9 @@ import java.util.concurrent.CountDownLatch;
 public class Vertex extends Document<BaseDocument> {

     public Vertex(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
-                  ArangoDBConnect arangoManger,
                   String collectionName,
-                  ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
                   CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
+        super(newDocumentHashMap, collectionName, countDownLatch);
     }

 }
@@ -1,25 +1,23 @@
 package cn.ac.iie.service.update.relationship;

-import cn.ac.iie.service.ingestion.ReadClickhouseData;
 import cn.ac.iie.service.update.Relationship;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseEdgeDocument;
+import ru.yandex.clickhouse.ClickHouseArray;
+import ru.yandex.clickhouse.domain.ClickHouseDataType;

+import java.sql.PreparedStatement;
+import java.sql.SQLException;
 import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

-import static cn.ac.iie.service.ingestion.ReadClickhouseData.DISTINCT_CLIENT_IP_NUM;
 import static cn.ac.iie.service.ingestion.ReadClickhouseData.currentHour;

 public class LocateFqdn2Ip extends Relationship {

     public LocateFqdn2Ip(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
-                         ArangoDBConnect arangoManger,
                          String collectionName,
-                         ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
                          CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
+        super(newDocumentHashMap, collectionName,countDownLatch);
     }

     @Override
@@ -29,7 +27,23 @@ public class LocateFqdn2Ip extends Relationship {
         mergeProtocol(lastDoc, newDocument);
     }

-    private void mergeDistinctClientIp(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
+    @Override
+    protected PreparedStatement setPstm(PreparedStatement pstm,BaseEdgeDocument newDocument) throws SQLException {
+        pstm.setString(1,newDocument.getFrom());
+        pstm.setString(2,newDocument.getTo());
+        pstm.setLong(3,Long.parseLong(newDocument.getAttribute("FIRST_FOUND_TIME").toString()));
+        pstm.setLong(4,Long.parseLong(newDocument.getAttribute("LAST_FOUND_TIME").toString()));
+        pstm.setLong(5,Long.parseLong(newDocument.getAttribute("DNS_CNT_TOTAL").toString()));
+        pstm.setLong(6,Long.parseLong(newDocument.getAttribute("TLS_CNT_TOTAL").toString()));
+        pstm.setLong(7,Long.parseLong(newDocument.getAttribute("HTTP_CNT_TOTAL").toString()));
+        Object[] distCips = (Object[]) newDocument.getAttribute("DIST_CIP");
+        pstm.setArray(8,new ClickHouseArray(ClickHouseDataType.Int64, distCips));
+        pstm.setLong(9,currentHour);
+
+        return pstm;
+    }
+
+    private void mergeDistinctClientIp(BaseEdgeDocument lastDoc, BaseEdgeDocument newDocument){
         HashSet<String> clientIpSet = new HashSet<>();
         String[] distCips = (String[]) newDocument.getAttribute("DIST_CIP");
         String[] lastDistCips = (String[]) lastDoc.getAttribute("DIST_CIP");
@@ -43,56 +57,5 @@ public class LocateFqdn2Ip extends Relationship {
         newDocument.addAttribute("DIST_CIP_TS", clientIpTs);
     }

-    @Override
-    protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
-        super.updateFunction(newEdgeDocument, historyEdgeDocument);
-        for (String schema:ReadClickhouseData.PROTOCOL_SET){
-            updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
-        }
-        updateDistinctClientIp(newEdgeDocument, historyEdgeDocument);
-    }
-
-    private void updateDistinctClientIp(BaseEdgeDocument newEdgeDocument,BaseEdgeDocument edgeDocument){
-        String[] distCip = (String[]) edgeDocument.getAttribute("DIST_CIP");
-        long[] distCipTs = (long[]) edgeDocument.getAttribute("DIST_CIP_TS");
-        HashMap<String, Long> distCipToTs = new HashMap<>();
-        if (distCip.length == distCipTs.length){
-            for (int i = 0;i < distCip.length;i++){
-                distCipToTs.put(distCip[i],distCipTs[i]);
-            }
-        }
-        Object[] distCipRecent = (Object[])newEdgeDocument.getAttribute("DIST_CIP");
-        for (Object cip:distCipRecent){
-            distCipToTs.put(cip.toString(), currentHour);
-        }
-
-        Map<String, Long> sortDistCip = sortMapByValue(distCipToTs);
-        edgeDocument.addAttribute("DIST_CIP",sortDistCip.keySet().toArray());
-        edgeDocument.addAttribute("DIST_CIP_TS",sortDistCip.values().toArray());
-    }
-
-
-    /**
-     * Sort the Map by value
-     */
-    private Map<String, Long> sortMapByValue(Map<String, Long> oriMap) {
-        if (oriMap == null || oriMap.isEmpty()) {
-            return null;
-        }
-        Map<String, Long> sortedMap = new LinkedHashMap<>();
-        List<Map.Entry<String, Long>> entryList = new ArrayList<>(oriMap.entrySet());
-        entryList.sort((o1, o2) -> o2.getValue().compareTo(o1.getValue()));
-
-        if(entryList.size() > DISTINCT_CLIENT_IP_NUM){
-            for(Map.Entry<String, Long> set:entryList.subList(0, DISTINCT_CLIENT_IP_NUM)){
-                sortedMap.put(set.getKey(), set.getValue());
-            }
-        }else {
-            for(Map.Entry<String, Long> set:entryList){
-                sortedMap.put(set.getKey(), set.getValue());
-            }
-        }
-        return sortedMap;
-    }
-
 }
@@ -1,21 +1,17 @@
 package cn.ac.iie.service.update.relationship;

 import cn.ac.iie.service.update.Relationship;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseEdgeDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class LocateSubscriber2Ip extends Relationship {

     public LocateSubscriber2Ip(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
-                               ArangoDBConnect arangoManger,
                                String collectionName,
-                               ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
                                CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
+        super(newDocumentHashMap, collectionName, countDownLatch);
     }
 }
@@ -2,30 +2,17 @@ package cn.ac.iie.service.update.relationship;

 import cn.ac.iie.service.ingestion.ReadClickhouseData;
 import cn.ac.iie.service.update.Relationship;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseEdgeDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class VisitIp2Fqdn extends Relationship {
     public VisitIp2Fqdn(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
-                        ArangoDBConnect arangoManger,
                         String collectionName,
-                        ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
                         CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
-    }
-
-    @Override
-    protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
-        super.updateFunction(newEdgeDocument, historyEdgeDocument);
-        for (String schema: ReadClickhouseData.PROTOCOL_SET){
-            updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
-        }
+        super(newDocumentHashMap, collectionName,countDownLatch);
     }

     @Override
@@ -1,21 +1,17 @@
 package cn.ac.iie.service.update.vertex;

 import cn.ac.iie.service.update.Vertex;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class Fqdn extends Vertex {

     public Fqdn(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
-                ArangoDBConnect arangoManger,
                 String collectionName,
-                ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
                 CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
+        super(newDocumentHashMap, collectionName,countDownLatch);
     }
 }
@@ -1,29 +1,18 @@
 package cn.ac.iie.service.update.vertex;

 import cn.ac.iie.service.update.Vertex;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class Ip extends Vertex {

     public Ip(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
-              ArangoDBConnect arangoManger,
               String collectionName,
-              ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
               CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
-    }
-
-    @Override
-    protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
-        super.updateFunction(newDocument, historyDocument);
-        updateIpByType(newDocument, historyDocument);
-        super.replaceAttribute(newDocument,historyDocument,"COMMON_LINK_INFO");
+        super(newDocumentHashMap, collectionName,countDownLatch);
     }

     @Override
@@ -1,21 +1,17 @@
 package cn.ac.iie.service.update.vertex;

 import cn.ac.iie.service.update.Vertex;
-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.entity.BaseDocument;

 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

 public class Subscriber extends Vertex {

     public Subscriber(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
-                      ArangoDBConnect arangoManger,
                       String collectionName,
-                      ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
                       CountDownLatch countDownLatch) {
-        super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
+        super(newDocumentHashMap, collectionName, countDownLatch);
     }
 }
@@ -1,96 +0,0 @@
package cn.ac.iie.utils;

import cn.ac.iie.config.ApplicationConfig;
import com.arangodb.ArangoCollection;
import com.arangodb.ArangoCursor;
import com.arangodb.ArangoDB;
import com.arangodb.ArangoDatabase;
import com.arangodb.entity.DocumentCreateEntity;
import com.arangodb.entity.ErrorEntity;
import com.arangodb.entity.MultiDocumentEntity;
import com.arangodb.model.AqlQueryOptions;
import com.arangodb.model.DocumentCreateOptions;
import com.arangodb.util.MapBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class ArangoDBConnect {
    private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
    private static ArangoDB arangoDB = null;
    private static ArangoDBConnect conn = null;

    static {
        getArangoDB();
    }

    private static void getArangoDB() {
        arangoDB = new ArangoDB.Builder()
                .maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
                .host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)
                .user(ApplicationConfig.ARANGODB_USER)
                .password(ApplicationConfig.ARANGODB_PASSWORD)
                .build();
    }

    public static synchronized ArangoDBConnect getInstance() {
        if (null == conn) {
            conn = new ArangoDBConnect();
        }
        return conn;
    }

    private ArangoDatabase getDatabase() {
        return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME);
    }

    public void clean() {
        try {
            if (arangoDB != null) {
                arangoDB.shutdown();
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
        }
    }

    public <T> ArangoCursor<T> executorQuery(String query, Class<T> type) {
        ArangoDatabase database = getDatabase();
        Map<String, Object> bindVars = new MapBuilder().get();
        AqlQueryOptions options = new AqlQueryOptions()
                .ttl(ApplicationConfig.ARANGODB_TTL);
        try {
            return database.query(query, bindVars, options, type);
        } catch (Exception e) {
            LOG.error(e.getMessage());
            return null;
        } finally {
            bindVars.clear();
        }
    }

    public <T> void overwrite(List<T> docOverwrite, String collectionName) {
        ArangoDatabase database = getDatabase();
        try {
            ArangoCollection collection = database.collection(collectionName);
            if (!docOverwrite.isEmpty()) {
                DocumentCreateOptions documentCreateOptions = new DocumentCreateOptions();
                documentCreateOptions.overwrite(true);
                documentCreateOptions.silent(true);
                MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
                Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
                for (ErrorEntity errorEntity : errors) {
                    LOG.error("error writing to arangoDB: " + errorEntity.getErrorMessage());
                }
            }
        } catch (Exception e) {
            LOG.error("update failed: " + e.toString());
        } finally {
            docOverwrite.clear();
        }
    }
}
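The deleted helper above wrote documents with DocumentCreateOptions.overwrite(true), ArangoDB's "repsert" insert that replaces an existing document with the same _key. A minimal sketch of that call pattern, assuming the same arangodb-java-driver API; the host, credentials, database and collection names here are placeholders, not values from this repo's config:

    import com.arangodb.ArangoDB
    import com.arangodb.entity.BaseDocument
    import com.arangodb.model.DocumentCreateOptions
    import scala.collection.JavaConverters._

    object OverwriteSketch {
      def main(args: Array[String]): Unit = {
        // hypothetical connection details, for illustration only
        val arango = new ArangoDB.Builder().host("127.0.0.1", 8529).user("root").build()
        val coll = arango.db("ip-learning-test").collection("FQDN")
        val doc = new BaseDocument()
        doc.setKey("example.com")
        doc.addAttribute("LAST_FOUND_TIME", 1598246519L)
        // overwrite(true) turns the insert into a repsert: a duplicate _key is replaced
        val opts = new DocumentCreateOptions().overwrite(true).silent(true)
        coll.insertDocuments(List[BaseDocument](doc).asJava, opts)
        arango.shutdown()
      }
    }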
@@ -1,11 +1,9 @@
 #arangoDB parameter configuration
-arangoDB.host=192.168.40.182
-#arangoDB.host=192.168.40.224
+arangoDB.host=192.168.44.12
 arangoDB.port=8529
 arangoDB.user=upsert
 arangoDB.password=ceiec2018
-arangoDB.DB.name=ip-learning-test
-#arangoDB.DB.name=tsg_galaxy_v3
+arangoDB.DB.name=tsg_galaxy_v3
 arangoDB.batch=100000
 arangoDB.ttl=3600

@@ -17,9 +15,9 @@ thread.await.termination.time=10


 #clickhouse time-range mode, 0: read the past hour, 1: use the specified range
-time.limit.type=0
-read.clickhouse.max.time=1596684142
-read.clickhouse.min.time=1596425769
+time.limit.type=1
+read.clickhouse.max.time=1571241610
+read.clickhouse.min.time=1571241600

 update.interval=3600
 distinct.client.ip.num=10000
@@ -1,9 +1,10 @@
 drivers=ru.yandex.clickhouse.ClickHouseDriver
 mdb.user=default
-db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
-mdb.password=111111
-#db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
+#db.id=192.168.44.10:8124/tsg_galaxy_v3?socket_timeout=300000
+#db.id=192.168.44.12:8123/tsg_galaxy_v3?socket_timeout=300000&compress=0
 #mdb.password=ceiec2019
+db.id=192.168.40.193:8123/tsg_galaxy_zx?socket_timeout=3600000
+mdb.password=111111
 initialsize=1
 minidle=1
 maxactive=50
@@ -1,6 +1,5 @@
 package cn.ac.iie;

-import cn.ac.iie.utils.ArangoDBConnect;
 import com.arangodb.ArangoCursor;
 import com.arangodb.entity.BaseEdgeDocument;
@@ -1,12 +1,8 @@
 package cn.ac.iie;

-import cn.ac.iie.dao.BaseArangoData;
-import cn.ac.iie.utils.ArangoDBConnect;
-import cn.ac.iie.utils.ExecutorThreadPool;
 import com.arangodb.entity.*;

 import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;

 public class TestMap {
@@ -1,10 +0,0 @@
package cn.ac.iie;

import cn.ac.iie.dao.BaseArangoData;
import com.arangodb.entity.BaseEdgeDocument;

public class readHistoryDataTest {
    public static void main(String[] args) {
        BaseArangoData baseArangoData = new BaseArangoData();
    }
}
@@ -27,6 +27,12 @@
         <version>4.4.6</version>
     </dependency>

+    <dependency>
+        <groupId>com.alibaba</groupId>
+        <artifactId>druid</artifactId>
+        <version>1.1.10</version>
+    </dependency>
+
     <dependency>
         <groupId>com.google.guava</groupId>
         <artifactId>guava</artifactId>
@@ -75,6 +81,12 @@
         <version>3.2.0</version>
     </dependency>

+    <dependency>
+        <groupId>org.scala-lang.modules</groupId>
+        <artifactId>scala-xml_2.11</artifactId>
+        <version>1.0.4</version>
+    </dependency>
+
     <dependency>
         <groupId>org.scala-tools</groupId>
@@ -1,103 +0,0 @@
package cn.ac.iie.dao;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
 * Fetch arangoDB history data.
 *
 * @author wlh
 */
public class BaseArangoData {
    private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);

    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();

    private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();

    private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();

    public <T extends BaseDocument> void readHistoryData(String table,
                                                         ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
                                                         Class<T> type) {
        try {
            LOG.warn("start updating " + table);
            long start = System.currentTimeMillis();
            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
                historyMap.put(i, new ConcurrentHashMap<>());
            }
            CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
            long[] timeRange = getTimeRange(table);
            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
                String sql = getQuerySql(timeRange, i, table);
                ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
                threadPool.executor(readHistoryArangoData);
            }
            countDownLatch.await();
            long last = System.currentTimeMillis();
            LOG.warn("reading " + table + " from arangoDB took: " + (last - start));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private long[] getTimeRange(String table) {
        long minTime = 0L;
        long maxTime = 0L;
        long startTime = System.currentTimeMillis();
        String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
        switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
            case 0:
                ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
                try {
                    if (timeDoc != null) {
                        while (timeDoc.hasNext()) {
                            BaseDocument doc = timeDoc.next();
                            maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
                            minTime = Long.parseLong(doc.getAttribute("min_time").toString());
                        }
                    } else {
                        LOG.warn("arangoDB time-range query returned empty");
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
                break;
            case 1:
                maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
                minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
                break;
            default:
        }
        long lastTime = System.currentTimeMillis();
        LOG.warn(sql + "\ntime to query max/min time: " + (lastTime - startTime));
        return new long[]{minTime, maxTime};
    }

    private String getQuerySql(long[] timeRange, int threadNumber, String table) {
        long minTime = timeRange[0];
        long maxTime = timeRange[1];
        long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
        long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
        long minThreadTime = minTime + threadNumber * diffTime;
        return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
    }
}
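getQuerySql above splits the [min, max] FIRST_FOUND_TIME range into THREAD_POOL_NUMBER equal slices so each reader thread scans its own window (getTimeRange pads maxTime by THREAD_POOL_NUMBER to absorb the integer-division rounding). A sketch of the slice arithmetic with hypothetical numbers; note the inclusive >=/<= bounds mean a document sitting exactly on a slice boundary can be read by two threads:

    // Sketch of the range sharding used by getQuerySql; values are made up.
    object RangeShardSketch {
      def main(args: Array[String]): Unit = {
        val (minTime, maxTime, threads) = (1000L, 2000L, 5)
        val step = (maxTime - minTime) / threads // 200
        for (t <- 0 until threads) {
          val lo = minTime + t * step
          val hi = minTime + (t + 1) * step
          // each thread scans [lo, hi]; inclusive bounds overlap at the edges
          println(s"thread $t: FILTER doc.FIRST_FOUND_TIME >= $lo AND doc.FIRST_FOUND_TIME <= $hi")
        }
      }
    }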
@@ -1,125 +0,0 @@
package cn.ac.iie.service.read;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
 * @author wlh
 * Reads the full arangoDb history on multiple threads and packs it into a map.
 */
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
    public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
    private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
    static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();

    public static final HashSet<String> PROTOCOL_SET;

    static {
        PROTOCOL_SET = new HashSet<>();
        PROTOCOL_SET.add("HTTP");
        PROTOCOL_SET.add("TLS");
        PROTOCOL_SET.add("DNS");
    }

    private ArangoDBConnect arangoConnect;
    private String query;
    private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
    private Class<T> type;
    private String table;
    private CountDownLatch countDownLatch;

    public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
                                 String query,
                                 ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
                                 Class<T> type,
                                 String table,
                                 CountDownLatch countDownLatch) {
        this.arangoConnect = arangoConnect;
        this.query = query;
        this.map = map;
        this.type = type;
        this.table = table;
        this.countDownLatch = countDownLatch;
    }

    @Override
    public void run() {
        try {
            long s = System.currentTimeMillis();
            ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
            if (docs != null) {
                List<T> baseDocuments = docs.asListRemaining();
                int i = 0;
                for (T doc : baseDocuments) {
                    String key = doc.getKey();
                    switch (table) {
                        case "R_LOCATE_FQDN2IP":
                            updateProtocolDocument(doc);
                            deleteDistinctClientIpByTime(doc);
                            break;
                        case "R_VISIT_IP2FQDN":
                            updateProtocolDocument(doc);
                            break;
                        default:
                    }
                    int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER();
                    ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
                    tmpMap.put(key, doc);
                    i++;
                }
                long l = System.currentTimeMillis();
                LOG.warn(query + "\nread " + i + " documents, elapsed time: " + (l - s));
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            countDownLatch.countDown();
            LOG.warn("this thread finished reading, threads remaining: " + countDownLatch.getCount());
        }
    }

    private void updateProtocolDocument(T doc) {
        if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
            for (String protocol : PROTOCOL_SET) {
                String protocolRecent = protocol + "_CNT_RECENT";
                ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
                Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
                Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
                System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
                cntRecentsDst[0] = 0L;
                doc.addAttribute(protocolRecent, cntRecentsDst);
            }
        }
    }

    private void deleteDistinctClientIpByTime(T doc) {
        ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
        ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
        distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
        Collections.sort(distCipTs);
        int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
        String[] distCipArr = new String[index];
        long[] disCipTsArr = new long[index];
        if (distCip.size() + 1 == distCipTs.size()) {
            for (int i = 0; i < index; i++) {
                distCipArr[i] = distCip.get(i);
                disCipTsArr[i] = distCipTs.get(i);
            }
        }
        doc.updateAttribute("DIST_CIP", distCipArr);
        doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
    }
}
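updateProtocolDocument treats each *_CNT_RECENT attribute as an hourly window with index 0 as the current hour: it shifts the counters right by one slot, drops the oldest value, and zeroes the new slot. A self-contained sketch of that shift, with 4 standing in for RECENT_COUNT_HOUR:

    // Sketch of the hourly shift applied to the *_CNT_RECENT counters above:
    // shift everything right by one, drop the oldest hour, zero the new hour.
    object ShiftSketch {
      def main(args: Array[String]): Unit = {
        val recentHours = 4                      // stands in for RECENT_COUNT_HOUR
        val src = Array[Long](7L, 5L, 3L, 1L)    // index 0 = current hour
        val dst = new Array[Long](recentHours)
        System.arraycopy(src, 0, dst, 1, src.length - 1)
        dst(0) = 0L
        println(dst.mkString(","))               // prints 0,7,5,3
      }
    }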
@@ -0,0 +1,93 @@
package cn.ac.iie.utils;

import cn.ac.iie.config.ApplicationConfig;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidPooledConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class ClickhouseConnect {
    private static final Logger LOG = LoggerFactory.getLogger(ClickhouseConnect.class);
    private static DruidDataSource dataSource = null;
    private static ClickhouseConnect dbConnect = null;

    static {
        getDbConnect();
    }

    private static void getDbConnect() {
        try {
            if (dataSource == null) {
                dataSource = new DruidDataSource();
                // connection parameters
                dataSource.setUrl(ApplicationConfig.SPARK_WRITE_CLICKHOUSE_URL());
                dataSource.setDriverClassName(ApplicationConfig.SPARK_READ_CLICKHOUSE_DRIVER());
                dataSource.setUsername(ApplicationConfig.SPARK_READ_CLICKHOUSE_USER());
                dataSource.setPassword(ApplicationConfig.SPARK_READ_CLICKHOUSE_PASSWORD());
                // initial, minimum and maximum pool sizes
                dataSource.setInitialSize(ApplicationConfig.SPARK_WRITE_CLICKHOUSE_INITIALSIZE());
                dataSource.setMinIdle(ApplicationConfig.SPARK_WRITE_CLICKHOUSE_MINIDLE());
                dataSource.setMaxActive(ApplicationConfig.SPARK_WRITE_CLICKHOUSE_MAXACTIVE());
                // how long to wait when acquiring a connection
                dataSource.setMaxWait(30000);
                // interval, in milliseconds, between eviction runs that close idle connections
                dataSource.setTimeBetweenEvictionRunsMillis(2000);
                // keep pooled connections from going stale
                dataSource.setValidationQuery("SELECT 1");
                dataSource.setTestWhileIdle(true);
                dataSource.setTestOnBorrow(true);
                dataSource.setKeepAlive(true);
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
        }
    }

    /**
     * Database connection pool singleton.
     *
     * @return dbConnect
     */
    public static synchronized ClickhouseConnect getInstance() {
        if (null == dbConnect) {
            dbConnect = new ClickhouseConnect();
        }
        return dbConnect;
    }

    /**
     * Returns a druid database connection.
     *
     * @return the connection
     * @throws SQLException on SQL errors
     */
    public DruidPooledConnection getConnection() throws SQLException {
        return dataSource.getConnection();
    }

    /**
     * Closes the PreparedStatement and Connection objects; null arguments are skipped.
     *
     * @param pstmt      PreparedStatement object
     * @param connection Connection object
     */
    public void clear(Statement pstmt, Connection connection) {
        try {
            if (pstmt != null) {
                pstmt.close();
            }
            if (connection != null) {
                connection.close();
            }
        } catch (SQLException e) {
            LOG.error(e.getMessage());
        }
    }
}
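A minimal usage sketch for the pool above; the table name and values are placeholders, not part of this job. Closing a DruidPooledConnection via clear() returns it to the pool rather than dropping the socket:

    import java.sql.PreparedStatement
    import cn.ac.iie.utils.ClickhouseConnect

    object PoolUsageSketch {
      def main(args: Array[String]): Unit = {
        val pool = ClickhouseConnect.getInstance()
        val conn = pool.getConnection
        // hypothetical two-column table, for illustration only
        val pstm: PreparedStatement = conn.prepareStatement("INSERT INTO demo_table VALUES(?, ?)")
        pstm.setString(1, "example.com")
        pstm.setLong(2, 1598246519L)
        pstm.addBatch()
        pstm.executeBatch()
        pool.clear(pstm, conn) // returns the connection to the druid pool
      }
    }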
@@ -11,9 +11,17 @@ spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
 spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
 spark.read.clickhouse.user=default
 spark.read.clickhouse.password=111111
-spark.read.clickhouse.numPartitions=144
+spark.read.clickhouse.numPartitions=10
 spark.read.clickhouse.fetchsize=10000
-spark.read.clickhouse.partitionColumn=common_recv_time
+spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
+
+spark.write.clickhouse.url=jdbc:clickhouse://192.168.40.194:8123/ip_learning?socket_timeout=3600000
+spark.write.clickhouse.user=default
+spark.write.clickhouse.password=111111
+spark.write.clickhouse.initialsize=1
+spark.write.clickhouse.minidle=1
+spark.write.clickhouse.maxactive=50
+
 clickhouse.socket.timeout=300000
 #arangoDB configuration
 arangoDB.host=192.168.40.182
@@ -27,9 +35,9 @@ arangoDB.ttl=3600
 thread.pool.number=5

 #clickhouse time-range mode, 0: read the past hour; 1: use the specified range
-clickhouse.time.limit.type=0
-read.clickhouse.max.time=1571245220
-read.clickhouse.min.time=1571245210
+clickhouse.time.limit.type=1
+read.clickhouse.max.time=1598246519
+read.clickhouse.min.time=1597161600

 #arangoDB time-range mode, 0: normal read; 1: use the specified range
 arango.time.limit.type=0
@@ -21,6 +21,13 @@ object ApplicationConfig {
   val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
   val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")

+  val SPARK_WRITE_CLICKHOUSE_URL: String = config.getString("spark.write.clickhouse.url")
+  val SPARK_WRITE_CLICKHOUSE_USER: String = config.getString("spark.write.clickhouse.user")
+  val SPARK_WRITE_CLICKHOUSE_PASSWORD: String = config.getString("spark.write.clickhouse.password")
+  val SPARK_WRITE_CLICKHOUSE_INITIALSIZE: Int = config.getInt("spark.write.clickhouse.initialsize")
+  val SPARK_WRITE_CLICKHOUSE_MINIDLE: Int = config.getInt("spark.write.clickhouse.minidle")
+  val SPARK_WRITE_CLICKHOUSE_MAXACTIVE: Int = config.getInt("spark.write.clickhouse.maxactive")
+
   val ARANGODB_HOST: String = config.getString("arangoDB.host")
   val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
   val ARANGODB_USER: String = config.getString("arangoDB.user")
@@ -11,7 +11,7 @@ object BaseClickhouseData {
   val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
   private val timeLimit: (Long, Long) = getTimeLimit

-  private def initClickhouseData(sql: String): Unit = {
+  private def initClickhouseData(sql: String): DataFrame = {
     val dataFrame: DataFrame = spark.read.format("jdbc")
       .option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
@@ -28,10 +28,11 @@ object BaseClickhouseData {
       .load()
     dataFrame.printSchema()
     dataFrame.createOrReplaceGlobalTempView("dbtable")
+    dataFrame
   }

   def loadConnectionDataFromCk(): Unit = {
-    val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
+    val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1 + " AND common_schema_type != 'BASE'"
     val sql =
       s"""
         |(SELECT
@@ -146,6 +147,30 @@ object BaseClickhouseData {
     vertexIpDf
   }

+  def getRelationFqdnLocateIpDf(): DataFrame = {
+    val where = "common_end_time >= " + timeLimit._2 + " AND common_end_time < " + timeLimit._1
+    val sql =
+      s"""
+        |(SELECT * FROM
+        |((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
+        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
+        |FROM tsg_galaxy_v3.connection_record_log
+        |WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
+        |UNION ALL
+        |(SELECT http_host AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
+        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
+        |FROM tsg_galaxy_v3.connection_record_log
+        |WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
+        |WHERE FQDN != '') as dbtable
+      """.stripMargin
+    LOG.warn(sql)
+    val frame = initClickhouseData(sql)
+    frame.printSchema()
+    frame
+  }
+
+  /*
   def getRelationFqdnLocateIpDf: DataFrame = {
     loadConnectionDataFromCk()
     val sslSql =
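The query string above is parenthesized and aliased "as dbtable" because Spark's JDBC source accepts a subquery anywhere a table name is expected, which is presumably how initClickhouseData passes it through. A hedged sketch of that hand-off; the URL mirrors the read config earlier in this diff but is an assumption here:

    import org.apache.spark.sql.{DataFrame, SparkSession}

    // Sketch: a parenthesized subquery can stand in for the "dbtable" option.
    object JdbcSubquerySketch {
      def read(spark: SparkSession, subquery: String): DataFrame =
        spark.read.format("jdbc")
          .option("url", "jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3")
          .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
          .option("dbtable", subquery) // e.g. "(SELECT ... ) as dbtable"
          .load()
    }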
@@ -190,6 +215,7 @@ object BaseClickhouseData {
     relationFqdnLocateIpDf.printSchema()
     relationFqdnLocateIpDf
   }
+  */

   private def getTimeLimit: (Long, Long) = {
     var maxTime = 0L
@@ -6,14 +6,17 @@ import cn.ac.iie.config.ApplicationConfig
 import cn.ac.iie.dao.BaseClickhouseData
 import cn.ac.iie.spark.partition.CustomPartitioner
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.Row
+import org.apache.spark.sql.{DataFrame, Row}
 import org.apache.spark.sql.functions._
 import org.slf4j.LoggerFactory


 object MergeDataFrame {
   private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
   private val pattern = Pattern.compile("^[\\d]*$")

+  private val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
+
   def mergeVertexFqdn(): RDD[Row] = {
     BaseClickhouseData.getVertexFqdnDf
       .rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0), row))
@@ -34,7 +37,7 @@ object MergeDataFrame {
     values
   }

-  def mergeRelationFqdnLocateIp(): RDD[Row] = {
+  def mergeRelationFqdnLocateIp(): DataFrame = {
     val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
       .groupBy("FQDN", "common_server_ip")
       .agg(
@@ -43,14 +46,8 @@ object MergeDataFrame {
         collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
         collect_list("schema_type").alias("schema_type_list"),
         collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
-      )
-    frame.rdd.map(row => {
-      val fqdn = row.getAs[String]("FQDN")
-      val serverIp = row.getAs[String]("common_server_ip")
-      val key = fqdn.concat("-" + serverIp)
-      (key, row)
-    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
+      ).repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
+    frame
   }

   private def isDomain(fqdn: String): Boolean = {
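The change above drops the RDD keyBy + CustomPartitioner round-trip in favour of a plain DataFrame repartition, which shuffles rows round-robin rather than by key. If rows still need to be co-located by (FQDN, server_ip), repartition can hash on the key columns directly; a sketch, where the partition count stands in for SPARK_SQL_SHUFFLE_PARTITIONS:

    import org.apache.spark.sql.DataFrame
    import org.apache.spark.sql.functions.col

    // Sketch: hash-partition by the key columns instead of a custom partitioner.
    object RepartitionSketch {
      def byKey(frame: DataFrame): DataFrame =
        frame.repartition(10, col("FQDN"), col("common_server_ip"))
    }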
@@ -58,13 +55,9 @@ object MergeDataFrame {
     if (fqdn == null || fqdn.length == 0) {
       return false
     }
-    if (fqdn.contains(":")) {
-      val s = fqdn.split(":")(0)
-      if (s.contains(":")) {
-        return false
-      }
-    }
-    val fqdnArr = fqdn.split("\\.")
+    val domain = fqdn.split(":")(0)
+    val fqdnArr = domain.split("\\.")
     if (fqdnArr.length < 4 || fqdnArr.length > 4) {
       return true
     }
@@ -4,7 +4,6 @@ package cn.ac.iie.service.update
 import java.lang

 import cn.ac.iie.config.ApplicationConfig
-import cn.ac.iie.service.read.ReadHistoryArangoData
 import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}

 import scala.collection.mutable
@@ -13,6 +12,8 @@ import scala.collection.mutable.WrappedArray.ofRef
 object UpdateDocHandler {
   val PROTOCOL_SET: Set[String] = Set("HTTP", "TLS", "DNS")

+  private val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
+
   def updateMaxAttribute(hisDoc: BaseDocument, newAttribute: Long, attributeName: String): Unit = {
     var hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
     if (newAttribute > hisAttritube) {
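The currentHour expression mixes units: epoch milliseconds are truncated to a whole hour by the integer division, then rescaled by * 60 * 60 into epoch seconds, not milliseconds. A worked example:

    // Worked example of the currentHour expression above.
    object CurrentHourSketch {
      def main(args: Array[String]): Unit = {
        val millis = 1598246519000L                    // 2020-08-24 05:21:59 UTC
        val hour = millis / (60 * 60 * 1000) * 60 * 60 // 1598245200 = 05:00:00 UTC, in seconds
        println(hour)
      }
    }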
@@ -93,13 +94,27 @@ object UpdateDocHandler {
     doc.addAttribute("PROTOCOL_TYPE", protocolTypeBuilder.toString().replaceFirst(",", ""))
   }

+  /*
   def mergeDistinctIp(distCipRecent: ofRef[ofRef[String]]): Array[String] = {
     distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
   }
+  */

+  def mergeDistinctIp(distCipRecent: ofRef[String]): Array[String] = {
+    distCipRecent.flatMap(str => {
+      str.replaceAll("\\[", "")
+        .replaceAll("\\]", "")
+        .replaceAll("'", "")
+        .split(",")
+    }).distinct.toArray
+    // distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
+  }
+
   def putDistinctIp(doc: BaseEdgeDocument, newDistinctIp: Array[String]): Unit = {
     val map = newDistinctIp.map(ip => {
-      (ip, ReadHistoryArangoData.currentHour)
+      (ip, currentHour)
     }).toMap
     doc.addAttribute("DIST_CIP", map.keys.toArray)
     doc.addAttribute("DIST_CIP_TS", map.values.toArray)
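The new mergeDistinctIp overload receives the stringified arrays produced by toString(groupUniqArray(...)) in the ClickHouse query, e.g. ['1.1.1.1','2.2.2.2'], so it strips brackets and quotes before splitting. A self-contained sketch of the same logic; the input quoting is inferred from the replaceAll calls:

    // Sketch of what the new mergeDistinctIp receives and returns.
    object MergeDistinctIpSketch {
      def merge(distCipRecent: Seq[String]): Array[String] =
        distCipRecent.flatMap(str =>
          str.replaceAll("\\[", "")
            .replaceAll("\\]", "")
            .replaceAll("'", "")
            .split(",")
        ).distinct.toArray

      def main(args: Array[String]): Unit = {
        val merged = merge(Seq("['1.1.1.1','2.2.2.2']", "['2.2.2.2','3.3.3.3']"))
        println(merged.mkString(","))  // 1.1.1.1,2.2.2.2,3.3.3.3
      }
    }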
@@ -112,7 +127,7 @@ object UpdateDocHandler {
     val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
     val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq: _*)
     newDistinctIp.foreach(cip => {
-      muDistCipToTsMap.put(cip, ReadHistoryArangoData.currentHour)
+      muDistCipToTsMap.put(cip, currentHour)
     })
     val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
     hisDoc.addAttribute("DIST_CIP", resultMap.keys.toArray)
@@ -1,83 +1,99 @@
 package cn.ac.iie.service.update

-import java.util
+import java.sql.PreparedStatement
 import java.util.concurrent.ConcurrentHashMap

 import cn.ac.iie.config.ApplicationConfig
-import cn.ac.iie.dao.BaseArangoData
-import cn.ac.iie.dao.BaseArangoData._
-import cn.ac.iie.service.transform.MergeDataFrame._
 import cn.ac.iie.service.update.UpdateDocHandler._
-import cn.ac.iie.utils.{ArangoDBConnect, ExecutorThreadPool, SparkSessionUtil}
-import cn.ac.iie.utils.SparkSessionUtil.spark
+import cn.ac.iie.utils.{ClickhouseConnect, SparkSessionUtil}
+import cn.ac.iie.service.transform.MergeDataFrame._
+import com.alibaba.druid.pool.DruidPooledConnection
 import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
-import org.apache.spark.TaskContext
-import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.Row
+import org.apache.spark.sql.{DataFrame, Row}
 import org.slf4j.LoggerFactory
+import ru.yandex.clickhouse.ClickHouseArray

 import scala.collection.mutable.WrappedArray.ofRef

 object UpdateDocument {
-  private val pool = ExecutorThreadPool.getInstance
-  private val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
   private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
-  private val baseArangoData = new BaseArangoData()
+  private val manger: ClickhouseConnect = ClickhouseConnect.getInstance()
+  private val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60

   def update(): Unit = {
     try {
-      updateDocument("FQDN", historyVertexFqdnMap, getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn)
-      updateDocument("IP", historyVertexIpMap, getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
-      updateDocument("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
+      updateDocument("r_locate_fqdn2ip_local", getRelationFqdnLocIpPstm, mergeRelationFqdnLocateIp)
     } catch {
       case e: Exception => e.printStackTrace()
     } finally {
-      pool.shutdown()
-      arangoManger.clean()
       SparkSessionUtil.closeSpark()
     }
   }

-  private def updateDocument[T <: BaseDocument](collName: String,
-                                                historyMap: ConcurrentHashMap[Integer, ConcurrentHashMap[String, T]],
-                                                getDocumentRow: (Row, ConcurrentHashMap[String, T]) => T,
-                                                clazz: Class[T],
-                                                getNewDataRdd: () => RDD[Row]
-                                               ): Unit = {
-    baseArangoData.readHistoryData(collName, historyMap, clazz)
-    val hisBc = spark.sparkContext.broadcast(historyMap)
+  private def updateDocument[T <: BaseDocument](tableName: String,
+                                                setPstm: (Row, PreparedStatement) => PreparedStatement,
+                                                getNewDataRdd: () => DataFrame): Unit = {
     try {
       val start = System.currentTimeMillis()
-      val newDataRdd = getNewDataRdd()
-      newDataRdd.foreachPartition(iter => {
-        val partitionId: Int = TaskContext.get.partitionId
-        val dictionaryMap: ConcurrentHashMap[String, T] = hisBc.value.get(partitionId)
-        val resultDocumentList = new util.ArrayList[T]
+      val newDataFrame = getNewDataRdd()
+      newDataFrame.foreachPartition(iter => {
+        val connection: DruidPooledConnection = manger.getConnection
+        val sql = s"INSERT INTO $tableName VALUES(?,?,?,?,?,?,?,?,?)"
+        var pstm: PreparedStatement = connection.prepareStatement(sql)
         var i = 0
         iter.foreach(row => {
-          val document = getDocumentRow(row, dictionaryMap)
-          resultDocumentList.add(document)
+          pstm = setPstm(row, pstm)
           i += 1
+          pstm.addBatch()
           if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
-            arangoManger.overwrite(resultDocumentList, collName)
-            LOG.warn(s"updated $collName: " + i)
+            pstm.executeBatch()
+            connection.commit()
+            LOG.warn(s"rows written to $tableName: " + i)
             i = 0
           }
         })
         if (i != 0) {
-          arangoManger.overwrite(resultDocumentList, collName)
-          LOG.warn(s"updated $collName: " + i)
+          pstm.executeBatch
+          connection.commit()
+          LOG.warn(s"rows written to $tableName: " + i)
         }
+        manger.clear(pstm, connection)
       })
       val last = System.currentTimeMillis()
-      LOG.warn(s"time to update $collName: ${last - start}")
+      LOG.warn(s"time to update $tableName: ${last - start}")
     } catch {
       case e: Exception => e.printStackTrace()
-    } finally {
-      hisBc.destroy()
     }
   }

+  private def getRelationFqdnLocIpPstm(row: Row, pstm: PreparedStatement): PreparedStatement = {
+    val fqdn = row.getAs[String]("FQDN")
+    val serverIp = row.getAs[String]("common_server_ip")
+    val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
+    val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
+    val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
+    val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
+
+    val distCipRecent = row.getAs[ofRef[String]]("DIST_CIP_RECENT")
+    val disCips = mergeDistinctIp(distCipRecent)
+
+    val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
+
+    pstm.setString(1, fqdn)
+    pstm.setString(2, serverIp)
+    pstm.setLong(3, firstFoundTime)
+    pstm.setLong(4, lastFoundTime)
+    pstm.setLong(5, sepAttritubeMap.getOrElse("HTTP", 0L))
+    pstm.setLong(6, sepAttritubeMap.getOrElse("TLS", 0L))
+    pstm.setLong(7, sepAttritubeMap.getOrElse("DNS", 0L))
+    pstm.setArray(8, new ClickHouseArray(1, disCips))
+    pstm.setLong(9, currentHour)
+
+    pstm
+  }
+
   private def getVertexFqdnRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = {
     val fqdn = row.getAs[String]("FQDN")
     val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
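updateDocument now streams each Spark partition into a JDBC batch insert, flushing every UPDATE_ARANGO_BATCH rows (the constant keeps its ArangoDB-era name even though it now sizes ClickHouse batches). The generic shape of that loop, with a placeholder URL, table and batch size; it assumes the ClickHouse JDBC driver is on the classpath:

    import java.sql.{Connection, DriverManager, PreparedStatement}

    // Generic sketch of the batch-write loop used above.
    object BatchWriteSketch {
      def write(rows: Iterator[(String, Long)]): Unit = {
        val conn: Connection = DriverManager.getConnection("jdbc:clickhouse://127.0.0.1:8123/demo")
        val pstm: PreparedStatement = conn.prepareStatement("INSERT INTO demo_table VALUES(?,?)")
        var i = 0
        rows.foreach { case (k, v) =>
          pstm.setString(1, k)
          pstm.setLong(2, v)
          pstm.addBatch()
          i += 1
          if (i >= 100000) { pstm.executeBatch(); i = 0 } // flush a full batch
        }
        if (i != 0) pstm.executeBatch()                   // flush the remainder
        pstm.close(); conn.close()
      }
    }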
@@ -126,35 +142,4 @@ object UpdateDocument {
     document
   }

-  private def getRelationFqdnLocateIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseEdgeDocument]): BaseEdgeDocument = {
-    val fqdn = row.getAs[String]("FQDN")
-    val serverIp = row.getAs[String]("common_server_ip")
-    val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
-    val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
-    val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
-    val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
-    val distCipRecent = row.getAs[ofRef[ofRef[String]]]("DIST_CIP_RECENT")
-
-    val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
-    val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
-
-    val key = fqdn.concat("-" + serverIp)
-    var document = dictionaryMap.getOrDefault(key, null)
-    if (document != null) {
-      updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
-      updateProtocolAttritube(document, sepAttritubeMap)
-      updateDistinctIp(document, distinctIp)
-    } else {
-      document = new BaseEdgeDocument()
-      document.setKey(key)
-      document.setFrom("FQDN/" + fqdn)
-      document.setTo("IP/" + serverIp)
-      document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
-      document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
-      putProtocolAttritube(document, sepAttritubeMap)
-      putDistinctIp(document, distinctIp)
-    }
-    document
-  }
 }
@@ -1,35 +0,0 @@
package cn.ac.iie.service.update

import java.util
import java.util.ArrayList
import java.util.concurrent.ConcurrentHashMap

import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}

import scala.collection.mutable.WrappedArray.ofRef

object UpdateDocumentTest {
  def main(args: Array[String]): Unit = {
    val baseArangoData = new BaseArangoData()
    baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])

    val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
    while (value.hasMoreElements) {
      val integer: Integer = value.nextElement()
      val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
      val unit = map.keys()
      while (unit.hasMoreElements) {
        val key = unit.nextElement()
        val edgeDocument = map.get(key)
        // val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
        // val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
        val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
        val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
        println(longs.toString + "---" + strings.toString)
      }
    }
  }
}