14 Commits

Author SHA1 Message Date
wanglihui 74b1fe6616 Switch ArangoDB reads to paginated reads. 2020-08-20 09:21:47 +08:00
wanglihui 0cb75f3eab Distinguish link info by server vs. client IP type 2020-08-19 13:58:27 +08:00
wanglihui 611335c053 YSP analysis Spark test 2020-08-12 15:53:55 +08:00
wanglihui 3b0a0517f5 ignore scala.xml 2020-08-12 14:49:00 +08:00
wanglihui 885be09053 Integrate the YSP project 2020-08-12 14:42:32 +08:00
wanglihui 55879a2c32 Spark version of the YSP analysis 2020-08-11 15:18:45 +08:00
wanglihui ad6582893b merge from master 2020-08-11 09:30:50 +08:00
wanglihui 9a5cf0126b Merge branch 'master' into ip-learning-graph-datacenter 2020-08-11 09:28:54 +08:00
wanglihui 10c742e9c2 Merge branch 'master' into ip-learning-graph-datacenter
# Conflicts:
#	IP-learning-graph/src/main/resources/clickhouse.properties
2020-08-06 16:51:47 +08:00
wanglihui c391fbffba Initial commit of the Spark version of the IP Learning tsg project 2020-08-06 16:42:35 +08:00
wanglihui a034238679 delete directory ip-learning 2020-08-06 16:36:40 +08:00
wanglihui 95603676bb tsg iplearning data-center deployment version 2020-08-06 13:59:28 +08:00
wanglihui c7af3cf247 tsg iplearning data-center deployment version 2020-08-06 13:39:41 +08:00
wanglihui 87010127ae tsg iplearning data-center deployment version 2020-08-05 14:24:37 +08:00
36 changed files with 9683 additions and 645 deletions

View File

@@ -13,8 +13,7 @@ public class ApplicationConfig {
public static final Integer ARANGODB_TTL = ConfigUtils.getIntProperty( "arangoDB.ttl");
public static final Integer ARANGODB_BATCH = ConfigUtils.getIntProperty( "arangoDB.batch");
public static final Integer UPDATE_ARANGO_BATCH = ConfigUtils.getIntProperty("update.arango.batch");
public static final Long ARANGODB_READ_LIMIT = ConfigUtils.getLongProperty("arangoDB.read.limit");
public static final Integer UPDATE_ARANGO_BATCH =ConfigUtils.getIntProperty("update.arango.batch");
public static final Integer THREAD_POOL_NUMBER = ConfigUtils.getIntProperty( "thread.pool.number");
public static final Integer THREAD_AWAIT_TERMINATION_TIME = ConfigUtils.getIntProperty( "thread.await.termination.time");
@@ -22,11 +21,19 @@ public class ApplicationConfig {
public static final Long READ_CLICKHOUSE_MAX_TIME = ConfigUtils.getLongProperty("read.clickhouse.max.time");
public static final Long READ_CLICKHOUSE_MIN_TIME = ConfigUtils.getLongProperty("read.clickhouse.min.time");
public static final Integer TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("time.limit.type");
public static final Integer CLICKHOUSE_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("clickhouse.time.limit.type");
public static final Integer UPDATE_INTERVAL = ConfigUtils.getIntProperty("update.interval");
public static final Integer DISTINCT_CLIENT_IP_NUM = ConfigUtils.getIntProperty("distinct.client.ip.num");
public static final Integer RECENT_COUNT_HOUR = ConfigUtils.getIntProperty("recent.count.hour");
public static final String TOP_DOMAIN_FILE_NAME = ConfigUtils.getStringProperty("top.domain.file.name");
public static final String ARANGODB_READ_LIMIT = ConfigUtils.getStringProperty("arangoDB.read.limit");
public static final Integer ARANGO_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("arango.time.limit.type");
public static final Long READ_ARANGO_MAX_TIME = ConfigUtils.getLongProperty("read.arango.max.time");
public static final Long READ_ARANGO_MIN_TIME = ConfigUtils.getLongProperty("read.arango.min.time");
}

View File

@@ -1,7 +1,7 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.ingestion.ReadHistoryArangoData;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
@@ -10,35 +10,38 @@ import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
* Fetch ArangoDB historical data
*
* @author wlh
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer,ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
<T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
LOG.info("开始更新"+table);
LOG.info("开始更新" + table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
map.put(i,new ConcurrentHashMap<>());
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
// long[] timeRange = getTimeRange(table);
@@ -46,18 +49,51 @@ public class BaseArangoData {
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info("读取"+table+" arangoDB 共耗时:"+(last-start));
}catch (Exception e){
LOG.info("读取" + table + " arangoDB 共耗时:" + (last - start));
} catch (Exception e) {
e.printStackTrace();
}
}
private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME;
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME;
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.info(sql + "\n查询最大最小时间用时" + (lastTime - startTime));
return new long[]{minTime, maxTime};
}
private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;
@@ -75,15 +111,19 @@ public class BaseArangoData {
return cnt;
}
private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
}
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER + 1;
long offsetNum = threadNumber * sepNum;
if (sepNum >= ApplicationConfig.ARANGODB_READ_LIMIT * 10000){
sepNum = ApplicationConfig.ARANGODB_READ_LIMIT * 10000;
}
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}
}
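
The paginated read added in commit 74b1fe6616 splits the collection's total document count evenly across the worker threads and caps each page at arangoDB.read.limit × 10,000 documents. A minimal sketch of that split, with hard-coded constants standing in for the values ApplicationConfig loads from application.properties:

// Sketch of the offset/limit pagination used when reading an ArangoDB collection.
// THREAD_POOL_NUMBER and READ_LIMIT stand in for the configured values.
public class PagedAqlSketch {
    static final int THREAD_POOL_NUMBER = 10;
    static final long READ_LIMIT = 10; // arangoDB.read.limit, in units of 10,000 documents

    static String pageQuery(long countTotal, int threadNumber, String table) {
        long pageSize = countTotal / THREAD_POOL_NUMBER + 1; // documents per thread
        long offset = threadNumber * pageSize;               // where this thread's page starts
        if (pageSize >= READ_LIMIT * 10_000) {               // cap a single page at read.limit * 10,000 docs
            pageSize = READ_LIMIT * 10_000;
        }
        return "FOR doc IN " + table + " limit " + offset + "," + pageSize + " RETURN doc";
    }

    public static void main(String[] args) {
        // e.g. 1,234,567 documents over 10 threads -> pages of 123,457 docs, capped at 100,000
        System.out.println(pageQuery(1_234_567L, 3, "R_LOCATE_FQDN2IP"));
    }
}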

View File

@@ -15,7 +15,7 @@ import java.util.HashMap;
import java.util.function.Function;
import java.util.function.Supplier;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;
import static cn.ac.iie.service.read.ReadClickhouseData.putMapByHashcode;
/**
* Read ClickHouse data and package it into maps
@@ -24,23 +24,25 @@ import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;
public class BaseClickhouseData {
private static final Logger LOG = LoggerFactory.getLogger(BaseClickhouseData.class);
private static ClickhouseConnect manger = ClickhouseConnect.getInstance();
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String,ArrayList<BaseDocument>>> newVertexSubscriberMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnAddressIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationIpVisitFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationSubsciberLocateIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnSameFqdnMap = new HashMap<>();
private static ClickhouseConnect manger = ClickhouseConnect.getInstance();
private DruidPooledConnection connection;
private Statement statement;
<T extends BaseDocument> void baseDocumentFromClickhouse(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc) {
Function<ResultSet,T> formatResultFunc){
long start = System.currentTimeMillis();
initializeMap(newMap);
String sql = getSqlSupplier.get();
LOG.info(sql);
try {
connection = manger.getConnection();
statement = connection.createStatement();
@@ -54,7 +56,7 @@ public class BaseClickhouseData {
}
}
long last = System.currentTimeMillis();
LOG.info(sql + "\n读取"+i+"条数据,运行时间:" + (last - start));
LOG.info("读取"+i+"条数据,运行时间:" + (last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
@@ -65,7 +67,7 @@ public class BaseClickhouseData {
private <T extends BaseDocument> void initializeMap(HashMap<Integer, HashMap<String,ArrayList<T>>> map){
try {
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
map.put(i, new HashMap<>());
map.put(i, new HashMap<>(16));
}
}catch (Exception e){
e.printStackTrace();

View File

@@ -1,14 +1,13 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.service.update.Document;
import cn.ac.iie.service.update.relationship.LocateFqdn2Ip;
import cn.ac.iie.service.update.relationship.LocateSubscriber2Ip;
import cn.ac.iie.service.update.relationship.SameFqdn2Fqdn;
import cn.ac.iie.service.update.relationship.VisitIp2Fqdn;
import cn.ac.iie.service.update.vertex.Fqdn;
import cn.ac.iie.service.update.vertex.Ip;
import cn.ac.iie.service.update.vertex.Subscriber;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;
@@ -36,37 +35,36 @@ public class UpdateGraphData {
private static final Logger LOG = LoggerFactory.getLogger(UpdateGraphData.class);
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
private static BaseArangoData baseArangoData = new BaseArangoData();
private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();
public void updateArango(){
long start = System.currentTimeMillis();
try {
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN",
Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);
// updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
// ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP",
Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql, ReadClickhouseData::getVertexIpDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP",
LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql, ReadClickhouseData::getRelationFqdnAddressIpDocument);
updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);
updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
VisitIp2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipIpVisitFqdnSql, ReadClickhouseData::getRelationIpVisitFqdnDocument);
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);
// updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
// VisitIp2Fqdn.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);
updateDocument(newRelationSubsciberLocateIpMap,historyRelationSubsciberLocateIpMap,"R_LOCATE_SUBSCRIBER2IP",
LocateSubscriber2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);
updateDocument(newRelationFqdnSameFqdnMap,historyRelationFqdnSameFqdnMap,"R_SAME_ORIGIN_FQDN2FQDN",
SameFqdn2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnSameFqdnSql, ReadClickhouseData::getRelationshipFqdnSameFqdnDocument);
long last = System.currentTimeMillis();
LOG.info("iplearning application运行完毕用时"+(last - start));
LOG.info("更新图数据库时间共计"+(last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
@@ -75,15 +73,13 @@ public class UpdateGraphData {
}
}
private <T extends BaseDocument> void updateDocument(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
String collection,
Class<? extends Document<T>> taskType,
Class<T> docmentType,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc
) {
Function<ResultSet,T> formatResultFunc) {
try {
baseArangoData.readHistoryData(collection,historyMap,docmentType);
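
Each updateDocument call follows the same pattern: load the collection's history from ArangoDB into per-thread buckets, load the fresh ClickHouse rows into matching buckets, then run one update task per bucket and block on a CountDownLatch. A simplified sketch of that orchestration (task construction is schematic; the real tasks are the Fqdn/Ip/Relationship classes shown below):

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Schematic of the per-collection update flow.
public class UpdateFlowSketch {
    static void updateCollection(int threads, List<Runnable> perBucketTasks) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(perBucketTasks.size());
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        for (Runnable task : perBucketTasks) {
            pool.execute(() -> {
                try {
                    task.run();        // merge new documents into history and write back to ArangoDB
                } finally {
                    latch.countDown(); // always release the latch, even if the task fails
                }
            });
        }
        latch.await();                 // wait until every bucket has been processed
        pool.shutdown();
    }
}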

View File

@@ -1,6 +1,7 @@
package cn.ac.iie.service.ingestion;
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.TopDomainUtils;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
@@ -22,10 +23,10 @@ public class ReadClickhouseData {
private static Pattern pattern = Pattern.compile("^[\\d]*$");
private static final Logger LOG = LoggerFactory.getLogger(ReadClickhouseData.class);
private static long[] timeLimit = getTimeLimit();
private static long maxTime = timeLimit[0];
private static long minTime = timeLimit[1];
public static final Integer DISTINCT_CLIENT_IP_NUM = ApplicationConfig.DISTINCT_CLIENT_IP_NUM;
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR;
public static final HashSet<String> PROTOCOL_SET;
@@ -37,10 +38,11 @@ public class ReadClickhouseData {
PROTOCOL_SET.add("DNS");
}
public static BaseDocument getVertexFqdnDocument(ResultSet resultSet) {
public static BaseDocument getVertexFqdnDocument(ResultSet resultSet){
BaseDocument newDoc = null;
try {
String fqdnName = resultSet.getString("FQDN");
String fqdnOrReferer = resultSet.getString("FQDN");
String fqdnName = TopDomainUtils.getDomainFromUrl(fqdnOrReferer);
if (isDomain(fqdnName)) {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
@@ -50,13 +52,13 @@ public class ReadClickhouseData {
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}
} catch (Exception e) {
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseDocument getVertexIpDocument(ResultSet resultSet) {
public static BaseDocument getVertexIpDocument(ResultSet resultSet){
BaseDocument newDoc = new BaseDocument();
try {
String ip = resultSet.getString("IP");
@@ -65,13 +67,6 @@ public class ReadClickhouseData {
long sessionCount = resultSet.getLong("SESSION_COUNT");
long bytesSum = resultSet.getLong("BYTES_SUM");
String ipType = resultSet.getString("ip_type");
String[] commonLinkInfos = (String[]) resultSet.getArray("common_link_info").getArray();
String commonLinkInfo;
if (commonLinkInfos.length > 1 && !commonLinkInfos[1].equals("")){
commonLinkInfo = commonLinkInfos[1];
}else {
commonLinkInfo = commonLinkInfos[0];
}
newDoc.setKey(ip);
newDoc.addAttribute("IP", ip);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
@@ -90,15 +85,20 @@ public class ReadClickhouseData {
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
default:
newDoc.addAttribute("SERVER_SESSION_COUNT", 0L);
newDoc.addAttribute("SERVER_BYTES_SUM", 0L);
newDoc.addAttribute("CLIENT_SESSION_COUNT", 0L);
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
}
newDoc.addAttribute("COMMON_LINK_INFO", commonLinkInfo);
} catch (Exception e) {
// newDoc.addAttribute("COMMON_LINK_INFO", "");
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseDocument getVertexSubscriberDocument(ResultSet resultSet) {
public static BaseDocument getVertexSubscriberDocument(ResultSet resultSet){
BaseDocument newDoc = new BaseDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");
@@ -107,14 +107,13 @@ public class ReadClickhouseData {
newDoc.setKey(subscriberId);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("SUBSCRIBER_ID",subscriberId);
} catch (Exception e) {
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationshipSubsciberLocateIpDocument(ResultSet resultSet) {
public static BaseEdgeDocument getRelationshipSubsciberLocateIpDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = new BaseEdgeDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");
@@ -130,14 +129,14 @@ public class ReadClickhouseData {
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("COUNT_TOTAL", countTotal);
} catch (Exception e) {
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationFqdnAddressIpDocument(ResultSet resultSet) {
public static BaseEdgeDocument getRelationFqdnAddressIpDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");
@@ -146,7 +145,6 @@ public class ReadClickhouseData {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String schemaType = resultSet.getString("schema_type");
String[] distCipRecents = (String[]) resultSet.getArray("DIST_CIP_RECENT").getArray();
long[] clientIpTs = new long[distCipRecents.length];
for (int i = 0; i < clientIpTs.length; i++) {
@@ -160,18 +158,43 @@ public class ReadClickhouseData {
newDoc.setTo("IP/" + vIp);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("DIST_CIP", distCipRecents);
newDoc.addAttribute("DIST_CIP_TS", clientIpTs);
newDoc.addAttribute("PROTOCOL_TYPE", schemaType);
checkSchemaProperty(newDoc, schemaType, countTotal);
}
} catch (Exception e) {
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationIpVisitFqdnDocument(ResultSet resultSet) {
public static BaseEdgeDocument getRelationshipFqdnSameFqdnDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String domainFqdn = resultSet.getString("domainFqdn");
String referer = resultSet.getString("referer");
String refererFqdn = TopDomainUtils.getDomainFromUrl(referer);
if (isDomain(refererFqdn) && isDomain(domainFqdn)){
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String key = domainFqdn + "-" + refererFqdn;
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("FQDN/" + domainFqdn);
newDoc.setTo("FQDN/" + refererFqdn);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationIpVisitFqdnDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");
@@ -181,18 +204,16 @@ public class ReadClickhouseData {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String schemaType = resultSet.getString("schema_type");
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("IP/" + vIp);
newDoc.setTo("FQDN/" + vFqdn);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("PROTOCOL_TYPE", schemaType);
checkSchemaProperty(newDoc, schemaType, countTotal);
}
} catch (Exception e) {
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
@@ -201,8 +222,8 @@ public class ReadClickhouseData {
public static <T extends BaseDocument> void putMapByHashcode(T newDoc, HashMap<Integer, HashMap<String, ArrayList<T>>> map) {
if (newDoc != null) {
String key = newDoc.getKey();
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
HashMap<String, ArrayList<T>> documentHashMap = map.getOrDefault(hashCode, new HashMap<>());
int i = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
HashMap<String, ArrayList<T>> documentHashMap = map.getOrDefault(i, new HashMap<>());
ArrayList<T> documentArrayList = documentHashMap.getOrDefault(key, new ArrayList<>());
documentArrayList.add(newDoc);
documentHashMap.put(key, documentArrayList);
@@ -241,16 +262,17 @@ public class ReadClickhouseData {
return false;
}
private static void checkSchemaProperty(BaseEdgeDocument newDoc, String schema, long countTotal) {
long[] recentCnt = new long[RECENT_COUNT_HOUR];
recentCnt[0] = countTotal;
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
String protocolTotal = protocol + "_CNT_TOTAL";
if (protocol.equals(schema)) {
for (String protocol: PROTOCOL_SET){
String protocolRecent = protocol +"_CNT_RECENT";
String protocolTotal = protocol + "_CNT_TOTAL";
if (protocol.equals(schema)){
newDoc.addAttribute(protocolTotal, countTotal);
newDoc.addAttribute(protocolRecent, recentCnt);
} else {
}else {
newDoc.addAttribute(protocolTotal, 0L);
newDoc.addAttribute(protocolRecent, new long[RECENT_COUNT_HOUR]);
}
@@ -258,32 +280,32 @@ public class ReadClickhouseData {
}
public static String getVertexFqdnSql() {
String where = "common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
return "SELECT FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME FROM ((" + sslSql + ") UNION ALL (" + httpSql + ")) GROUP BY FQDN HAVING FQDN != ''";
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime;
String mediaDomainSql = "SELECT s1_domain AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_domain != '' GROUP BY s1_domain";
String refererSql = "SELECT s1_referer AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_referer != '' GROUP BY s1_referer";
return "SELECT * FROM((" + mediaDomainSql + ") UNION ALL (" + refererSql + "))";
}
public static String getVertexIpSql() {
// String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_schema_type != 'BASE'";
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime ;
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_c2s) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_s2c) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String where = " recv_time >= " + minTime + " AND recv_time < " + maxTime;
String clientIpSql = "SELECT s1_s_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'client' as ip_type FROM media_expire_patch where " + where + " group by IP";
String serverIpSql = "SELECT s1_d_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'server' as ip_type FROM media_expire_patch where " + where + " group by IP";
return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
}
public static String getRelationshipFqdnAddressIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_d_ip != '' ";
return "SELECT s1_domain AS FQDN,s1_d_ip AS common_server_ip,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(s1_s_ip) AS DIST_CIP_RECENT FROM media_expire_patch WHERE "+where+" GROUP BY s1_d_ip,s1_domain";
}
public static String getRelationshipFqdnSameFqdnSql(){
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_referer != '' ";
return "SELECT s1_domain AS domainFqdn,s1_referer AS referer,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL FROM media_expire_patch where "+where+" GROUP BY s1_domain,s1_referer";
}
public static String getRelationshipIpVisitFqdnSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime+" AND s1_s_ip != '' AND s1_domain != '' ";
return "SELECT s1_s_ip AS common_client_ip,s1_domain AS FQDN,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL FROM media_expire_patch WHERE "+where+" GROUP BY s1_s_ip,s1_domain";
}
public static String getVertexSubscriberSql() {
@@ -299,7 +321,7 @@ public class ReadClickhouseData {
private static long[] getTimeLimit() {
long maxTime = 0L;
long minTime = 0L;
switch (ApplicationConfig.TIME_LIMIT_TYPE) {
switch (ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE) {
case 0:
maxTime = currentHour;
minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL;
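
getTimeLimit picks the ClickHouse query window from clickhouse.time.limit.type: mode 0 derives it from the current hour and update.interval, mode 1 takes the two configured timestamps. A small sketch of that selection with the constants passed in as parameters:

// Sketch: choose the [minTime, maxTime) window for the ClickHouse queries.
static long[] timeWindow(int mode, long currentHour, long updateInterval,
                         long configuredMax, long configuredMin) {
    long maxTime;
    long minTime;
    if (mode == 0) {     // read the most recent interval, aligned to the current hour
        maxTime = currentHour;
        minTime = maxTime - updateInterval;
    } else {             // mode 1: use the explicit bounds from application.properties
        maxTime = configuredMax;
        minTime = configuredMin;
    }
    return new long[]{maxTime, minTime}; // the original stores max at index 0 and min at index 1
}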

View File

@@ -1,4 +1,4 @@
package cn.ac.iie.service.ingestion;
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
@@ -8,32 +8,29 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.*;
import static cn.ac.iie.service.read.ReadClickhouseData.RECENT_COUNT_HOUR;
/**
* @author wlh
* Read the full ArangoDB history with multiple threads and package it into maps
*/
@SuppressWarnings("unchecked")
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
private ArangoDBConnect arangoConnect;
private String query;
private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
private ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map;
private Class<T> type;
private String table;
private CountDownLatch countDownLatch;
public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
String query,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
Class<T> type,
String table,
CountDownLatch countDownLatch) {
@@ -51,33 +48,23 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
ArrayList<T> list = new ArrayList<>();
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc);
// deleteDistinctClientIpByTime(doc);
list.add(doc);
break;
default:
}
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
arangoConnect.overwrite(list,table);
long l = System.currentTimeMillis();
LOG.info(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
}
} catch (Exception e) {
LOG.error(Arrays.toString(e.getStackTrace()));
} finally {
}catch (Exception e){
e.printStackTrace();
}finally {
countDownLatch.countDown();
LOG.info("本线程读取完毕,剩余线程数量:" + countDownLatch.getCount());
LOG.info("本线程读取完毕,剩余线程数量:"+countDownLatch.getCount());
}
}
@@ -86,7 +73,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[0]);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
@@ -95,28 +82,4 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
}
}
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
Collections.reverse(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
String[] distCipArr = new String[index];
long[] disCipTsArr = new long[index];
if (index != 0 && distCip.size() + 1 == distCipTs.size()){
for (int i = 0; i < index; i++) {
distCipArr[i] = distCip.get(i);
disCipTsArr[i] = distCipTs.get(i);
}
}
doc.updateAttribute("DIST_CIP", distCipArr);
doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
}
}
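
When R_LOCATE_FQDN2IP history is reloaded, updateProtocolDocument shifts each protocol's *_CNT_RECENT array by one slot so the newest hour starts from zero. A standalone sketch of that rolling shift:

// Shift an hourly counter window by one position and reset the newest slot.
static Long[] rollRecentCounts(Long[] recent) {
    Long[] shifted = new Long[recent.length];
    // hours 0..n-2 move to positions 1..n-1; the oldest hour drops off the end
    System.arraycopy(recent, 0, shifted, 1, recent.length - 1);
    shifted[0] = 0L; // the current hour starts with a zero count
    return shifted;
}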

View File

@@ -124,10 +124,5 @@ public class Document<T extends BaseDocument> extends Thread{
lastDoc.addAttribute(attribute,firstSumAttribute+lastSumAttribute);
}
protected void replaceAttribute(T firstDoc,T lastDoc,String attribute){
Object attributeObj = firstDoc.getAttribute(attribute);
lastDoc.addAttribute(attribute,attributeObj);
}
}

View File

@@ -1,6 +1,6 @@
package cn.ac.iie.service.update;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
@@ -24,11 +24,6 @@ public class Relationship extends Document<BaseEdgeDocument> {
super.updateFunction(newEdgeDocument,historyEdgeDocument);
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}
protected void updateProcotol(BaseEdgeDocument historyEdgeDocument, String schema, BaseEdgeDocument newEdgeDocument){
String recentSchema = schema +"_CNT_RECENT";
String totalSchema = schema + "_CNT_TOTAL";
@@ -49,6 +44,11 @@ public class Relationship extends Document<BaseEdgeDocument> {
}
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}
protected void mergeProtocol(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
String schema = lastDoc.getAttribute("PROTOCOL_TYPE").toString();
if (ReadClickhouseData.PROTOCOL_SET.contains(schema)){

View File

@@ -22,4 +22,19 @@ public class Vertex extends Document<BaseDocument> {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
}
@Override
protected void mergeFunction(BaseDocument lastDoc,BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}
@Override
public void run() {
super.run();
}
}

View File

@@ -1,6 +1,5 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
@@ -9,8 +8,8 @@ import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.DISTINCT_CLIENT_IP_NUM;
import static cn.ac.iie.service.ingestion.ReadClickhouseData.currentHour;
import static cn.ac.iie.service.read.ReadClickhouseData.DISTINCT_CLIENT_IP_NUM;
import static cn.ac.iie.service.read.ReadClickhouseData.currentHour;
public class LocateFqdn2Ip extends Relationship {
@@ -26,7 +25,7 @@ public class LocateFqdn2Ip extends Relationship {
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
super.mergeFunction(lastDoc, newDocument);
mergeDistinctClientIp(lastDoc, newDocument);
mergeProtocol(lastDoc, newDocument);
putSumAttribute(lastDoc, newDocument,"CNT_TOTAL");
}
private void mergeDistinctClientIp(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
@@ -46,10 +45,8 @@ public class LocateFqdn2Ip extends Relationship {
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
for (String schema:ReadClickhouseData.PROTOCOL_SET){
updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
}
updateDistinctClientIp(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument, historyEdgeDocument,"CNT_TOTAL");
}
private void updateDistinctClientIp(BaseEdgeDocument newEdgeDocument,BaseEdgeDocument edgeDocument){

View File

@@ -0,0 +1,34 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class SameFqdn2Fqdn extends Relationship {
public SameFqdn2Fqdn(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
}
}

View File

@@ -1,13 +1,11 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
@@ -23,14 +21,12 @@ public class VisitIp2Fqdn extends Relationship {
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
for (String schema: ReadClickhouseData.PROTOCOL_SET){
updateProcotol(historyEdgeDocument,schema,newEdgeDocument);
}
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
mergeProtocol(lastDoc,newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
}
}

View File

@@ -16,20 +16,26 @@ public class Ip extends Vertex {
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
updateIpByType(newDocument, historyDocument);
super.replaceAttribute(newDocument,historyDocument,"COMMON_LINK_INFO");
}
@Override
protected void mergeFunction(BaseDocument lastDoc, BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
updateIpByType(lastDoc, newDocument);
mergeIpByType(lastDoc, newDocument);
}
private void mergeIpByType(BaseDocument lastDoc, BaseDocument newDocument) {
putSumAttribute(lastDoc,newDocument,"CLIENT_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"CLIENT_BYTES_SUM");
putSumAttribute(lastDoc,newDocument,"SERVER_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"SERVER_BYTES_SUM");
}
private void updateIpByType(BaseDocument newDocument, BaseDocument historyDocument) {
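
Merging two IP vertices sums the client/server session and byte counters through putSumAttribute; the Document diff above shows that helper adding the two attribute values and storing the total on the surviving document. A stand-in sketch of that summation over plain maps (the real code operates on BaseDocument attributes):

// Sketch mirroring putSumAttribute's intent: keep a running total of one numeric attribute.
static void putSumAttribute(java.util.Map<String, Object> target,
                            java.util.Map<String, Object> source,
                            String attribute) {
    long a = Long.parseLong(String.valueOf(target.getOrDefault(attribute, 0L)));
    long b = Long.parseLong(String.valueOf(source.getOrDefault(attribute, 0L)));
    target.put(attribute, a + b); // the surviving document carries the combined count
}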

View File

@@ -2,9 +2,15 @@ package cn.ac.iie.test;
import cn.ac.iie.dao.UpdateGraphData;
/**
* Entry point of the iplearning application
* @author wlh
*/
public class IpLearningApplicationTest {
public static void main(String[] args) {
UpdateGraphData updateGraphData = new UpdateGraphData();
updateGraphData.updateArango();

View File

@@ -16,7 +16,6 @@ import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
public class ArangoDBConnect {
@@ -24,10 +23,10 @@ public class ArangoDBConnect {
private static ArangoDB arangoDB = null;
private static ArangoDBConnect conn = null;
static {
getArangoDB();
getArangoDatabase();
}
private static void getArangoDB(){
private static void getArangoDatabase(){
arangoDB = new ArangoDB.Builder()
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
.host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)
@@ -53,26 +52,45 @@ public class ArangoDBConnect {
arangoDB.shutdown();
}
}catch (Exception e){
LOG.error(e.getMessage());
e.printStackTrace();
}
}
public <T> ArangoCursor<T> executorQuery(String query,Class<T> type){
ArangoDatabase database = getDatabase();
Map<String, Object> bindVars = new MapBuilder().get();
AqlQueryOptions options = new AqlQueryOptions()
.ttl(ApplicationConfig.ARANGODB_TTL);
AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL);
try {
return database.query(query, bindVars, options, type);
}catch (Exception e){
LOG.error(e.getMessage());
e.printStackTrace();
return null;
}finally {
bindVars.clear();
}
}
public <T> void overwrite(List<T> docOverwrite, String collectionName){
@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("更新失败");
e.printStackTrace();
}finally {
docInsert.clear();
docUpdate.clear();
}
}
public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
@@ -83,14 +101,16 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.error("写入arangoDB异常"+errorEntity.getErrorMessage());
LOG.debug("写入arangoDB异常"+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
LOG.error("更新失败:"+e.toString());
System.out.println("更新失败:"+e.toString());
}finally {
docOverwrite.clear();
}
}
}
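
The overwrite path relies on the driver's bulk insertDocuments call with overwrite enabled, so documents whose keys already exist are replaced instead of rejected. A condensed sketch of that write (assuming a driver version that exposes DocumentCreateOptions.overwrite; ArangoDatabase setup is omitted):

// Condensed sketch of the bulk overwrite write path.
<T> void overwriteSketch(com.arangodb.ArangoDatabase database,
                         java.util.ArrayList<T> docOverwrite, String collectionName) {
    com.arangodb.ArangoCollection collection = database.collection(collectionName);
    com.arangodb.model.DocumentCreateOptions options =
            new com.arangodb.model.DocumentCreateOptions().overwrite(true); // replace on key conflict
    com.arangodb.entity.MultiDocumentEntity<com.arangodb.entity.DocumentCreateEntity<T>> result =
            collection.insertDocuments(docOverwrite, options);
    for (com.arangodb.entity.ErrorEntity error : result.getErrors()) {
        System.err.println("write error: " + error.getErrorMessage()); // surface per-document failures
    }
    docOverwrite.clear(); // the caller reuses the list, so drain it after the write
}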

View File

@@ -2,8 +2,6 @@ package cn.ac.iie.utils;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidPooledConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.ResultSet;
@@ -12,7 +10,6 @@ import java.sql.Statement;
import java.util.Properties;
public class ClickhouseConnect {
private static final Logger LOG = LoggerFactory.getLogger(ClickhouseConnect.class);
private static DruidDataSource dataSource = null;
private static ClickhouseConnect dbConnect = null;
private static Properties props = new Properties();
@@ -46,7 +43,7 @@ public class ClickhouseConnect {
dataSource.setKeepAlive(true);
}
} catch (Exception e) {
LOG.error(e.getMessage());
e.printStackTrace();
}
}
@@ -88,7 +85,7 @@ public class ClickhouseConnect {
connection.close();
}
} catch (SQLException e) {
LOG.error(e.getMessage());
e.printStackTrace();
}
}
@@ -101,7 +98,7 @@ public class ClickhouseConnect {
pstm = connection.createStatement();
return pstm.executeQuery(query);
}catch (Exception e){
LOG.error(e.getMessage());
e.printStackTrace();
return null;
}
}

View File

@@ -1,12 +1,8 @@
package cn.ac.iie.utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;
public class ConfigUtils {
private static final Logger LOG = LoggerFactory.getLogger(ConfigUtils.class);
private static Properties propCommon = new Properties();
public static String getStringProperty(String key) {
@@ -29,12 +25,12 @@ public class ConfigUtils {
static {
try {
propCommon.load(ConfigUtils.class.getClassLoader().getResourceAsStream("application.properties"));
LOG.info("application.properties加载成功");
System.out.println("application.properties加载成功");
} catch (Exception e) {
propCommon = null;
LOG.error("配置加载失败");
System.err.println("配置加载失败");
}
}
}

View File

@@ -2,13 +2,14 @@ package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.*;
/**
* Thread pool management
* @author wlh
*/
public class ExecutorThreadPool {
private static final Logger LOG = LoggerFactory.getLogger(ExecutorThreadPool.class);
private static ExecutorService pool = null ;
private static ExecutorThreadPool poolExecutor = null;
@@ -19,9 +20,13 @@ public class ExecutorThreadPool {
private static void getThreadPool(){
ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("iplearning-application-pool-%d").build();
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER,
0L, TimeUnit.SECONDS,
//Common Thread Pool
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER*2,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
// pool = Executors.newFixedThreadPool(ApplicationConfig.THREAD_POOL_NUMBER);
}
public static ExecutorThreadPool getInstance(){
@@ -39,7 +44,7 @@ public class ExecutorThreadPool {
public void awaitThreadTask(){
try {
while (!pool.awaitTermination(ApplicationConfig.THREAD_AWAIT_TERMINATION_TIME, TimeUnit.SECONDS)) {
LOG.warn("线程池没有关闭");
System.out.println("线程池没有关闭");
}
} catch (InterruptedException e) {
e.printStackTrace();

View File

@@ -0,0 +1,161 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.HashMap;
public class TopDomainUtils {
private static Logger logger = LoggerFactory.getLogger(TopDomainUtils.class);
public static String getSecDomain(String urlDomain, HashMap<String, HashMap<String, String>> maps) {
String[] split = urlDomain.split("\\.");
String secDomain = null;
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
HashMap<String, String> fullTop = maps.get("full");
if (!(innerMap.containsKey(split[i]))) {
StringBuilder strSec = new StringBuilder();
for (int j = i; j < split.length; j++) {
strSec.append(split[j]).append(".");
}
secDomain = strSec.substring(0, strSec.length() - 1);
if (fullTop.containsKey(getTopDomainFromSecDomain(secDomain))) {
break;
} else {
while (!fullTop.containsKey(getTopDomainFromSecDomain(secDomain)) && getTopDomainFromSecDomain(secDomain).contains(".")) {
secDomain = getTopDomainFromSecDomain(secDomain);
}
break;
}
}
}
return secDomain;
}
private static String getTopDomainFromSecDomain(String secDomain) {
String quFirstDian = secDomain;
if (secDomain.contains(".")) {
quFirstDian = secDomain.substring(secDomain.indexOf(".")).substring(1);
}
return quFirstDian;
}
private static File getTopDomainFile(){
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
File file = null;
if (url!=null){
file = new File(url.getFile());
}
if (file != null && file.isFile() && file.exists()){
return file;
}
return null;
}
public static HashMap<String, HashMap<String, String>> readTopDomainFile() {
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
assert url != null;
HashMap<String, HashMap<String, String>> maps = makeHashMap(url.getFile());
try {
String encoding = "UTF-8";
File file = new File(url.getFile());
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
HashMap<String, String> fullTop = maps.get("full");
fullTop.put(lineTxt, lineTxt);
maps.put("full", fullTop);
String[] split = lineTxt.split("\\.");
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
innerMap.put(split[i], split[i]);
maps.put("map_id_" + mapsIndex, innerMap);
}
}
read.close();
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>readTopDomainFile get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return maps;
}
private static int getMaxLength(String filePath) {
int lengthDomain = 0;
try {
String encoding = "UTF-8";
File file = new File(filePath);
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
String[] split = lineTxt.split("\\.");
if (split.length > lengthDomain) {
lengthDomain = split.length;
}
}
read.close();
} else {
logger.error("TopDomainUtils>>getMaxLength filePath is wrong--->{" + filePath + "}<---");
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>getMaxLength get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return lengthDomain;
}
private static HashMap<String, HashMap<String, String>> makeHashMap(String filePath) {
int maxLength = getMaxLength(filePath);
HashMap<String, HashMap<String, String>> maps = new HashMap<>();
for (int i = 0; i < maxLength; i++) {
maps.put("map_id_" + i, new HashMap<String, String>());
}
maps.put("full", new HashMap<String, String>());
return maps;
}
/**
* General-purpose method: given a URL, return its domain. The returned domain excludes the port; if it contains ':' it is always IPv6.
* @param oriUrl
* @return
*/
public static String getDomainFromUrl(String oriUrl) {
//split on '?' first to drop the query string
String url = oriUrl.split("[?]")[0];
//strip the http:// or https:// prefix
url = url.replaceAll("https://", "").replaceAll("http://", "");
String domain;
//extract the domain
if (url.split("/")[0].split(":").length <= 2) {
//splitting on ':' yields 1 or 2 parts, so this is IPv4 (or a hostname)
domain = url
//index 0 after splitting on '/' holds the domain
.split("/")[0]
//for IPv4, splitting on ':' drops the port; index 0 is the final domain
.split(":")[0];
} else {
//more than 2 parts after splitting on ':' means IPv6; IPv6 addresses carry no port (for now), so splitting on '/' is enough
domain = url.split("/")[0];
}
return domain;
}
}
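
A quick usage example for getDomainFromUrl, exercising both the IPv4-with-port branch and the IPv6 branch (the inputs are made up):

import cn.ac.iie.utils.TopDomainUtils;

public class TopDomainUtilsDemo {
    public static void main(String[] args) {
        // host with port, path and query string -> "www.example.com"
        System.out.println(TopDomainUtils.getDomainFromUrl("https://www.example.com:8080/index.html?q=1"));
        // IPv6 address (no port handling) -> "2001:db8::1"
        System.out.println(TopDomainUtils.getDomainFromUrl("http://2001:db8::1/path"));
    }
}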

View File

@@ -1,25 +1,33 @@
# ArangoDB settings
arangoDB.host=192.168.44.12
arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=tsg_galaxy_v3_test
arangoDB.DB.name=ip-learning-test
arangoDB.batch=100000
arangoDB.ttl=3600
arangoDB.read.limit=10
update.arango.batch=10000
thread.pool.number=40
thread.pool.number=10
thread.await.termination.time=10
# ClickHouse time-range mode: 0 = read the past hour, 1 = use the explicit range below
time.limit.type=1
read.clickhouse.max.time=1603421554
read.clickhouse.min.time=1603354682
clickhouse.time.limit.type=0
read.clickhouse.max.time=1571245230
read.clickhouse.min.time=1571245220
# ArangoDB time-range mode: 0 = normal read, 1 = use the explicit range below
arango.time.limit.type=0
read.arango.max.time=1571245220
read.arango.min.time=1571245210
update.interval=3600
distinct.client.ip.num=100
distinct.client.ip.num=10000
recent.count.hour=24
top.domain.file.name=topDomain.txt
arangoDB.read.limit=
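
For context, the pagination code in BaseArangoData interprets arangoDB.read.limit in units of 10,000 documents: with arangoDB.read.limit=10 and thread.pool.number=10, each thread's AQL page is capped at 10 × 10,000 = 100,000 documents, regardless of how large the collection's total count is.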

View File

@@ -1,7 +1,8 @@
drivers=ru.yandex.clickhouse.ClickHouseDriver
db.id=192.168.40.193:8123/tsg_galaxy_zx?socket_timeout=3600000
#db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
mdb.user=default
db.id=192.168.44.10:8123/tsg_galaxy_v3?socket_timeout=3600000&compress=0
mdb.password=ceiec2019
mdb.password=111111
initialsize=1
minidle=1
maxactive=50

View File

@@ -4,19 +4,20 @@ log4j.logger.org.apache.http.wire=OFF
#Log4j
log4j.rootLogger=info,console,file
# Console log appender
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File log appender
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=info
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Log file path
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout

File diff suppressed because it is too large

View File

@@ -11,7 +11,6 @@ import java.util.List;
public class TestList {
public static void main(String[] args) {
/*
ArangoDBConnect arangoConnect = ArangoDBConnect.getInstance();
ArangoCursor<BaseEdgeDocument> documents = arangoConnect.executorQuery("FOR doc IN R_LOCATE_FQDN2IP filter doc.FIRST_FOUND_TIME >= 1596080839 and doc.FIRST_FOUND_TIME <= 1596395473 RETURN doc", BaseEdgeDocument.class);
List<BaseEdgeDocument> baseEdgeDocuments = documents.asListRemaining();
@@ -19,8 +18,8 @@ public class TestList {
doc.updateAttribute("PROTOCOL_TYPE","123");
}
*/
/*
ArrayList<Integer> integers = new ArrayList<>();
integers.add(10);
integers.add(8);
@@ -40,9 +39,7 @@ public class TestList {
integers.add(5);
Collections.sort(integers);
System.out.println(integers);
Collections.reverse(integers);
System.out.println(integers);
System.out.println(integers.indexOf(5));
*/
}
}

View File

@@ -1,11 +1,27 @@
package cn.ac.iie;
import cn.ac.iie.dao.BaseArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
public class readHistoryDataTest {
public static void main(String[] args) {
BaseArangoData baseArangoData = new BaseArangoData();
ArangoDBConnect instance = ArangoDBConnect.getInstance();
// ArangoCursor<Long> baseDocuments = instance.executorQuery("RETURN LENGTH(R_LOCATE_FQDN2IP)", Long.class);
// while (baseDocuments.hasNext()){
// Long next = baseDocuments.next();
// System.out.println(next.toString());
// }
// instance.clean();
String sql = "FOR doc IN FQDN filter doc.FIRST_FOUND_TIME >= 1595423493 and doc.FIRST_FOUND_TIME <= 1595809766 limit 763,10 RETURN doc";
ArangoCursor<BaseDocument> baseDocuments = instance.executorQuery(sql, BaseDocument.class);
while (baseDocuments.hasNext()){
BaseDocument next = baseDocuments.next();
System.out.println(next.toString());
}
instance.clean();
}
}
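The test above fetches one page of FQDN documents with AQL's limit offset,count; a minimal sketch of looping over successive pages with the same ArangoDBConnect wrapper (page size and collection name are illustrative):

import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;

public class PagedReadExample {
    public static void main(String[] args) {
        ArangoDBConnect conn = ArangoDBConnect.getInstance();
        final long pageSize = 10;   // illustrative page size
        long offset = 0;
        while (true) {
            String aql = "FOR doc IN FQDN limit " + offset + "," + pageSize + " RETURN doc";
            ArangoCursor<BaseDocument> cursor = conn.executorQuery(aql, BaseDocument.class);
            long fetched = 0;
            while (cursor.hasNext()) {
                System.out.println(cursor.next().getKey());
                fetched++;
            }
            if (fetched < pageSize) {
                break;              // last (possibly partial) page reached
            }
            offset += pageSize;
        }
        conn.clean();
    }
}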

View File

@@ -6,6 +6,7 @@ import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -19,12 +20,22 @@ import java.util.concurrent.CountDownLatch;
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
public <T extends BaseDocument> ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> readHistoryData(String table, Class<T> type) {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap = new ConcurrentHashMap<>();
public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
LOG.warn("开始更新" + table);
long start = System.currentTimeMillis();
@@ -32,9 +43,9 @@ public class BaseArangoData {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
Long countTotal = getCountTotal(table);
long[] timeRange = getTimeRange(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
String sql = getQuerySql(countTotal, i, table);
String sql = getQuerySql(timeRange, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
@@ -44,34 +55,49 @@ public class BaseArangoData {
} catch (Exception e) {
e.printStackTrace();
}
return historyMap;
}
private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;
String sql = "RETURN LENGTH("+table+")";
try {
ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
while (longs.hasNext()){
cnt = longs.next();
}
}catch (Exception e){
LOG.error(sql + " failed to execute");
private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
}
long last = System.currentTimeMillis();
LOG.warn(sql + " result: " + cnt + " elapsed ms: " + (last-start));
return cnt;
long lastTime = System.currentTimeMillis();
LOG.warn(sql + "\nTime to query min/max timestamps (ms): " + (lastTime - startTime));
return new long[]{minTime, maxTime};
}
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
long offsetNum = threadNumber * sepNum;
if (sepNum >= ApplicationConfig.ARANGODB_READ_LIMIT() * 10000){
sepNum = ApplicationConfig.ARANGODB_READ_LIMIT() * 10000;
}
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
}
}
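To make the parallel-read arithmetic in getQuerySql concrete: the [minTime, maxTime] window is split evenly across the worker threads and each thread filters on its own FIRST_FOUND_TIME slice. A standalone sketch with invented numbers (the production code additionally pads maxTime by the thread count so integer division cannot drop the newest documents):

public class RangeSplitExample {
    public static void main(String[] args) {
        long minTime = 1595423493L, maxTime = 1595809766L;   // sample FIRST_FOUND_TIME bounds
        int threads = 10;                                     // stands in for thread.pool.number
        long step = (maxTime - minTime) / threads;
        for (int i = 0; i < threads; i++) {
            long lo = minTime + i * step;
            long hi = minTime + (i + 1) * step;
            System.out.println("FOR doc IN FQDN filter doc.FIRST_FOUND_TIME >= " + lo
                    + " and doc.FIRST_FOUND_TIME <= " + hi + " RETURN doc");
        }
    }
}

Because both endpoints use >= and <=, a document sitting exactly on a slice boundary can be returned by two adjacent queries; the key-based history map makes that duplication harmless.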

View File

@@ -19,13 +19,12 @@ import java.util.concurrent.CountDownLatch;
* @author wlh
* Reads the full arangoDb history with multiple threads and packs it into the map
*/
@SuppressWarnings("unchecked")
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
private static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
private static final HashSet<String> PROTOCOL_SET;
public static final HashSet<String> PROTOCOL_SET;
static {
PROTOCOL_SET = new HashSet<>();
@@ -67,7 +66,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc);
// updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc);
break;
default:
@@ -93,7 +92,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[0]);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
@@ -105,11 +104,6 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
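The rest of this method is truncated in the hunk above; as a reading aid, a self-contained sketch of the trimming idea, assuming DIST_CIP_TS is kept ascending and parallel to DIST_CIP (sample values invented):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class TrimDistinctClientIpExample {
    public static void main(String[] args) {
        // cutoff: start of the retention window, mirroring currentHour - RECENT_COUNT_HOUR * 3600
        long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
        long cutoff = currentHour - 24 * 3600L;                      // 24 stands in for recent.count.hour

        // parallel lists as stored on the edge document: one timestamp per distinct client IP
        List<Long> distCipTs = new ArrayList<>(Arrays.asList(cutoff - 7200, cutoff - 3600, cutoff + 3600));
        List<String> distCip = new ArrayList<>(Arrays.asList("10.0.0.1", "10.0.0.2", "10.0.0.3"));

        distCipTs.add(cutoff);                                       // insert the cutoff marker
        Collections.sort(distCipTs);                                 // assumes timestamps were already ascending
        int index = distCipTs.indexOf(cutoff);                       // entries before this index are stale

        // one plausible way to apply the index (the original method body is not shown above)
        List<String> kept = new ArrayList<>(distCip.subList(Math.min(index, distCip.size()), distCip.size()));
        System.out.println(kept);                                    // [10.0.0.3]
    }
}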

View File

@@ -27,27 +27,14 @@ public class ArangoDBConnect {
}
private static void getArangoDatabase(){
ArangoDB.Builder host = getArangoHost();
arangoDB = host
arangoDB = new ArangoDB.Builder()
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER())
.acquireHostList(true)
.host(ApplicationConfig.ARANGODB_HOST(), ApplicationConfig.ARANGODB_PORT())
.user(ApplicationConfig.ARANGODB_USER())
.password(ApplicationConfig.ARANGODB_PASSWORD())
.build();
}
private static ArangoDB.Builder getArangoHost(){
String hostList = ApplicationConfig.ARANGODB_HOST();
String[] split = hostList.split(",");
ArangoDB.Builder host = new ArangoDB.Builder();
for (String hostStr : split) {
host.host(hostStr, ApplicationConfig.ARANGODB_PORT());
LOG.warn("arangoDB host {} 已添加",hostStr);
}
LOG.warn("获取arangoDB host成功");
return host;
}
public static synchronized ArangoDBConnect getInstance(){
if (null == conn){
conn = new ArangoDBConnect();
@@ -83,6 +70,26 @@ public class ArangoDBConnect {
}
}
@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("更新失败");
e.printStackTrace();
}finally {
docInsert.clear();
docInsert.clear();
}
}
public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
ArangoDatabase database = getDatabase();
try {
@@ -94,11 +101,11 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.debug("写入arangoDB异常"+errorEntity.getErrorMessage());
LOG.warn("写入arangoDB异常"+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
LOG.error("更新arangoDB失败:"+e.toString());
System.out.println("更新失败:"+e.toString());
}finally {
docOverwrite.clear();
}

View File

@@ -0,0 +1,25 @@
package cn.ac.iie.utils;
public class TopDomainUtils {
/**
* Generic helper: given a URL, returns its domain. The returned domain never includes a port, so a result that still contains ':' must be an IPv6 address.
* @param oriUrl
* @return
*/
public static String getDomainFromUrl(String oriUrl) {
String url = oriUrl.split("[?]")[0];
url = url.replaceAll("https://", "").replaceAll("http://", "");
String domain;
if (url.split("/")[0].split(":").length <= 2) {
domain = url
.split("/")[0]
.split(":")[0];
} else {
domain = url.split("/")[0];
}
return domain;
}
}

View File

@@ -1,40 +1,46 @@
#Spark job settings
spark.sql.shuffle.partitions=10
spark.sql.shuffle.partitions=5
spark.executor.memory=4g
spark.app.name=test
spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer
#spark.serializer=org.apache.spark.serializer.JavaSerializer
master=local[*]
#Spark ClickHouse reader settings
spark.read.clickhouse.url=jdbc:clickhouse://192.168.44.10:8123/tsg_galaxy_v3
#spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.193:8123/tsg_galaxy_zx
spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
spark.read.clickhouse.user=default
spark.read.clickhouse.password=ceiec2019
spark.read.clickhouse.numPartitions=5
spark.read.clickhouse.password=111111
spark.read.clickhouse.numPartitions=144
spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
spark.read.clickhouse.partitionColumn=recv_time
clickhouse.socket.timeout=300000
#arangoDB settings
arangoDB.host=192.168.40.123,192.168.40.223,192.168.40.222
#arangoDB.host=192.168.40.223
arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2019
arangoDB.DB.name=tsg_galaxy_v3
arangoDB.password=ceiec2018
#arangoDB.DB.name=insert_iplearn_index
arangoDB.DB.name=ip-learning-test-0
arangoDB.ttl=3600
thread.pool.number=10
thread.pool.number=5
#Time range mode for reading clickhouse: 0 = read the past hour, 1 = use the explicit range below
clickhouse.time.limit.type=1
read.clickhouse.max.time=1600916194
read.clickhouse.min.time=1599197648
read.clickhouse.max.time=1571241640
read.clickhouse.min.time=1571241600
arangoDB.read.sepNum=10
#Time range mode for reading arangoDB: 0 = normal (full) read, 1 = use the explicit range below
arango.time.limit.type=0
read.arango.max.time=1571245320
read.arango.min.time=1571245200
arangoDB.read.limit=
update.arango.batch=10000
distinct.client.ip.num=10000
recent.count.hour=24
update.interval=3600
update.interval=10800

View File

@@ -36,7 +36,12 @@ object ApplicationConfig {
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
val ARANGODB_READ_LIMIT: Long = config.getLong("arangoDB.read.sepNum")
val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")
val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")
val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")

View File

@@ -2,6 +2,7 @@ package cn.ac.iie.dao
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.utils.SparkSessionUtil.spark
import cn.ac.iie.utils.TopDomainUtils
import org.apache.spark.sql.DataFrame
import org.slf4j.LoggerFactory
@@ -11,7 +12,7 @@ object BaseClickhouseData {
val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
private val timeLimit: (Long, Long) = getTimeLimit
private def initClickhouseData(sql:String): DataFrame ={
private def initClickhouseData(sql:String): Unit ={
val dataFrame: DataFrame = spark.read.format("jdbc")
.option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
@@ -28,18 +29,16 @@ object BaseClickhouseData {
.load()
dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable")
dataFrame
}
def loadConnectionDataFromCk(): Unit ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1 + " AND common_schema_type != 'BASE'"
val where = "recv_time >= " + timeLimit._2 + " AND recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
| s1_domain,s1_referer,s1_s_ip,s1_d_ip,recv_time,media_len
|FROM
| connection_record_log
| media_expire_patch
|WHERE $where) as dbtable
""".stripMargin
@@ -50,8 +49,8 @@ object BaseClickhouseData {
private def loadRadiusDataFromCk(): Unit ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| common_start_time >= ${timeLimit._2}
| AND common_start_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
@@ -60,7 +59,7 @@ object BaseClickhouseData {
val sql =
s"""
|(SELECT
| common_subscriber_id,radius_framed_ip,common_recv_time
| common_subscriber_id,radius_framed_ip,common_start_time
|FROM
| tsg_galaxy_v3.radius_record_log
|WHERE
@@ -70,28 +69,31 @@ object BaseClickhouseData {
initClickhouseData(sql)
}
def getDomain(url:String): String ={
TopDomainUtils.getDomainFromUrl(url)
}
def getVertexFqdnDf: DataFrame ={
loadConnectionDataFromCk()
spark.udf.register("getDomain",TopDomainUtils.getDomainFromUrl _)
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
| FQDN,MAX(LAST_FOUND_TIME) AS LAST_FOUND_TIME,MIN(FIRST_FOUND_TIME) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| s1_domain AS FQDN,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| GROUP BY s1_domain
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| getDomain(s1_referer) AS FQDN,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| GROUP BY getDomain(s1_referer)
| )
| )
|GROUP BY
@@ -105,170 +107,79 @@ object BaseClickhouseData {
vertexFqdnDf
}
def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT common_client_ip AS IP,MIN(common_end_time) AS FIRST_FOUND_TIME,
|MAX(common_end_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
|'client' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_client_ip)
|UNION ALL
|(SELECT common_server_ip AS IP,
|MIN(common_end_time) AS FIRST_FOUND_TIME,
|MAX(common_end_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
|'server' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_server_ip))) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
def main(args: Array[String]): Unit = {
val df = getRelationFqdnLocateIpDf
df.show(10)
}
def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| *
|FROM
| (
| (
| SELECT
| s1_s_ip AS IP,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| MAX(recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(media_len) as BYTES_SUM,
| 'client' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| UNION ALL
| (
| SELECT
| s1_d_ip AS IP,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| MAX(recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(media_len) as BYTES_SUM,
| 'server' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| )
""".stripMargin
LOG.warn(sql)
val vertexIpDf = spark.sql(sql)
vertexIpDf.printSchema()
vertexIpDf
}
/*
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()
val sslSql =
"""
|SELECT
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'SSL'
|GROUP BY
| ssl_sni,common_server_ip
""".stripMargin
val httpSql =
val sql =
"""
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| s1_domain AS FQDN,
| s1_d_ip AS common_server_ip,
| MAX(recv_time) AS LAST_FOUND_TIME,
| MIN(recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
| collect_set(s1_s_ip) AS DIST_CIP_RECENT
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'HTTP'
| s1_domain != ''
|GROUP BY
| http_host,common_server_ip
| s1_domain,s1_d_ip
""".stripMargin
val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
LOG.warn(sql)
val relationFqdnLocateIpDf = spark.sql(sql)
relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf
}
*/
def getRelationFqdnLocateIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getRelationSubidLocateIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
|FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
|) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getVertexSubidDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getVertexFramedIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
private def getTimeLimit: (Long,Long) ={
var maxTime = 0L

View File

@@ -20,17 +20,6 @@ object MergeDataFrame {
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}
def mergeVertexFrameIp: RDD[Row] ={
val values = BaseClickhouseData.getVertexFramedIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val ip = row.getAs[String]("radius_framed_ip")
(ip, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"读取R_LOCATE_SUBSCRIBER2IP clickhouse成功${values.count()}")
values
}
def mergeVertexIp(): RDD[Row]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
@@ -38,8 +27,7 @@ object MergeDataFrame {
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list"),
last("common_link_info").alias("common_link_info")
collect_list("ip_type").alias("ip_type_list")
)
val values = frame.rdd.map(row => (row.get(0), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
@@ -47,61 +35,30 @@ object MergeDataFrame {
}
def mergeRelationFqdnLocateIp(): RDD[Row] ={
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
BaseClickhouseData.getRelationFqdnLocateIpDf
.filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
val values = frame.rdd.map(row => {
.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-" + serverIp)
(key, row)
val key = fqdn.concat("-"+serverIp)
(key,row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"读取R_LOCATE_FQDN2IP clickhouse成功${values.count()}")
values
}
def mergeRelationSubidLocateIp(): RDD[Row] ={
val values = BaseClickhouseData.getRelationSubidLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val key = commonSubscriberId.concat("-" + ip)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"读取R_LOCATE_SUBSCRIBER2IP clickhouse成功${values.count()}")
values
}
def mergeVertexSubid(): RDD[Row] ={
val values = BaseClickhouseData.getVertexSubidDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
(commonSubscriberId, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
LOG.warn(s"读取SUBSCRIBER clickhouse成功${values.count()}")
values
}
private def isDomain(fqdn: String): Boolean = {
try {
if (fqdn == null || fqdn.length == 0) {
return false
}
val fqdnArr = fqdn.split(":")(0).split("\\.")
if (fqdnArr.length != 4){
if (fqdn.contains(":")) {
val s = fqdn.split(":")(0)
if (s.contains(":")){
return false
}
}
val fqdnArr = fqdn.split("\\.")
if (fqdnArr.length < 4 || fqdnArr.length > 4){
return true
}
for (f <- fqdnArr) {

View File

@@ -26,10 +26,6 @@ object UpdateDocHandler {
hisDoc.addAttribute(attributeName,newAttribute+hisAttritube)
}
def replaceAttribute(hisDoc: BaseDocument,newAttribute:String,attributeName:String): Unit ={
hisDoc.addAttribute(attributeName,newAttribute)
}
def separateAttributeByIpType(ipTypeList:ofRef[String],
sessionCountList:ofRef[AnyRef],
bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
@@ -98,12 +94,7 @@ object UpdateDocHandler {
}
def mergeDistinctIp(distCipRecent:ofRef[String]): Array[String] ={
distCipRecent.flatMap(str => {
str.replaceAll("\\[","")
.replaceAll("\\]","")
.replaceAll("\\'","")
.split(",")
}).distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
distCipRecent.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}
def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={

View File

@@ -5,6 +5,7 @@ import java.util.concurrent.ConcurrentHashMap
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import cn.ac.iie.service.transform.MergeDataFrame._
import cn.ac.iie.service.update.UpdateDocHandler._
import cn.ac.iie.utils.{ArangoDBConnect, ExecutorThreadPool, SparkSessionUtil}
@@ -25,12 +26,9 @@ object UpdateDocument {
def update(): Unit = {
try {
// updateDocument("FQDN", getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn)
// updateDocument("IP", getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
updateDocument("SUBSCRIBER",getVertexSubidRow,classOf[BaseDocument],mergeVertexSubid)
insertFrameIp()
updateDocument("R_LOCATE_SUBSCRIBER2IP",getRelationSubidLocateIpRow,classOf[BaseEdgeDocument],mergeRelationSubidLocateIp)
updateDocument("FQDN", historyVertexFqdnMap, getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn)
updateDocument("IP", historyVertexIpMap, getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
updateDocument("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
} catch {
case e: Exception => e.printStackTrace()
} finally {
@@ -40,33 +38,13 @@ object UpdateDocument {
}
}
private def insertFrameIp(): Unit ={
mergeVertexFrameIp.foreachPartition(iter => {
val resultDocumentList = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val document = getVertexFrameipRow(row)
resultDocumentList.add(document)
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"更新:IP" + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"更新IP:" + i)
}
})
}
private def updateDocument[T <: BaseDocument](collName: String,
historyMap: ConcurrentHashMap[Integer, ConcurrentHashMap[String, T]],
getDocumentRow: (Row, ConcurrentHashMap[String, T]) => T,
clazz: Class[T],
getNewDataRdd: () => RDD[Row]
): Unit = {
val historyMap = baseArangoData.readHistoryData(collName, clazz)
baseArangoData.readHistoryData(collName, historyMap, clazz)
val hisBc = spark.sparkContext.broadcast(historyMap)
try {
val start = System.currentTimeMillis()
@@ -117,56 +95,6 @@ object UpdateDocument {
document
}
private def getRelationSubidLocateIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseEdgeDocument]): BaseEdgeDocument ={
val subId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val key = subId.concat("-"+ip)
var document = dictionaryMap.getOrDefault(key,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
} else {
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("SUBSCRIBER/" + subId)
document.setTo("IP/" + ip)
document.addAttribute("SUBSCRIBER",subId)
document.addAttribute("IP",ip)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}
document
}
private def getVertexFrameipRow(row: Row): BaseDocument ={
val ip = row.getAs[String]("radius_framed_ip")
val document = new BaseDocument()
document.setKey(ip)
document.addAttribute("IP",ip)
document
}
private def getVertexSubidRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument ={
val subId = row.getAs[String]("common_subscriber_id")
val subLastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val subFirstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
var document = dictionaryMap.getOrDefault(subId,null)
if (document != null){
updateMaxAttribute(document,subLastFoundTime,"LAST_FOUND_TIME")
} else {
document = new BaseDocument()
document.setKey(subId)
document.addAttribute("SUBSCRIBER",subId)
document.addAttribute("FIRST_FOUND_TIME",subFirstFoundTime)
document.addAttribute("LAST_FOUND_TIME",subLastFoundTime)
}
document
}
private def getVertexIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = {
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
@@ -174,7 +102,6 @@ object UpdateDocument {
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val linkInfo = row.getAs[String]("common_link_info")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList, sessionCountList, bytesSumList)
var document = dictionaryMap.getOrDefault(ip, null)
@@ -184,7 +111,6 @@ object UpdateDocument {
updateSumAttribute(document, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(document, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(document, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
replaceAttribute(document,linkInfo,"COMMON_LINK_INFO")
} else {
document = new BaseDocument
document.setKey(ip)
@@ -195,7 +121,7 @@ object UpdateDocument {
document.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO", linkInfo)
document.addAttribute("COMMON_LINK_INFO", "")
}
document
}
@@ -205,18 +131,16 @@ object UpdateDocument {
val serverIp = row.getAs[String]("common_server_ip")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
val countTotal = row.getAs[Long]("COUNT_TOTAL")
val distCipRecent = row.getAs[ofRef[String]]("DIST_CIP_RECENT")
val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
val key = fqdn.concat("-" + serverIp)
var document = dictionaryMap.getOrDefault(key, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
updateProtocolAttritube(document, sepAttritubeMap)
updateSumAttribute(document,countTotal,"CNT_TOTAL")
updateDistinctIp(document, distinctIp)
} else {
document = new BaseEdgeDocument()
@@ -225,7 +149,7 @@ object UpdateDocument {
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttritube(document, sepAttritubeMap)
document.addAttribute("CNT_TOTAL",countTotal)
putDistinctIp(document, distinctIp)
}
document
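All of the get…Row helpers in this file follow the same upsert pattern: look the key up in the broadcast history map, bump a few attributes if a document already exists, otherwise build a fresh vertex or edge. A compact standalone sketch of that pattern for the FQDN-to-IP edge; attribute names follow the code above, while the FQDN/ prefix in _from and the wrapper class are assumptions for illustration:

import java.util.concurrent.ConcurrentHashMap;

import com.arangodb.entity.BaseEdgeDocument;

public class UpsertPatternExample {
    static BaseEdgeDocument mergeFqdnLocateIp(ConcurrentHashMap<String, BaseEdgeDocument> history,
                                              String fqdn, String serverIp,
                                              long firstFoundTime, long lastFoundTime, long countTotal) {
        String key = fqdn + "-" + serverIp;
        BaseEdgeDocument doc = history.getOrDefault(key, null);
        if (doc != null) {
            // existing edge: push LAST_FOUND_TIME forward and accumulate the counter
            long prevLast = Long.parseLong(doc.getAttribute("LAST_FOUND_TIME").toString());
            doc.updateAttribute("LAST_FOUND_TIME", Math.max(prevLast, lastFoundTime));
            long prevCnt = Long.parseLong(doc.getAttribute("CNT_TOTAL").toString());
            doc.updateAttribute("CNT_TOTAL", prevCnt + countTotal);
        } else {
            // new edge: create it with both endpoints and the initial attributes
            doc = new BaseEdgeDocument();
            doc.setKey(key);
            doc.setFrom("FQDN/" + fqdn);
            doc.setTo("IP/" + serverIp);
            doc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
            doc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
            doc.addAttribute("CNT_TOTAL", countTotal);
        }
        return doc;
    }
}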

View File

@@ -1,35 +1,35 @@
//package cn.ac.iie.service.update
//
//import java.util
//import java.util.ArrayList
//import java.util.concurrent.ConcurrentHashMap
//
//import cn.ac.iie.dao.BaseArangoData
//import cn.ac.iie.dao.BaseArangoData._
//import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
//
//import scala.collection.mutable.WrappedArray.ofRef
//
//object UpdateDocumentTest {
// def main(args: Array[String]): Unit = {
// val baseArangoData = new BaseArangoData()
// baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
//
// val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
// while (value.hasMoreElements) {
// val integer: Integer = value.nextElement()
// val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
// val unit = map.keys()
// while (unit.hasMoreElements) {
// val key = unit.nextElement()
// val edgeDocument = map.get(key)
// // val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// // val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
// println(longs.toString + "---" + strings.toString)
// }
// }
// }
//
//}
package cn.ac.iie.service.update
import java.util
import java.util.ArrayList
import java.util.concurrent.ConcurrentHashMap
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocumentTest {
def main(args: Array[String]): Unit = {
val baseArangoData = new BaseArangoData()
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
while (value.hasMoreElements) {
val integer: Integer = value.nextElement()
val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
val unit = map.keys()
while (unit.hasMoreElements) {
val key = unit.nextElement()
val edgeDocument = map.get(key)
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
println(longs.toString + "---" + strings.toString)
}
}
}
}